From 4119a83483e3bfe257614d66b31c7da22d1c02d1 Mon Sep 17 00:00:00 2001 From: Erfan Nariman Date: Tue, 30 Jun 2020 00:12:39 +0200 Subject: [PATCH 001/632] Change for black --- pandas/io/parsers.py | 68 +++++++++++++++++++++++--------------------- 1 file changed, 35 insertions(+), 33 deletions(-) diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py index c427d3a198b10..6a71d1f00946a 100644 --- a/pandas/io/parsers.py +++ b/pandas/io/parsers.py @@ -521,7 +521,7 @@ def _read(filepath_or_buffer: FilePathOrBuffer, kwds): "float_precision": None, } -_fwf_defaults = {"colspecs": "infer", "infer_nrows": 100, "widths": None} +_fwf_defaults = {"col_specs": "infer", "infer_nrows": 100, "col_widths": None} _c_unsupported = {"skipfooter"} _python_unsupported = {"low_memory", "float_precision"} @@ -757,8 +757,8 @@ def read_table( def read_fwf( filepath_or_buffer: FilePathOrBuffer, - colspecs="infer", - widths=None, + col_specs="infer", + col_widths=None, infer_nrows=100, **kwds, ): @@ -785,18 +785,18 @@ def read_fwf( By file-like object, we refer to objects with a ``read()`` method, such as a file handler (e.g. via builtin ``open`` function) or ``StringIO``. - colspecs : list of tuple (int, int) or 'infer'. optional + col_specs : list of tuple (int, int) or 'infer'. optional A list of tuples giving the extents of the fixed-width fields of each line as half-open intervals (i.e., [from, to[ ). String value 'infer' can be used to instruct the parser to try detecting the column specifications from the first 100 rows of the data which are not being skipped via skiprows (default='infer'). - widths : list of int, optional - A list of field widths which can be used instead of 'colspecs' if + col_widths : list of int, optional + A list of column widths which can be used instead of 'col_specs' if the intervals are contiguous. infer_nrows : int, default 100 The number of rows to consider when letting the parser determine the - `colspecs`. + `col_specs`. .. versionadded:: 0.24.0 **kwds : optional @@ -818,19 +818,19 @@ def read_fwf( >>> pd.read_fwf('data.csv') # doctest: +SKIP """ # Check input arguments. - if colspecs is None and widths is None: - raise ValueError("Must specify either colspecs or widths") - elif colspecs not in (None, "infer") and widths is not None: - raise ValueError("You must specify only one of 'widths' and 'colspecs'") - - # Compute 'colspecs' from 'widths', if specified. - if widths is not None: - colspecs, col = [], 0 - for w in widths: - colspecs.append((col, col + w)) + if col_specs is None and col_widths is None: + raise ValueError("Must specify either col_specs or col_widths") + elif col_specs not in (None, "infer") and col_widths is not None: + raise ValueError("You must specify only one of 'col_widths' and 'col specs'") + + # Compute 'col_specs' from 'col_widths', if specified. + if col_widths is not None: + col_specs, col = [], 0 + for w in col_widths: + col_specs.append((col, col + w)) col += w - kwds["colspecs"] = colspecs + kwds["col_specs"] = col_specs kwds["infer_nrows"] = infer_nrows kwds["engine"] = "python-fwf" return _read(filepath_or_buffer, kwds) @@ -3658,30 +3658,32 @@ class FixedWidthReader(abc.Iterator): A reader of fixed-width lines. 
""" - def __init__(self, f, colspecs, delimiter, comment, skiprows=None, infer_nrows=100): + def __init__( + self, f, col_specs, delimiter, comment, skiprows=None, infer_nrows=100 + ): self.f = f self.buffer = None self.delimiter = "\r\n" + delimiter if delimiter else "\n\r\t " self.comment = comment - if colspecs == "infer": - self.colspecs = self.detect_colspecs( + if col_specs == "infer": + self.col_specs = self.detect_colspecs( infer_nrows=infer_nrows, skiprows=skiprows ) else: - self.colspecs = colspecs + self.col_specs = col_specs - if not isinstance(self.colspecs, (tuple, list)): + if not isinstance(self.col_specs, (tuple, list)): raise TypeError( "column specifications must be a list or tuple, " - f"input was a {type(colspecs).__name__}" + f"input was a {type(col_specs).__name__}" ) - for colspec in self.colspecs: + for col_specs in self.col_specs: if not ( - isinstance(colspec, (tuple, list)) - and len(colspec) == 2 - and isinstance(colspec[0], (int, np.integer, type(None))) - and isinstance(colspec[1], (int, np.integer, type(None))) + isinstance(col_specs, (tuple, list)) + and len(col_specs) == 2 + and isinstance(col_specs[0], (int, np.integer, type(None))) + and isinstance(col_specs[1], (int, np.integer, type(None))) ): raise TypeError( "Each column specification must be " @@ -3755,8 +3757,8 @@ def __next__(self): line = next(self.f) else: line = next(self.f) - # Note: 'colspecs' is a sequence of half-open intervals. - return [line[fromm:to].strip(self.delimiter) for (fromm, to) in self.colspecs] + # Note: 'col_specs' is a sequence of half-open intervals. + return [line[fromm:to].strip(self.delimiter) for (fromm, to) in self.col_specs] class FixedWidthFieldParser(PythonParser): @@ -3767,14 +3769,14 @@ class FixedWidthFieldParser(PythonParser): def __init__(self, f, **kwds): # Support iterators, convert to a list. 
- self.colspecs = kwds.pop("colspecs") + self.col_specs = kwds.pop("col_specs") self.infer_nrows = kwds.pop("infer_nrows") PythonParser.__init__(self, f, **kwds) def _make_reader(self, f): self.data = FixedWidthReader( f, - self.colspecs, + self.col_specs, self.delimiter, self.comment, self.skiprows, From af312e5635c1cd311e8ffad81ac00848b5d1f57e Mon Sep 17 00:00:00 2001 From: Erfan Nariman Date: Tue, 30 Jun 2020 00:14:21 +0200 Subject: [PATCH 002/632] Changes for black in tests --- pandas/tests/base/test_value_counts.py | 2 +- pandas/tests/io/parser/test_read_fwf.py | 94 +++++++++++++------------ 2 files changed, 51 insertions(+), 45 deletions(-) diff --git a/pandas/tests/base/test_value_counts.py b/pandas/tests/base/test_value_counts.py index de04c30432e6f..2b8a918505255 100644 --- a/pandas/tests/base/test_value_counts.py +++ b/pandas/tests/base/test_value_counts.py @@ -208,7 +208,7 @@ def test_value_counts_datetime64(index_or_series): ) f = StringIO(txt) df = pd.read_fwf( - f, widths=[6, 8, 3], names=["person_id", "dt", "food"], parse_dates=["dt"] + f, col_widths=[6, 8, 3], names=["person_id", "dt", "food"], parse_dates=["dt"] ) s = klass(df["dt"].copy()) diff --git a/pandas/tests/io/parser/test_read_fwf.py b/pandas/tests/io/parser/test_read_fwf.py index e982667f06f31..fcfdab5a66f4c 100644 --- a/pandas/tests/io/parser/test_read_fwf.py +++ b/pandas/tests/io/parser/test_read_fwf.py @@ -40,7 +40,7 @@ def test_basic(): tm.assert_frame_equal(result, expected) -def test_colspecs(): +def test_col_specs(): data = """\ A B C D E 201158 360.242940 149.910199 11950.7 @@ -49,8 +49,8 @@ def test_colspecs(): 201161 413.836124 184.375703 11916.8 201162 502.953953 173.237159 12468.3 """ - colspecs = [(0, 4), (4, 8), (8, 20), (21, 33), (34, 43)] - result = read_fwf(StringIO(data), colspecs=colspecs) + col_specs = [(0, 4), (4, 8), (8, 20), (21, 33), (34, 43)] + result = read_fwf(StringIO(data), col_specs=col_specs) expected = DataFrame( [ @@ -74,7 +74,7 @@ def test_widths(): 2011 61 413.836124 184.375703 11916.8 2011 62 502.953953 173.237159 12468.3 """ - result = read_fwf(StringIO(data), widths=[5, 5, 13, 13, 7]) + result = read_fwf(StringIO(data), col_widths=[5, 5, 13, 13, 7]) expected = DataFrame( [ @@ -104,8 +104,8 @@ def test_non_space_filler(): 201161~~~~413.836124~~~184.375703~~~11916.8 201162~~~~502.953953~~~173.237159~~~12468.3 """ - colspecs = [(0, 4), (4, 8), (8, 20), (21, 33), (34, 43)] - result = read_fwf(StringIO(data), colspecs=colspecs, delimiter="~") + col_specs = [(0, 4), (4, 8), (8, 20), (21, 33), (34, 43)] + result = read_fwf(StringIO(data), col_specs=col_specs, delimiter="~") expected = DataFrame( [ @@ -129,10 +129,10 @@ def test_over_specified(): 201161 413.836124 184.375703 11916.8 201162 502.953953 173.237159 12468.3 """ - colspecs = [(0, 4), (4, 8), (8, 20), (21, 33), (34, 43)] + col_specs = [(0, 4), (4, 8), (8, 20), (21, 33), (34, 43)] with pytest.raises(ValueError, match="must specify only one of"): - read_fwf(StringIO(data), colspecs=colspecs, widths=[6, 10, 10, 7]) + read_fwf(StringIO(data), col_specs=col_specs, col_widths=[6, 10, 10, 7]) def test_under_specified(): @@ -145,7 +145,7 @@ def test_under_specified(): 201162 502.953953 173.237159 12468.3 """ with pytest.raises(ValueError, match="Must specify either"): - read_fwf(StringIO(data), colspecs=None, widths=None) + read_fwf(StringIO(data), col_specs=None, col_widths=None) def test_read_csv_compat(): @@ -167,20 +167,20 @@ def test_read_csv_compat(): 201161 413.836124 184.375703 11916.8 201162 502.953953 173.237159 12468.3 
""" - colspecs = [(0, 4), (4, 8), (8, 20), (21, 33), (34, 43)] - result = read_fwf(StringIO(fwf_data), colspecs=colspecs) + col_specs = [(0, 4), (4, 8), (8, 20), (21, 33), (34, 43)] + result = read_fwf(StringIO(fwf_data), col_specs=col_specs) tm.assert_frame_equal(result, expected) def test_bytes_io_input(): result = read_fwf( - BytesIO("שלום\nשלום".encode("utf8")), widths=[2, 2], encoding="utf8" + BytesIO("שלום\nשלום".encode("utf8")), col_widths=[2, 2], encoding="utf8" ) expected = DataFrame([["של", "ום"]], columns=["של", "ום"]) tm.assert_frame_equal(result, expected) -def test_fwf_colspecs_is_list_or_tuple(): +def test_fwf_col_specs_is_list_or_tuple(): data = """index,A,B,C,D foo,2,3,4,5 bar,7,8,9,10 @@ -193,10 +193,10 @@ def test_fwf_colspecs_is_list_or_tuple(): msg = "column specifications must be a list or tuple.+" with pytest.raises(TypeError, match=msg): - read_fwf(StringIO(data), colspecs={"a": 1}, delimiter=",") + read_fwf(StringIO(data), col_specs={"a": 1}, delimiter=",") -def test_fwf_colspecs_is_list_or_tuple_of_two_element_tuples(): +def test_fwf_col_specs_is_list_or_tuple_of_two_element_tuples(): data = """index,A,B,C,D foo,2,3,4,5 bar,7,8,9,10 @@ -213,7 +213,7 @@ def test_fwf_colspecs_is_list_or_tuple_of_two_element_tuples(): @pytest.mark.parametrize( - "colspecs,exp_data", + "col_specs,exp_data", [ ([(0, 3), (3, None)], [[123, 456], [456, 789]]), ([(None, 3), (3, 6)], [[123, 456], [456, 789]]), @@ -221,7 +221,7 @@ def test_fwf_colspecs_is_list_or_tuple_of_two_element_tuples(): ([(None, None), (3, 6)], [[123456, 456], [456789, 789]]), ], ) -def test_fwf_colspecs_none(colspecs, exp_data): +def test_fwf_col_specs_none(col_specs, exp_data): # see gh-7079 data = """\ 123456 @@ -229,7 +229,7 @@ def test_fwf_colspecs_none(colspecs, exp_data): """ expected = DataFrame(exp_data) - result = read_fwf(StringIO(data), colspecs=colspecs, header=None) + result = read_fwf(StringIO(data), col_specs=col_specs, header=None) tm.assert_frame_equal(result, expected) @@ -242,7 +242,7 @@ def test_fwf_colspecs_none(colspecs, exp_data): (10, [[1, 2], [123, 98]]), ], ) -def test_fwf_colspecs_infer_nrows(infer_nrows, exp_data): +def test_fwf_col_specs_infer_nrows(infer_nrows, exp_data): # see gh-15138 data = """\ 1 2 @@ -259,7 +259,7 @@ def test_fwf_regression(): # # Turns out "T060" is parsable as a datetime slice! 
tz_list = [1, 10, 20, 30, 60, 80, 100] - widths = [16] + [8] * len(tz_list) + col_widths = [16] + [8] * len(tz_list) names = ["SST"] + [f"T{z:03d}" for z in tz_list[1:]] data = """ 2009164202000 9.5403 9.4105 8.6571 7.8372 6.0612 5.8843 5.5192 @@ -274,7 +274,7 @@ def test_fwf_regression(): index_col=0, header=None, names=names, - widths=widths, + col_widths=col_widths, parse_dates=True, date_parser=lambda s: datetime.strptime(s, "%Y%j%H%M%S"), ) @@ -305,7 +305,7 @@ def test_fwf_for_uint8(): 1421302964.226776 PRI=6 PGN=0xf002 SRC=0x47 243 00 00 255 247 00 00 71""" # noqa df = read_fwf( StringIO(data), - colspecs=[(0, 17), (25, 26), (33, 37), (49, 51), (58, 62), (63, 1000)], + col_specs=[(0, 17), (25, 26), (33, 37), (49, 51), (58, 62), (63, 1000)], names=["time", "pri", "pgn", "dst", "src", "data"], converters={ "pgn": lambda x: int(x, 16), @@ -334,10 +334,10 @@ def test_fwf_comment(comment): """ data = data.replace("#", comment) - colspecs = [(0, 3), (4, 9), (9, 25)] + col_specs = [(0, 3), (4, 9), (9, 25)] expected = DataFrame([[1, 2.0, 4], [5, np.nan, 10.0]]) - result = read_fwf(StringIO(data), colspecs=colspecs, header=None, comment=comment) + result = read_fwf(StringIO(data), col_specs=col_specs, header=None, comment=comment) tm.assert_almost_equal(result, expected) @@ -349,11 +349,11 @@ def test_fwf_thousands(thousands): """ data = data.replace(",", thousands) - colspecs = [(0, 3), (3, 11), (12, 16)] + col_specs = [(0, 3), (3, 11), (12, 16)] expected = DataFrame([[1, 2334.0, 5], [10, 13, 10.0]]) result = read_fwf( - StringIO(data), header=None, colspecs=colspecs, thousands=thousands + StringIO(data), header=None, col_specs=col_specs, thousands=thousands ) tm.assert_almost_equal(result, expected) @@ -383,8 +383,8 @@ def test_full_file(): 2000-01-07T00:00:00 0.487094399463 0 bar 2000-01-10T00:00:00 0.836648671666 2 baz 2000-01-11T00:00:00 0.157160753327 34 foo""" - colspecs = ((0, 19), (21, 35), (38, 40), (42, 45)) - expected = read_fwf(StringIO(test), colspecs=colspecs) + col_specs = ((0, 19), (21, 35), (38, 40), (42, 45)) + expected = read_fwf(StringIO(test), col_specs=col_specs) result = read_fwf(StringIO(test)) tm.assert_frame_equal(result, expected) @@ -400,8 +400,8 @@ def test_full_file_with_missing(): 2000-01-07T00:00:00 0 bar 2000-01-10T00:00:00 0.836648671666 2 baz 34""" - colspecs = ((0, 19), (21, 35), (38, 40), (42, 45)) - expected = read_fwf(StringIO(test), colspecs=colspecs) + col_specs = ((0, 19), (21, 35), (38, 40), (42, 45)) + expected = read_fwf(StringIO(test), col_specs=col_specs) result = read_fwf(StringIO(test)) tm.assert_frame_equal(result, expected) @@ -419,8 +419,8 @@ def test_full_file_with_spaces(): """.strip( "\r\n" ) - colspecs = ((0, 7), (8, 28), (30, 38), (42, 53), (56, 70)) - expected = read_fwf(StringIO(test), colspecs=colspecs) + col_specs = ((0, 7), (8, 28), (30, 38), (42, 53), (56, 70)) + expected = read_fwf(StringIO(test), col_specs=col_specs) result = read_fwf(StringIO(test)) tm.assert_frame_equal(result, expected) @@ -438,8 +438,8 @@ def test_full_file_with_spaces_and_missing(): """.strip( "\r\n" ) - colspecs = ((0, 7), (8, 28), (30, 38), (42, 53), (56, 70)) - expected = read_fwf(StringIO(test), colspecs=colspecs) + col_specs = ((0, 7), (8, 28), (30, 38), (42, 53), (56, 70)) + expected = read_fwf(StringIO(test), col_specs=col_specs) result = read_fwf(StringIO(test)) tm.assert_frame_equal(result, expected) @@ -457,8 +457,8 @@ def test_messed_up_data(): """.strip( "\r\n" ) - colspecs = ((2, 10), (15, 33), (37, 45), (49, 61), (64, 79)) - expected = 
read_fwf(StringIO(test), colspecs=colspecs) + col_specs = ((2, 10), (15, 33), (37, 45), (49, 61), (64, 79)) + expected = read_fwf(StringIO(test), col_specs=col_specs) result = read_fwf(StringIO(test)) tm.assert_frame_equal(result, expected) @@ -476,8 +476,8 @@ def test_multiple_delimiters(): "\r\n" ) delimiter = " +~.\\" - colspecs = ((0, 4), (7, 13), (15, 19), (21, 41)) - expected = read_fwf(StringIO(test), colspecs=colspecs, delimiter=delimiter) + col_specs = ((0, 4), (7, 13), (15, 19), (21, 41)) + expected = read_fwf(StringIO(test), col_specs=col_specs, delimiter=delimiter) result = read_fwf(StringIO(test), delimiter=delimiter) tm.assert_frame_equal(result, expected) @@ -495,7 +495,7 @@ def test_variable_width_unicode(): kwargs = dict(header=None, encoding=encoding) expected = read_fwf( - BytesIO(data.encode(encoding)), colspecs=[(0, 4), (5, 9)], **kwargs + BytesIO(data.encode(encoding)), col_specs=[(0, 4), (5, 9)], **kwargs ) result = read_fwf(BytesIO(data.encode(encoding)), **kwargs) tm.assert_frame_equal(result, expected) @@ -507,8 +507,8 @@ def test_dtype(dtype): 1 2 3.2 3 4 5.2 """ - colspecs = [(0, 5), (5, 10), (10, None)] - result = read_fwf(StringIO(data), colspecs=colspecs, dtype=dtype) + col_specs = [(0, 5), (5, 10), (10, None)] + result = read_fwf(StringIO(data), col_specs=col_specs, dtype=dtype) expected = pd.DataFrame( {"a": [1, 3], "b": [2, 4], "c": [3.2, 5.2]}, columns=["a", "b", "c"] @@ -574,7 +574,11 @@ def test_whitespace_preservation(): a bbb ccdd """ result = read_fwf( - StringIO(fwf_data), widths=[3, 3], header=header, skiprows=[0], delimiter="\n\t" + StringIO(fwf_data), + col_widths=[3, 3], + header=header, + skiprows=[0], + delimiter="\n\t", ) expected = read_csv(StringIO(csv_data), header=header) tm.assert_frame_equal(result, expected) @@ -589,7 +593,9 @@ def test_default_delimiter(): fwf_data = """ a \tbbb cc\tdd """ - result = read_fwf(StringIO(fwf_data), widths=[3, 3], header=header, skiprows=[0]) + result = read_fwf( + StringIO(fwf_data), col_widths=[3, 3], header=header, skiprows=[0] + ) expected = read_csv(StringIO(csv_data), header=header) tm.assert_frame_equal(result, expected) @@ -603,7 +609,7 @@ def test_fwf_compression(compression_only, infer): compression = compression_only extension = "gz" if compression == "gzip" else compression - kwargs = dict(widths=[5, 5], names=["one", "two"]) + kwargs = dict(col_widths=[5, 5], names=["one", "two"]) expected = read_fwf(StringIO(data), **kwargs) data = bytes(data, encoding="utf-8") From 549b0fff87081ded4b875917e42183272558421b Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Mon, 29 Jun 2020 16:13:45 -0700 Subject: [PATCH 003/632] CLN: assorted tslibs cleanups, annotations (#35045) --- pandas/_libs/tslibs/conversion.pyx | 2 +- pandas/_libs/tslibs/fields.pyx | 8 +++---- pandas/_libs/tslibs/nattype.pyx | 20 +++++++++--------- pandas/_libs/tslibs/period.pyx | 3 ++- pandas/_libs/tslibs/tzconversion.pyx | 31 ++++++++++++++-------------- 5 files changed, 33 insertions(+), 31 deletions(-) diff --git a/pandas/_libs/tslibs/conversion.pyx b/pandas/_libs/tslibs/conversion.pyx index 0811ba22977fd..884715f482cad 100644 --- a/pandas/_libs/tslibs/conversion.pyx +++ b/pandas/_libs/tslibs/conversion.pyx @@ -77,7 +77,7 @@ cdef inline int64_t cast_from_unit(object ts, str unit) except? 
-1: return (base * m) + (frac * m) -cpdef inline object precision_from_unit(str unit): +cpdef inline (int64_t, int) precision_from_unit(str unit): """ Return a casting of the unit represented to nanoseconds + the precision to round the fractional part. diff --git a/pandas/_libs/tslibs/fields.pyx b/pandas/_libs/tslibs/fields.pyx index 8d83eeb011866..0e5a6d3c4db46 100644 --- a/pandas/_libs/tslibs/fields.pyx +++ b/pandas/_libs/tslibs/fields.pyx @@ -91,7 +91,7 @@ def build_field_sarray(const int64_t[:] dtindex): @cython.wraparound(False) @cython.boundscheck(False) -def get_date_name_field(const int64_t[:] dtindex, object field, object locale=None): +def get_date_name_field(const int64_t[:] dtindex, str field, object locale=None): """ Given a int64-based datetime index, return array of strings of date name based on requested field (e.g. day_name) @@ -141,7 +141,7 @@ def get_date_name_field(const int64_t[:] dtindex, object field, object locale=No @cython.wraparound(False) @cython.boundscheck(False) -def get_start_end_field(const int64_t[:] dtindex, object field, +def get_start_end_field(const int64_t[:] dtindex, str field, object freqstr=None, int month_kw=12): """ Given an int64-based datetime index return array of indicators @@ -386,7 +386,7 @@ def get_start_end_field(const int64_t[:] dtindex, object field, @cython.wraparound(False) @cython.boundscheck(False) -def get_date_field(const int64_t[:] dtindex, object field): +def get_date_field(const int64_t[:] dtindex, str field): """ Given a int64-based datetime index, extract the year, month, etc., field and return an array of these values. @@ -548,7 +548,7 @@ def get_date_field(const int64_t[:] dtindex, object field): @cython.wraparound(False) @cython.boundscheck(False) -def get_timedelta_field(const int64_t[:] tdindex, object field): +def get_timedelta_field(const int64_t[:] tdindex, str field): """ Given a int64-based timedelta index, extract the days, hrs, sec., field and return an array of these values. 
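For orientation, the ``fields.pyx`` routines touched above are the engines
behind the user-facing datetime accessors, so tightening ``object field`` to
``str field`` is behavior-neutral. A doctest-style sketch of the equivalent
public API (illustrative only, not part of this patch; exact reprs may vary
by pandas version):

>>> dti = pd.date_range("2020-01-01", periods=3, freq="D")
>>> dti.day_name()        # dispatches to get_date_name_field(..., "day_name")
Index(['Wednesday', 'Thursday', 'Friday'], dtype='object')
>>> dti.is_month_start    # dispatches to get_start_end_field(..., "is_month_start")
array([ True, False, False])
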
diff --git a/pandas/_libs/tslibs/nattype.pyx b/pandas/_libs/tslibs/nattype.pyx index 71f151e6eb876..264013f928d22 100644 --- a/pandas/_libs/tslibs/nattype.pyx +++ b/pandas/_libs/tslibs/nattype.pyx @@ -50,7 +50,7 @@ _nat_scalar_rules[Py_GE] = False # ---------------------------------------------------------------------- -def _make_nan_func(func_name, doc): +def _make_nan_func(func_name: str, doc: str): def f(*args, **kwargs): return np.nan f.__name__ = func_name @@ -58,7 +58,7 @@ def _make_nan_func(func_name, doc): return f -def _make_nat_func(func_name, doc): +def _make_nat_func(func_name: str, doc: str): def f(*args, **kwargs): return c_NaT f.__name__ = func_name @@ -66,7 +66,7 @@ def _make_nat_func(func_name, doc): return f -def _make_error_func(func_name, cls): +def _make_error_func(func_name: str, cls): def f(*args, **kwargs): raise ValueError(f"NaTType does not support {func_name}") @@ -282,31 +282,31 @@ cdef class _NaT(datetime): return NPY_NAT @property - def is_leap_year(self): + def is_leap_year(self) -> bool: return False @property - def is_month_start(self): + def is_month_start(self) -> bool: return False @property - def is_quarter_start(self): + def is_quarter_start(self) -> bool: return False @property - def is_year_start(self): + def is_year_start(self) -> bool: return False @property - def is_month_end(self): + def is_month_end(self) -> bool: return False @property - def is_quarter_end(self): + def is_quarter_end(self) -> bool: return False @property - def is_year_end(self): + def is_year_end(self) -> bool: return False diff --git a/pandas/_libs/tslibs/period.pyx b/pandas/_libs/tslibs/period.pyx index a2250234dbd14..c0641297c4b8a 100644 --- a/pandas/_libs/tslibs/period.pyx +++ b/pandas/_libs/tslibs/period.pyx @@ -14,6 +14,7 @@ import cython from cpython.datetime cimport ( datetime, + tzinfo, PyDate_Check, PyDateTime_Check, PyDateTime_IMPORT, @@ -1417,7 +1418,7 @@ def extract_freq(ndarray[object] values): @cython.wraparound(False) @cython.boundscheck(False) -def dt64arr_to_periodarr(const int64_t[:] stamps, int freq, object tz): +def dt64arr_to_periodarr(const int64_t[:] stamps, int freq, tzinfo tz): cdef: Py_ssize_t n = len(stamps) int64_t[:] result = np.empty(n, dtype=np.int64) diff --git a/pandas/_libs/tslibs/tzconversion.pyx b/pandas/_libs/tslibs/tzconversion.pyx index 02fe203637d62..6e6b106b8f21a 100644 --- a/pandas/_libs/tslibs/tzconversion.pyx +++ b/pandas/_libs/tslibs/tzconversion.pyx @@ -5,7 +5,7 @@ import cython from cython import Py_ssize_t from cpython.datetime cimport ( - PyDateTime_IMPORT, PyDelta_Check, datetime, tzinfo) + PyDateTime_IMPORT, PyDelta_Check, datetime, timedelta, tzinfo) PyDateTime_IMPORT import pytz @@ -421,23 +421,22 @@ cdef int64_t[:] _tz_convert_one_way(int64_t[:] vals, tzinfo tz, bint to_utc): converted : ndarray[int64_t] """ cdef: - int64_t[:] converted, result + int64_t[:] converted Py_ssize_t i, n = len(vals) int64_t val - if not is_utc(tz): + if is_utc(tz): + converted = vals + elif is_tzlocal(tz): converted = np.empty(n, dtype=np.int64) - if is_tzlocal(tz): - for i in range(n): - val = vals[i] - if val == NPY_NAT: - converted[i] = NPY_NAT - else: - converted[i] = _tz_convert_tzlocal_utc(val, tz, to_utc) - else: - converted = _tz_convert_dst(vals, tz, to_utc) + for i in range(n): + val = vals[i] + if val == NPY_NAT: + converted[i] = NPY_NAT + else: + converted[i] = _tz_convert_tzlocal_utc(val, tz, to_utc) else: - converted = vals + converted = _tz_convert_dst(vals, tz, to_utc) return converted @@ -471,11 +470,12 @@ cdef inline int64_t 
_tzlocal_get_offset_components(int64_t val, tzinfo tz, npy_datetimestruct dts datetime dt int64_t delta + timedelta td dt64_to_dtstruct(val, &dts) dt = datetime(dts.year, dts.month, dts.day, dts.hour, dts.min, dts.sec, dts.us) - # get_utcoffset (tz.utcoffset under the hood) only makes sense if datetime + # tz.utcoffset only makes sense if datetime # is _wall time_, so if val is a UTC timestamp convert to wall time if not to_utc: dt = dt.replace(tzinfo=tzutc()) @@ -484,7 +484,8 @@ cdef inline int64_t _tzlocal_get_offset_components(int64_t val, tzinfo tz, if fold is not NULL: fold[0] = dt.fold - return int(get_utcoffset(tz, dt).total_seconds()) * 1000000000 + td = tz.utcoffset(dt) + return int(td.total_seconds() * 1_000_000_000) cdef int64_t _tz_convert_tzlocal_utc(int64_t val, tzinfo tz, bint to_utc=True, From bce11e281bc89f964c63cad96645f1234560ca27 Mon Sep 17 00:00:00 2001 From: Matthew Roeschke Date: Mon, 29 Jun 2020 16:15:45 -0700 Subject: [PATCH 004/632] DOC: Add example of NonFixedVariableWindowIndexer usage (#34994) --- doc/source/user_guide/computation.rst | 12 ++++++++++++ doc/source/whatsnew/v1.1.0.rst | 1 + pandas/api/indexers/__init__.py | 13 +++++++++++-- 3 files changed, 24 insertions(+), 2 deletions(-) diff --git a/doc/source/user_guide/computation.rst b/doc/source/user_guide/computation.rst index 897e5d5fb0e24..3a524996ea6d9 100644 --- a/doc/source/user_guide/computation.rst +++ b/doc/source/user_guide/computation.rst @@ -597,6 +597,18 @@ You can view other examples of ``BaseIndexer`` subclasses `here Date: Tue, 30 Jun 2020 00:19:44 +0100 Subject: [PATCH 005/632] CLN: make Info and DataFrameInfo subclasses (#34743) --- pandas/core/frame.py | 32 ++-- pandas/io/formats/info.py | 377 ++++++++++++++++++++++++-------------- 2 files changed, 252 insertions(+), 157 deletions(-) diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 4fc96993f5b2e..f6ea6f51d88a8 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -141,7 +141,7 @@ from pandas.io.common import get_filepath_or_buffer from pandas.io.formats import console, format as fmt -from pandas.io.formats.info import info +from pandas.io.formats.info import DataFrameInfo import pandas.plotting if TYPE_CHECKING: @@ -2462,11 +2462,11 @@ def to_html( RangeIndex: 5 entries, 0 to 4 Data columns (total 3 columns): - # Column Non-Null Count Dtype + # Column Non-Null Count Dtype --- ------ -------------- ----- - 0 int_col 5 non-null int64 - 1 text_col 5 non-null object - 2 float_col 5 non-null float64 + 0 int_col 5 non-null int64 + 1 text_col 5 non-null object + 2 float_col 5 non-null float64 dtypes: float64(1), int64(1), object(1) memory usage: 248.0+ bytes @@ -2505,11 +2505,11 @@ def to_html( RangeIndex: 1000000 entries, 0 to 999999 Data columns (total 3 columns): - # Column Non-Null Count Dtype + # Column Non-Null Count Dtype --- ------ -------------- ----- - 0 column_1 1000000 non-null object - 1 column_2 1000000 non-null object - 2 column_3 1000000 non-null object + 0 column_1 1000000 non-null object + 1 column_2 1000000 non-null object + 2 column_3 1000000 non-null object dtypes: object(3) memory usage: 22.9+ MB @@ -2517,11 +2517,11 @@ def to_html( RangeIndex: 1000000 entries, 0 to 999999 Data columns (total 3 columns): - # Column Non-Null Count Dtype + # Column Non-Null Count Dtype --- ------ -------------- ----- - 0 column_1 1000000 non-null object - 1 column_2 1000000 non-null object - 2 column_3 1000000 non-null object + 0 column_1 1000000 non-null object + 1 column_2 1000000 non-null object + 2 column_3 
1000000 non-null object dtypes: object(3) memory usage: 188.8 MB""" ), @@ -2532,7 +2532,7 @@ def to_html( DataFrame.memory_usage: Memory usage of DataFrame columns.""" ), ) - @doc(info) + @doc(DataFrameInfo.info) def info( self, verbose: Optional[bool] = None, @@ -2541,7 +2541,9 @@ def info( memory_usage: Optional[Union[bool, str]] = None, null_counts: Optional[bool] = None, ) -> None: - return info(self, verbose, buf, max_cols, memory_usage, null_counts) + return DataFrameInfo( + self, verbose, buf, max_cols, memory_usage, null_counts + ).info() def memory_usage(self, index=True, deep=False) -> Series: """ diff --git a/pandas/io/formats/info.py b/pandas/io/formats/info.py index b1dcafa7a7a8f..7a53b46a4ac0f 100644 --- a/pandas/io/formats/info.py +++ b/pandas/io/formats/info.py @@ -1,15 +1,17 @@ +from abc import ABCMeta, abstractmethod import sys -from typing import IO, TYPE_CHECKING, Optional, Tuple, Union +from typing import IO, TYPE_CHECKING, List, Optional, Tuple, Union from pandas._config import get_option from pandas._typing import Dtype, FrameOrSeries +from pandas.core.indexes.api import Index + from pandas.io.formats import format as fmt from pandas.io.formats.printing import pprint_thing if TYPE_CHECKING: - from pandas.core.indexes.api import Index # noqa: F401 from pandas.core.series import Series # noqa: F401 @@ -39,115 +41,247 @@ def _put_str(s: Union[str, Dtype], space: int) -> str: return str(s)[:space].ljust(space) -def _get_ids_and_dtypes(data: FrameOrSeries) -> Tuple["Index", "Series"]: +def _sizeof_fmt(num: Union[int, float], size_qualifier: str) -> str: """ - Get DataFrame's columns and dtypes. + Return size in human readable format. Parameters ---------- - data : DataFrame - Object that `info` was called on. + num : int + Size in bytes. + size_qualifier : str + Either empty, or '+' (if lower bound). Returns ------- - ids : Index - DataFrame's columns. - dtypes : Series - Dtype of each of the DataFrame's columns. - """ - ids = data.columns - dtypes = data.dtypes - return ids, dtypes - - -def info( - data: FrameOrSeries, - verbose: Optional[bool] = None, - buf: Optional[IO[str]] = None, - max_cols: Optional[int] = None, - memory_usage: Optional[Union[bool, str]] = None, - null_counts: Optional[bool] = None, -) -> None: - """ - Print a concise summary of a %(klass)s. - - This method prints information about a %(klass)s including - the index dtype%(type_sub)s, non-null values and memory usage. - - Parameters - ---------- - data : %(klass)s - %(klass)s to print information about. - verbose : bool, optional - Whether to print the full summary. By default, the setting in - ``pandas.options.display.max_info_columns`` is followed. - buf : writable buffer, defaults to sys.stdout - Where to send the output. By default, the output is printed to - sys.stdout. Pass a writable buffer if you need to further process - the output. - %(max_cols_sub)s - memory_usage : bool, str, optional - Specifies whether total memory usage of the %(klass)s - elements (including the index) should be displayed. By default, - this follows the ``pandas.options.display.memory_usage`` setting. - - True always show memory usage. False never shows memory usage. - A value of 'deep' is equivalent to "True with deep introspection". - Memory usage is shown in human-readable units (base-2 - representation). Without deep introspection a memory estimation is - made based in column dtype and number of rows assuming values - consume the same memory amount for corresponding dtypes. 
With deep - memory introspection, a real memory usage calculation is performed - at the cost of computational resources. - null_counts : bool, optional - Whether to show the non-null counts. By default, this is shown - only if the %(klass)s is smaller than - ``pandas.options.display.max_info_rows`` and - ``pandas.options.display.max_info_columns``. A value of True always - shows the counts, and False never shows the counts. - - Returns - ------- - None - This method prints a summary of a %(klass)s and returns None. - - See Also - -------- - %(see_also_sub)s + str + Size in human readable format. Examples -------- - %(examples_sub)s - """ - if buf is None: # pragma: no cover - buf = sys.stdout - - lines = [] - - lines.append(str(type(data))) - lines.append(data.index._summary()) - - ids, dtypes = _get_ids_and_dtypes(data) - col_count = len(ids) - - if col_count == 0: - lines.append(f"Empty {type(data).__name__}") - fmt.buffer_put_lines(buf, lines) - return - - # hack - if max_cols is None: - max_cols = get_option("display.max_info_columns", col_count + 1) - - max_rows = get_option("display.max_info_rows", len(data) + 1) + >>> _sizeof_fmt(23028, '') + '22.5 KB' - if null_counts is None: - show_counts = (col_count <= max_cols) and (len(data) < max_rows) - else: - show_counts = null_counts - exceeds_info_cols = col_count > max_cols + >>> _sizeof_fmt(23028, '+') + '22.5+ KB' + """ + for x in ["bytes", "KB", "MB", "GB", "TB"]: + if num < 1024.0: + return f"{num:3.1f}{size_qualifier} {x}" + num /= 1024.0 + return f"{num:3.1f}{size_qualifier} PB" + + +class BaseInfo(metaclass=ABCMeta): + def __init__( + self, + data: FrameOrSeries, + verbose: Optional[bool] = None, + buf: Optional[IO[str]] = None, + max_cols: Optional[int] = None, + memory_usage: Optional[Union[bool, str]] = None, + null_counts: Optional[bool] = None, + ): + if buf is None: # pragma: no cover + buf = sys.stdout + if memory_usage is None: + memory_usage = get_option("display.memory_usage") + + self.data = data + self.verbose = verbose + self.buf = buf + self.max_cols = max_cols + self.memory_usage = memory_usage + self.null_counts = null_counts + + @abstractmethod + def _get_mem_usage(self, deep: bool) -> int: + """ + Get memory usage in bytes. + + Parameters + ---------- + deep : bool + If True, introspect the data deeply by interrogating object dtypes + for system-level memory consumption, and include it in the returned + values. + + Returns + ------- + mem_usage : int + Object's total memory usage in bytes. + """ + pass + + @abstractmethod + def _get_ids_and_dtypes(self) -> Tuple["Index", "Series"]: + """ + Get column names and dtypes. + + Returns + ------- + ids : Index + DataFrame's column names. + dtypes : Series + Dtype of each of the DataFrame's columns. + """ + pass + + @abstractmethod + def _verbose_repr( + self, lines: List[str], ids: "Index", dtypes: "Series", show_counts: bool + ) -> None: + """ + Append name, non-null count (optional), and dtype for each column to `lines`. + + Parameters + ---------- + lines : List[str] + Lines that will contain `info` representation. + ids : Index + The DataFrame's column names. + dtypes : Series + The DataFrame's columns' dtypes. + show_counts : bool + If True, count of non-NA cells for each column will be appended to `lines`. + """ + pass + + @abstractmethod + def _non_verbose_repr(self, lines: List[str], ids: "Index") -> None: + """ + Append short summary of columns' names to `lines`. + + Parameters + ---------- + lines : List[str] + Lines that will contain `info` representation. 
+ ids : Index + The DataFrame's column names. + """ + pass + + def info(self) -> None: + """ + Print a concise summary of a %(klass)s. + + This method prints information about a %(klass)s including + the index dtype%(type_sub)s, non-null values and memory usage. + + Parameters + ---------- + data : %(klass)s + %(klass)s to print information about. + verbose : bool, optional + Whether to print the full summary. By default, the setting in + ``pandas.options.display.max_info_columns`` is followed. + buf : writable buffer, defaults to sys.stdout + Where to send the output. By default, the output is printed to + sys.stdout. Pass a writable buffer if you need to further process + the output. + %(max_cols_sub)s + memory_usage : bool, str, optional + Specifies whether total memory usage of the %(klass)s + elements (including the index) should be displayed. By default, + this follows the ``pandas.options.display.memory_usage`` setting. + + True always show memory usage. False never shows memory usage. + A value of 'deep' is equivalent to "True with deep introspection". + Memory usage is shown in human-readable units (base-2 + representation). Without deep introspection a memory estimation is + made based in column dtype and number of rows assuming values + consume the same memory amount for corresponding dtypes. With deep + memory introspection, a real memory usage calculation is performed + at the cost of computational resources. + null_counts : bool, optional + Whether to show the non-null counts. By default, this is shown + only if the %(klass)s is smaller than + ``pandas.options.display.max_info_rows`` and + ``pandas.options.display.max_info_columns``. A value of True always + shows the counts, and False never shows the counts. + + Returns + ------- + None + This method prints a summary of a %(klass)s and returns None. + + See Also + -------- + %(see_also_sub)s + + Examples + -------- + %(examples_sub)s + """ + lines = [] + + lines.append(str(type(self.data))) + lines.append(self.data.index._summary()) + + ids, dtypes = self._get_ids_and_dtypes() + col_count = len(ids) + + if col_count == 0: + lines.append(f"Empty {type(self.data).__name__}") + fmt.buffer_put_lines(self.buf, lines) + return + + # hack + max_cols = self.max_cols + if max_cols is None: + max_cols = get_option("display.max_info_columns", col_count + 1) + + max_rows = get_option("display.max_info_rows", len(self.data) + 1) + + if self.null_counts is None: + show_counts = (col_count <= max_cols) and (len(self.data) < max_rows) + else: + show_counts = self.null_counts + exceeds_info_cols = col_count > max_cols - def _verbose_repr(): + if self.verbose: + self._verbose_repr(lines, ids, dtypes, show_counts) + elif self.verbose is False: # specifically set to False, not necessarily None + self._non_verbose_repr(lines, ids) + else: + if exceeds_info_cols: + self._non_verbose_repr(lines, ids) + else: + self._verbose_repr(lines, ids, dtypes, show_counts) + + # groupby dtype.name to collect e.g. 
Categorical columns + counts = dtypes.value_counts().groupby(lambda x: x.name).sum() + collected_dtypes = [f"{k[0]}({k[1]:d})" for k in sorted(counts.items())] + lines.append(f"dtypes: {', '.join(collected_dtypes)}") + + if self.memory_usage: + # append memory usage of df to display + size_qualifier = "" + if self.memory_usage == "deep": + deep = True + else: + # size_qualifier is just a best effort; not guaranteed to catch + # all cases (e.g., it misses categorical data even with object + # categories) + deep = False + if "object" in counts or self.data.index._is_memory_usage_qualified(): + size_qualifier = "+" + mem_usage = self._get_mem_usage(deep=deep) + lines.append(f"memory usage: {_sizeof_fmt(mem_usage, size_qualifier)}\n") + fmt.buffer_put_lines(self.buf, lines) + + +class DataFrameInfo(BaseInfo): + def _get_mem_usage(self, deep: bool) -> int: + return self.data.memory_usage(index=True, deep=deep).sum() + + def _get_ids_and_dtypes(self) -> Tuple["Index", "Series"]: + return self.data.columns, self.data.dtypes + + def _verbose_repr( + self, lines: List[str], ids: "Index", dtypes: "Series", show_counts: bool + ) -> None: + col_count = len(ids) lines.append(f"Data columns (total {col_count} columns):") id_head = " # " @@ -164,7 +298,7 @@ def _verbose_repr(): header = _put_str(id_head, space_num) + _put_str(column_head, space) if show_counts: - counts = data.count() + counts = self.data.count() if col_count != len(counts): # pragma: no cover raise AssertionError( f"Columns must equal counts ({col_count} != {len(counts)})" @@ -213,46 +347,5 @@ def _verbose_repr(): + _put_str(dtype, space_dtype) ) - def _non_verbose_repr(): + def _non_verbose_repr(self, lines: List[str], ids: "Index") -> None: lines.append(ids._summary(name="Columns")) - - def _sizeof_fmt(num, size_qualifier): - # returns size in human readable format - for x in ["bytes", "KB", "MB", "GB", "TB"]: - if num < 1024.0: - return f"{num:3.1f}{size_qualifier} {x}" - num /= 1024.0 - return f"{num:3.1f}{size_qualifier} PB" - - if verbose: - _verbose_repr() - elif verbose is False: # specifically set to False, not necessarily None - _non_verbose_repr() - else: - if exceeds_info_cols: - _non_verbose_repr() - else: - _verbose_repr() - - # groupby dtype.name to collect e.g. 
Categorical columns - counts = dtypes.value_counts().groupby(lambda x: x.name).sum() - collected_dtypes = [f"{k[0]}({k[1]:d})" for k in sorted(counts.items())] - lines.append(f"dtypes: {', '.join(collected_dtypes)}") - - if memory_usage is None: - memory_usage = get_option("display.memory_usage") - if memory_usage: - # append memory usage of df to display - size_qualifier = "" - if memory_usage == "deep": - deep = True - else: - # size_qualifier is just a best effort; not guaranteed to catch - # all cases (e.g., it misses categorical data even with object - # categories) - deep = False - if "object" in counts or data.index._is_memory_usage_qualified(): - size_qualifier = "+" - mem_usage = data.memory_usage(index=True, deep=deep).sum() - lines.append(f"memory usage: {_sizeof_fmt(mem_usage, size_qualifier)}\n") - fmt.buffer_put_lines(buf, lines) From 867aea24d97affe107e8edab592d12d108970d83 Mon Sep 17 00:00:00 2001 From: Tom Augspurger Date: Tue, 30 Jun 2020 06:54:56 -0500 Subject: [PATCH 006/632] DOC/TST: DataFrame constructor with a list of DataFrames (#34991) * DOC/TST: DataFrame constructor with a list of DataFrames Closes #32289 --- doc/source/whatsnew/v1.1.0.rst | 1 + pandas/core/internals/construction.py | 2 +- pandas/tests/frame/test_constructors.py | 38 +++++++++---------------- 3 files changed, 16 insertions(+), 25 deletions(-) diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst index cd2b8a8055e68..040253ebe7279 100644 --- a/doc/source/whatsnew/v1.1.0.rst +++ b/doc/source/whatsnew/v1.1.0.rst @@ -696,6 +696,7 @@ Other API changes - :func: `merge` now checks ``suffixes`` parameter type to be ``tuple`` and raises ``TypeError``, whereas before a ``list`` or ``set`` were accepted and that the ``set`` could produce unexpected results (:issue:`33740`) - :class:`Period` no longer accepts tuples for the ``freq`` argument (:issue:`34658`) - :meth:`Series.interpolate` and :meth:`DataFrame.interpolate` now raises ValueError if ``limit_direction`` is 'forward' or 'both' and ``method`` is 'backfill' or 'bfill' or ``limit_direction`` is 'backward' or 'both' and ``method`` is 'pad' or 'ffill' (:issue:`34746`) +- The :class:`DataFrame` constructor no longer accepts a list of ``DataFrame`` objects. Because of changes to NumPy, ``DataFrame`` objects are now consistently treated as 2D objects, so a list of ``DataFrames`` is considered 3D, and no longer acceptible for the ``DataFrame`` constructor (:issue:`32289`). Increased minimum versions for dependencies diff --git a/pandas/core/internals/construction.py b/pandas/core/internals/construction.py index d49f1f154a2c1..4b9db810dead0 100644 --- a/pandas/core/internals/construction.py +++ b/pandas/core/internals/construction.py @@ -321,7 +321,7 @@ def convert(v): if values.ndim == 1: values = values.reshape((values.shape[0], 1)) elif values.ndim != 2: - raise ValueError("Must pass 2-d input") + raise ValueError(f"Must pass 2-d input. 
shape={values.shape}") return values diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py index 02a871666c78d..dba243f1a339a 100644 --- a/pandas/tests/frame/test_constructors.py +++ b/pandas/tests/frame/test_constructors.py @@ -11,7 +11,7 @@ import pytz from pandas.compat import PY37, is_platform_little_endian -from pandas.compat.numpy import _is_numpy_dev +from pandas.compat.numpy import _np_version_under1p19 from pandas.core.dtypes.common import is_integer_dtype @@ -147,14 +147,20 @@ def test_constructor_dtype_list_data(self): assert df.loc[1, 0] is None assert df.loc[0, 1] == "2" - @pytest.mark.xfail(_is_numpy_dev, reason="Interprets list of frame as 3D") - def test_constructor_list_frames(self): - # see gh-3243 - result = DataFrame([DataFrame()]) - assert result.shape == (1, 0) + @pytest.mark.skipif(_np_version_under1p19, reason="NumPy change.") + def test_constructor_list_of_2d_raises(self): + # https://github.com/pandas-dev/pandas/issues/32289 + a = pd.DataFrame() + b = np.empty((0, 0)) + with pytest.raises(ValueError, match=r"shape=\(1, 0, 0\)"): + pd.DataFrame([a]) - result = DataFrame([DataFrame(dict(A=np.arange(5)))]) - assert isinstance(result.iloc[0, 0], DataFrame) + with pytest.raises(ValueError, match=r"shape=\(1, 0, 0\)"): + pd.DataFrame([b]) + + a = pd.DataFrame({"A": [1, 2]}) + with pytest.raises(ValueError, match=r"shape=\(2, 2, 1\)"): + pd.DataFrame([a, a]) def test_constructor_mixed_dtypes(self): def _make_mixed_dtypes_df(typ, ad=None): @@ -507,22 +513,6 @@ def test_constructor_error_msgs(self): with pytest.raises(ValueError, match=msg): DataFrame({"a": False, "b": True}) - @pytest.mark.xfail(_is_numpy_dev, reason="Interprets embedded frame as 3D") - def test_constructor_with_embedded_frames(self): - - # embedded data frames - df1 = DataFrame({"a": [1, 2, 3], "b": [3, 4, 5]}) - df2 = DataFrame([df1, df1 + 10]) - - df2.dtypes - str(df2) - - result = df2.loc[0, 0] - tm.assert_frame_equal(result, df1) - - result = df2.loc[1, 0] - tm.assert_frame_equal(result, df1 + 10) - def test_constructor_subclass_dict(self, float_frame, dict_subclass): # Test for passing dict subclass to constructor data = { From ff2b70c61c494ea6b4e8de0d15c62c6a2ababac0 Mon Sep 17 00:00:00 2001 From: Matthew Roeschke Date: Tue, 30 Jun 2020 05:51:45 -0700 Subject: [PATCH 007/632] REF: Rename NonFixedVariableWindowIndexer to VariableOffsetWindowIndexer (#35059) Co-authored-by: Matt Roeschke --- doc/source/user_guide/computation.rst | 6 +++--- doc/source/whatsnew/v1.1.0.rst | 2 +- pandas/api/indexers/__init__.py | 4 ++-- pandas/core/window/indexers.py | 2 +- pandas/tests/window/test_base_indexer.py | 4 ++-- 5 files changed, 9 insertions(+), 9 deletions(-) diff --git a/doc/source/user_guide/computation.rst b/doc/source/user_guide/computation.rst index 3a524996ea6d9..f36c6e06044f2 100644 --- a/doc/source/user_guide/computation.rst +++ b/doc/source/user_guide/computation.rst @@ -597,15 +597,15 @@ You can view other examples of ``BaseIndexer`` subclasses `here Date: Tue, 30 Jun 2020 05:52:22 -0700 Subject: [PATCH 008/632] PERF: put some Timetamp methods in _Timestamp (#35036) --- pandas/_libs/tslibs/timestamps.pxd | 4 +- pandas/_libs/tslibs/timestamps.pyx | 291 +++++++++++++++-------------- 2 files changed, 151 insertions(+), 144 deletions(-) diff --git a/pandas/_libs/tslibs/timestamps.pxd b/pandas/_libs/tslibs/timestamps.pxd index 88d21b19e1e37..27b659980e526 100644 --- a/pandas/_libs/tslibs/timestamps.pxd +++ b/pandas/_libs/tslibs/timestamps.pxd @@ -16,8 
+16,8 @@ cdef class _Timestamp(ABCTimestamp): int64_t value, nanosecond object freq - cpdef bint _get_start_end_field(self, str field) - cpdef _get_date_name_field(self, object field, object locale) + cdef bint _get_start_end_field(self, str field) + cdef _get_date_name_field(self, str field, object locale) cdef int64_t _maybe_convert_value_to_local(self) cpdef to_datetime64(self) cdef _assert_tzawareness_compat(_Timestamp self, datetime other) diff --git a/pandas/_libs/tslibs/timestamps.pyx b/pandas/_libs/tslibs/timestamps.pyx index 471ed557f4327..15fcfc742ecf3 100644 --- a/pandas/_libs/tslibs/timestamps.pyx +++ b/pandas/_libs/tslibs/timestamps.pyx @@ -441,6 +441,8 @@ cdef class _Timestamp(ABCTimestamp): return NotImplemented + # ----------------------------------------------------------------- + cdef int64_t _maybe_convert_value_to_local(self): """Convert UTC i8 value to local i8 value if tz exists""" cdef: @@ -450,7 +452,7 @@ cdef class _Timestamp(ABCTimestamp): val = tz_convert_single(self.value, UTC, self.tz) return val - cpdef bint _get_start_end_field(self, str field): + cdef bint _get_start_end_field(self, str field): cdef: int64_t val dict kwds @@ -471,7 +473,67 @@ cdef class _Timestamp(ABCTimestamp): field, freqstr, month_kw) return out[0] - cpdef _get_date_name_field(self, object field, object locale): + @property + def is_month_start(self) -> bool: + """ + Return True if date is first day of month. + """ + if self.freq is None: + # fast-path for non-business frequencies + return self.day == 1 + return self._get_start_end_field("is_month_start") + + @property + def is_month_end(self) -> bool: + """ + Return True if date is last day of month. + """ + if self.freq is None: + # fast-path for non-business frequencies + return self.day == self.days_in_month + return self._get_start_end_field("is_month_end") + + @property + def is_quarter_start(self) -> bool: + """ + Return True if date is first day of the quarter. + """ + if self.freq is None: + # fast-path for non-business frequencies + return self.day == 1 and self.month % 3 == 1 + return self._get_start_end_field("is_quarter_start") + + @property + def is_quarter_end(self) -> bool: + """ + Return True if date is last day of the quarter. + """ + if self.freq is None: + # fast-path for non-business frequencies + return (self.month % 3) == 0 and self.day == self.days_in_month + return self._get_start_end_field("is_quarter_end") + + @property + def is_year_start(self) -> bool: + """ + Return True if date is first day of the year. + """ + if self.freq is None: + # fast-path for non-business frequencies + return self.day == self.month == 1 + return self._get_start_end_field("is_year_start") + + @property + def is_year_end(self) -> bool: + """ + Return True if date is last day of the year. + """ + if self.freq is None: + # fast-path for non-business frequencies + return self.month == 12 and self.day == 31 + return self._get_start_end_field("is_year_end") + + cdef _get_date_name_field(self, str field, object locale): cdef: int64_t val object[:] out @@ -481,6 +543,85 @@ cdef class _Timestamp(ABCTimestamp): field, locale=locale) return out[0] + def day_name(self, locale=None) -> str: + """ + Return the day name of the Timestamp with specified locale. + + Parameters + ---------- + locale : str, default None (English locale) + Locale determining the language in which to return the day name. + + Returns + ------- + day_name : string + + .. 
versionadded:: 0.23.0 + """ + return self._get_date_name_field("day_name", locale) + + def month_name(self, locale=None) -> str: + """ + Return the month name of the Timestamp with specified locale. + + Parameters + ---------- + locale : str, default None (English locale) + Locale determining the language in which to return the month name. + + Returns + ------- + month_name : string + + .. versionadded:: 0.23.0 + """ + return self._get_date_name_field("month_name", locale) + + @property + def is_leap_year(self) -> bool: + """ + Return True if year is a leap year. + """ + return bool(ccalendar.is_leapyear(self.year)) + + @property + def dayofweek(self) -> int: + """ + Return day of the week. + """ + return self.weekday() + + @property + def dayofyear(self) -> int: + """ + Return the day of the year. + """ + return ccalendar.get_day_of_year(self.year, self.month, self.day) + + @property + def quarter(self) -> int: + """ + Return the quarter of the year. + """ + return ((self.month - 1) // 3) + 1 + + @property + def week(self) -> int: + """ + Return the week number of the year. + """ + return ccalendar.get_week_of_year(self.year, self.month, self.day) + + @property + def days_in_month(self) -> int: + """ + Return the number of days in the month. + """ + return ccalendar.get_days_in_month(self.year, self.month) + + # ----------------------------------------------------------------- + # Rendering Methods + @property def _repr_base(self) -> str: return f"{self._date_repr} {self._time_repr}" @@ -514,6 +655,8 @@ cdef class _Timestamp(ABCTimestamp): return self._date_repr return self._repr_base + # ----------------------------------------------------------------- + @property def asm8(self) -> np.datetime64: """ @@ -1040,79 +1183,6 @@ timedelta}, default 'raise' return Period(self, freq=freq) - @property - def dayofweek(self) -> int: - """ - Return day of the week. - """ - return self.weekday() - - def day_name(self, locale=None) -> str: - """ - Return the day name of the Timestamp with specified locale. - - Parameters - ---------- - locale : str, default None (English locale) - Locale determining the language in which to return the day name. - - Returns - ------- - day_name : string - - .. versionadded:: 0.23.0 - """ - return self._get_date_name_field('day_name', locale) - - def month_name(self, locale=None) -> str: - """ - Return the month name of the Timestamp with specified locale. - - Parameters - ---------- - locale : str, default None (English locale) - Locale determining the language in which to return the month name. - - Returns - ------- - month_name : string - - .. versionadded:: 0.23.0 - """ - return self._get_date_name_field('month_name', locale) - - @property - def dayofyear(self) -> int: - """ - Return the day of the year. - """ - return ccalendar.get_day_of_year(self.year, self.month, self.day) - - @property - def week(self) -> int: - """ - Return the week number of the year. - """ - return ccalendar.get_week_of_year(self.year, self.month, self.day) - - weekofyear = week - - @property - def quarter(self) -> int: - """ - Return the quarter of the year. - """ - return ((self.month - 1) // 3) + 1 - - @property - def days_in_month(self) -> int: - """ - Return the number of days in the month. 
- """ - return ccalendar.get_days_in_month(self.year, self.month) - - daysinmonth = days_in_month - @property def freqstr(self): """ @@ -1120,73 +1190,6 @@ timedelta}, default 'raise' """ return getattr(self.freq, 'freqstr', self.freq) - @property - def is_month_start(self) -> bool: - """ - Return True if date is first day of month. - """ - if self.freq is None: - # fast-path for non-business frequencies - return self.day == 1 - return self._get_start_end_field('is_month_start') - - @property - def is_month_end(self) -> bool: - """ - Return True if date is last day of month. - """ - if self.freq is None: - # fast-path for non-business frequencies - return self.day == self.days_in_month - return self._get_start_end_field('is_month_end') - - @property - def is_quarter_start(self) -> bool: - """ - Return True if date is first day of the quarter. - """ - if self.freq is None: - # fast-path for non-business frequencies - return self.day == 1 and self.month % 3 == 1 - return self._get_start_end_field('is_quarter_start') - - @property - def is_quarter_end(self) -> bool: - """ - Return True if date is last day of the quarter. - """ - if self.freq is None: - # fast-path for non-business frequencies - return (self.month % 3) == 0 and self.day == self.days_in_month - return self._get_start_end_field('is_quarter_end') - - @property - def is_year_start(self) -> bool: - """ - Return True if date is first day of the year. - """ - if self.freq is None: - # fast-path for non-business frequencies - return self.day == self.month == 1 - return self._get_start_end_field('is_year_start') - - @property - def is_year_end(self) -> bool: - """ - Return True if date is last day of the year. - """ - if self.freq is None: - # fast-path for non-business frequencies - return self.month == 12 and self.day == 31 - return self._get_start_end_field('is_year_end') - - @property - def is_leap_year(self) -> bool: - """ - Return True if year is a leap year. 
- """ - return bool(ccalendar.is_leapyear(self.year)) - def tz_localize(self, tz, ambiguous='raise', nonexistent='raise'): """ Convert naive Timestamp to local time zone, or remove @@ -1456,6 +1459,10 @@ default 'raise' return Timestamp(normalized[0]).tz_localize(own_tz) +# Aliases +Timestamp.weekofyear = Timestamp.week +Timestamp.daysinmonth = Timestamp.days_in_month + # Add the min and max fields at the class level cdef int64_t _NS_UPPER_BOUND = np.iinfo(np.int64).max # the smallest value we could actually represent is From b1b70c7390e589bbfa0d8896aa76e64bec0cf51e Mon Sep 17 00:00:00 2001 From: Andrew Wieteska <48889395+arw2019@users.noreply.github.com> Date: Tue, 30 Jun 2020 09:01:23 -0400 Subject: [PATCH 009/632] BUG: HDFStore unable to create colindex w/o error thrown (#34983) --- doc/source/whatsnew/v1.1.0.rst | 1 + pandas/io/pytables.py | 8 ++++++- pandas/tests/io/pytables/test_store.py | 31 ++++++++++++++++++++++++++ 3 files changed, 39 insertions(+), 1 deletion(-) diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst index f172fee7bcdbe..0ca19ffd1f496 100644 --- a/doc/source/whatsnew/v1.1.0.rst +++ b/doc/source/whatsnew/v1.1.0.rst @@ -1042,6 +1042,7 @@ I/O - Bug in :meth:`read_excel` for ODS files removes 0.0 values (:issue:`27222`) - Bug in :meth:`ujson.encode` was raising an `OverflowError` with numbers larger than sys.maxsize (:issue: `34395`) - Bug in :meth:`HDFStore.append_to_multiple` was raising a ``ValueError`` when the min_itemsize parameter is set (:issue:`11238`) +- Bug in :meth:`~HDFStore.create_table` now raises an error when `column` argument was not specified in `data_columns` on input (:issue:`28156`) - :meth:`read_json` now could read line-delimited json file from a file url while `lines` and `chunksize` are set. Plotting diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index 0e5d7b007bd89..981b380f8b5e9 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -3569,7 +3569,6 @@ def create_index(self, columns=None, optlevel=None, kind: Optional[str] = None): for c in columns: v = getattr(table.cols, c, None) if v is not None: - # remove the index if the kind/optlevel have changed if v.is_indexed: index = v.index @@ -3597,6 +3596,13 @@ def create_index(self, columns=None, optlevel=None, kind: Optional[str] = None): "data_columns when initializing the table." ) v.create_index(**kw) + elif c in self.non_index_axes[0][1]: + # GH 28156 + raise AttributeError( + f"column {c} is not a data_column.\n" + f"In order to read column {c} you must reload the dataframe \n" + f"into HDFStore and include {c} with the data_columns argument." 
+            )
 
     def _read_axes(
         self, where, start: Optional[int] = None, stop: Optional[int] = None
diff --git a/pandas/tests/io/pytables/test_store.py b/pandas/tests/io/pytables/test_store.py
index c69992471fc9b..df014171be817 100644
--- a/pandas/tests/io/pytables/test_store.py
+++ b/pandas/tests/io/pytables/test_store.py
@@ -1727,6 +1727,37 @@ def col(t, column):
             with pytest.raises(TypeError):
                 store.create_table_index("f2")
 
+    def test_create_table_index_data_columns_argument(self, setup_path):
+        # GH 28156
+
+        with ensure_clean_store(setup_path) as store:
+
+            with catch_warnings(record=True):
+
+                def col(t, column):
+                    return getattr(store.get_storer(t).table.cols, column)
+
+                # data columns
+                df = tm.makeTimeDataFrame()
+                df["string"] = "foo"
+                df["string2"] = "bar"
+                store.append("f", df, data_columns=["string"])
+                assert col("f", "index").is_indexed is True
+                assert col("f", "string").is_indexed is True
+
+                msg = "'Cols' object has no attribute 'string2'"
+                with pytest.raises(AttributeError, match=msg):
+                    col("f", "string2").is_indexed
+
+                # try to index a col which isn't a data_column
+                msg = (
+                    f"column string2 is not a data_column.\n"
+                    f"In order to read column string2 you must reload the dataframe \n"
+                    f"into HDFStore and include string2 with the data_columns argument."
+                )
+                with pytest.raises(AttributeError, match=msg):
+                    store.create_table_index("f", columns=["string2"])
+
     def test_append_hierarchical(self, setup_path):
         index = MultiIndex(
             levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],

From c7bc3429135a2d5304b45fbb8fc58a2fc557289c Mon Sep 17 00:00:00 2001
From: Erfan Nariman <34067903+erfannariman@users.noreply.github.com>
Date: Tue, 30 Jun 2020 18:59:23 +0200
Subject: [PATCH 010/632] Aligned docstring for ignore_index in append
 (#35056)

---
 pandas/core/frame.py  | 2 +-
 pandas/core/series.py | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index f6ea6f51d88a8..a21a45f415a47 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -7551,7 +7551,7 @@ def append(
         other : DataFrame or Series/dict-like object, or list of these
             The data to append.
         ignore_index : bool, default False
-            If True, do not use the index labels.
+            If True, the resulting axis will be labeled 0, 1, …, n - 1.
         verify_integrity : bool, default False
             If True, raise ValueError on creating index with duplicates.
         sort : bool, default False
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 54b85afea4964..be4099d56d43a 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -2609,7 +2609,7 @@ def append(self, to_append, ignore_index=False, verify_integrity=False):
         to_append : Series or list/tuple of Series
             Series to append with self.
         ignore_index : bool, default False
-            If True, do not use the index labels.
+            If True, the resulting axis will be labeled 0, 1, …, n - 1.
         verify_integrity : bool, default False
             If True, raise Exception on creating index with duplicates.
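A quick sketch of the behavior the aligned docstring now describes, not part of the patch itself:

import pandas as pd

s1 = pd.Series([1, 2], index=["a", "b"])
s2 = pd.Series([3, 4], index=["a", "b"])

s1.append(s2, ignore_index=True).index  # RangeIndex(start=0, stop=4, step=1)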
From eb48e10e2116c078a2aa8d75c7b4c8ab70e3bffb Mon Sep 17 00:00:00 2001 From: Tom Augspurger Date: Tue, 30 Jun 2020 15:58:43 -0500 Subject: [PATCH 011/632] API: Allow non-tuples in pandas.merge (#34810) --- doc/source/whatsnew/v1.1.0.rst | 2 +- pandas/core/frame.py | 11 +++++++---- pandas/core/reshape/merge.py | 12 ++++++++---- pandas/tests/reshape/merge/test_merge.py | 18 +++++++----------- 4 files changed, 23 insertions(+), 20 deletions(-) diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst index 0ca19ffd1f496..d32eeb493b2c2 100644 --- a/doc/source/whatsnew/v1.1.0.rst +++ b/doc/source/whatsnew/v1.1.0.rst @@ -693,7 +693,6 @@ Other API changes - :func: `pandas.api.dtypes.is_string_dtype` no longer incorrectly identifies categorical series as string. - :func:`read_excel` no longer takes ``**kwds`` arguments. This means that passing in keyword ``chunksize`` now raises a ``TypeError`` (previously raised a ``NotImplementedError``), while passing in keyword ``encoding`` now raises a ``TypeError`` (:issue:`34464`) -- :func: `merge` now checks ``suffixes`` parameter type to be ``tuple`` and raises ``TypeError``, whereas before a ``list`` or ``set`` were accepted and that the ``set`` could produce unexpected results (:issue:`33740`) - :class:`Period` no longer accepts tuples for the ``freq`` argument (:issue:`34658`) - :meth:`Series.interpolate` and :meth:`DataFrame.interpolate` now raises ValueError if ``limit_direction`` is 'forward' or 'both' and ``method`` is 'backfill' or 'bfill' or ``limit_direction`` is 'backward' or 'both' and ``method`` is 'pad' or 'ffill' (:issue:`34746`) - The :class:`DataFrame` constructor no longer accepts a list of ``DataFrame`` objects. Because of changes to NumPy, ``DataFrame`` objects are now consistently treated as 2D objects, so a list of ``DataFrames`` is considered 3D, and no longer acceptible for the ``DataFrame`` constructor (:issue:`32289`). @@ -787,6 +786,7 @@ Deprecations - :meth:`DataFrame.to_dict` has deprecated accepting short names for ``orient`` in future versions (:issue:`32515`) - :meth:`Categorical.to_dense` is deprecated and will be removed in a future version, use ``np.asarray(cat)`` instead (:issue:`32639`) - The ``fastpath`` keyword in the ``SingleBlockManager`` constructor is deprecated and will be removed in a future version (:issue:`33092`) +- Providing ``suffixes`` as a ``set`` in :func:`pandas.merge` is deprecated. Provide a tuple instead (:issue:`33740`, :issue:`34741`). - :meth:`Index.is_mixed` is deprecated and will be removed in a future version, check ``index.inferred_type`` directly instead (:issue:`32922`) - Passing any arguments but the first one to :func:`read_html` as diff --git a/pandas/core/frame.py b/pandas/core/frame.py index a21a45f415a47..b6993e9ed851a 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -227,10 +227,13 @@ sort : bool, default False Sort the join keys lexicographically in the result DataFrame. If False, the order of the join keys depends on the join type (how keyword). -suffixes : tuple of (str, str), default ('_x', '_y') - Suffix to apply to overlapping column names in the left and right - side, respectively. To raise an exception on overlapping columns use - (False, False). +suffixes : list-like, default is ("_x", "_y") + A length-2 sequence where each element is optionally a string + indicating the suffix to add to overlapping column names in + `left` and `right` respectively. 
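A sketch of the suffixes handling this patch moves toward: tuples (and now lists) keep working, while sets draw a FutureWarning further down because their iteration order is undefined:

import pandas as pd

left = pd.DataFrame({"key": [1], "val": [10]})
right = pd.DataFrame({"key": [1], "val": [20]})

pd.merge(left, right, on="key", suffixes=("_l", "_r"))  # columns: key, val_l, val_r
pd.merge(left, right, on="key", suffixes={"_l", "_r"})  # FutureWarning; suffix order undefined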
Pass a value of `None` instead + of a string to indicate that the column name from `left` or + `right` should be left as-is, with no suffix. At least one of the + values must not be None. copy : bool, default True If False, avoid copy if possible. indicator : bool or str, default False diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py index 5e4eb89f0b45f..27b331babe692 100644 --- a/pandas/core/reshape/merge.py +++ b/pandas/core/reshape/merge.py @@ -194,7 +194,7 @@ def merge_ordered( left DataFrame. fill_method : {'ffill', None}, default None Interpolation method for data. - suffixes : Sequence, default is ("_x", "_y") + suffixes : list-like, default is ("_x", "_y") A length-2 sequence where each element is optionally a string indicating the suffix to add to overlapping column names in `left` and `right` respectively. Pass a value of `None` instead @@ -2072,9 +2072,13 @@ def _items_overlap_with_suffix(left: Index, right: Index, suffixes: Tuple[str, s If corresponding suffix is empty, the entry is simply converted to string. """ - if not isinstance(suffixes, tuple): - raise TypeError( - f"suffixes should be tuple of (str, str). But got {type(suffixes).__name__}" + if not is_list_like(suffixes, allow_sets=False): + warnings.warn( + f"Passing 'suffixes' as a {type(suffixes)}, is not supported and may give " + "unexpected results. Provide 'suffixes' as a tuple instead. In the " + "future a 'TypeError' will be raised.", + FutureWarning, + stacklevel=4, ) to_rename = left.intersection(right) diff --git a/pandas/tests/reshape/merge/test_merge.py b/pandas/tests/reshape/merge/test_merge.py index 0a4d5f17a48cc..4fd3c688b8771 100644 --- a/pandas/tests/reshape/merge/test_merge.py +++ b/pandas/tests/reshape/merge/test_merge.py @@ -1999,6 +1999,7 @@ def test_merge_series(on, left_on, right_on, left_index, right_index, nm): (0, 0, dict(suffixes=("", "_dup")), ["0", "0_dup"]), (0, 0, dict(suffixes=(None, "_dup")), [0, "0_dup"]), (0, 0, dict(suffixes=("_x", "_y")), ["0_x", "0_y"]), + (0, 0, dict(suffixes=["_x", "_y"]), ["0_x", "0_y"]), ("a", 0, dict(suffixes=(None, "_y")), ["a", 0]), (0.0, 0.0, dict(suffixes=("_x", None)), ["0.0_x", 0.0]), ("b", "b", dict(suffixes=(None, "_y")), ["b", "b_y"]), @@ -2069,18 +2070,13 @@ def test_merge_suffix_error(col1, col2, suffixes): pd.merge(a, b, left_index=True, right_index=True, suffixes=suffixes) -@pytest.mark.parametrize( - "col1, col2, suffixes", [("a", "a", {"a", "b"}), ("a", "a", None), (0, 0, None)], -) -def test_merge_suffix_type_error(col1, col2, suffixes): - a = pd.DataFrame({col1: [1, 2, 3]}) - b = pd.DataFrame({col2: [3, 4, 5]}) +@pytest.mark.parametrize("suffixes", [{"left", "right"}, {"left": 0, "right": 0}]) +def test_merge_suffix_warns(suffixes): + a = pd.DataFrame({"a": [1, 2, 3]}) + b = pd.DataFrame({"b": [3, 4, 5]}) - msg = ( - f"suffixes should be tuple of \\(str, str\\). 
But got {type(suffixes).__name__}" - ) - with pytest.raises(TypeError, match=msg): - pd.merge(a, b, left_index=True, right_index=True, suffixes=suffixes) + with tm.assert_produces_warning(FutureWarning): + pd.merge(a, b, left_index=True, right_index=True, suffixes={"left", "right"}) @pytest.mark.parametrize( From ca3272d50d860ec7062e8fb73f0b4a5ad3372229 Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Tue, 30 Jun 2020 14:05:51 -0700 Subject: [PATCH 012/632] CLN: type get_resolution tz as tzinfo (#35065) --- pandas/_libs/tslibs/resolution.pyx | 8 +++----- pandas/_libs/tslibs/tzconversion.pyx | 2 +- 2 files changed, 4 insertions(+), 6 deletions(-) diff --git a/pandas/_libs/tslibs/resolution.pyx b/pandas/_libs/tslibs/resolution.pyx index 4dbecc76ad986..d5f10374d2860 100644 --- a/pandas/_libs/tslibs/resolution.pyx +++ b/pandas/_libs/tslibs/resolution.pyx @@ -1,3 +1,4 @@ +from cpython.datetime cimport tzinfo import numpy as np from numpy cimport ndarray, int64_t, int32_t @@ -8,7 +9,7 @@ from pandas._libs.tslibs.dtypes import Resolution from pandas._libs.tslibs.np_datetime cimport ( npy_datetimestruct, dt64_to_dtstruct) from pandas._libs.tslibs.timezones cimport ( - is_utc, is_tzlocal, maybe_get_tz, get_dst_info) + is_utc, is_tzlocal, get_dst_info) from pandas._libs.tslibs.ccalendar cimport get_days_in_month from pandas._libs.tslibs.tzconversion cimport tz_convert_utc_to_tzlocal @@ -33,7 +34,7 @@ cdef: # ---------------------------------------------------------------------- -def get_resolution(const int64_t[:] stamps, tz=None): +def get_resolution(const int64_t[:] stamps, tzinfo tz=None): cdef: Py_ssize_t i, n = len(stamps) npy_datetimestruct dts @@ -43,9 +44,6 @@ def get_resolution(const int64_t[:] stamps, tz=None): Py_ssize_t[:] pos int64_t local_val, delta - if tz is not None: - tz = maybe_get_tz(tz) - if is_utc(tz) or tz is None: for i in range(n): if stamps[i] == NPY_NAT: diff --git a/pandas/_libs/tslibs/tzconversion.pyx b/pandas/_libs/tslibs/tzconversion.pyx index 6e6b106b8f21a..925b3c5615435 100644 --- a/pandas/_libs/tslibs/tzconversion.pyx +++ b/pandas/_libs/tslibs/tzconversion.pyx @@ -21,7 +21,7 @@ from pandas._libs.tslibs.nattype cimport NPY_NAT from pandas._libs.tslibs.np_datetime cimport ( npy_datetimestruct, dt64_to_dtstruct) from pandas._libs.tslibs.timezones cimport ( - get_dst_info, is_tzlocal, is_utc, get_timezone, get_utcoffset) + get_dst_info, is_tzlocal, is_utc, get_timezone) # TODO: cdef scalar version to call from convert_str_to_tsobject From 2990c1ddd137fcaef46d2cde47334314047a580a Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Tue, 30 Jun 2020 14:58:43 -0700 Subject: [PATCH 013/632] PERF: type tz kwarg in create_timestamp_from_ts (#35067) --- pandas/_libs/tslib.pyx | 17 ++++++++++------- pandas/_libs/tslibs/timestamps.pxd | 4 ++-- pandas/_libs/tslibs/timestamps.pyx | 2 +- 3 files changed, 13 insertions(+), 10 deletions(-) diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx index 44693d60486a9..f494e74bde55f 100644 --- a/pandas/_libs/tslib.pyx +++ b/pandas/_libs/tslib.pyx @@ -8,6 +8,7 @@ from cpython.datetime cimport ( datetime, time, timedelta, + tzinfo, ) # import datetime C API PyDateTime_IMPORT @@ -77,9 +78,9 @@ from pandas._libs.missing cimport checknull_with_nat_and_na cdef inline object create_datetime_from_ts( int64_t value, npy_datetimestruct dts, - object tz, + tzinfo tz, object freq, - bint fold + bint fold, ): """ Convenience routine to construct a datetime.datetime from its parts. 
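These cdef constructors back the public boxing conversions; assuming the usual public API, the box argument corresponds to calls such as:

import pandas as pd

dti = pd.date_range("2020-01-01", periods=2, tz="US/Central")

dti.to_pydatetime()  # box="datetime": ndarray of datetime.datetime
dti.time             # box="time": ndarray of datetime.time (local wall time, tz dropped)
dti.date             # box="date": ndarray of datetime.date (local wall time, tz dropped)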
@@ -92,7 +93,7 @@ cdef inline object create_datetime_from_ts( cdef inline object create_date_from_ts( int64_t value, npy_datetimestruct dts, - object tz, + tzinfo tz, object freq, bint fold ): @@ -106,7 +107,7 @@ cdef inline object create_date_from_ts( cdef inline object create_time_from_ts( int64_t value, npy_datetimestruct dts, - object tz, + tzinfo tz, object freq, bint fold ): @@ -120,7 +121,7 @@ cdef inline object create_time_from_ts( @cython.boundscheck(False) def ints_to_pydatetime( const int64_t[:] arr, - object tz=None, + tzinfo tz=None, object freq=None, bint fold=False, str box="datetime" @@ -162,7 +163,7 @@ def ints_to_pydatetime( str typ int64_t value, delta, local_value ndarray[object] result = np.empty(n, dtype=object) - object (*func_create)(int64_t, npy_datetimestruct, object, object, bint) + object (*func_create)(int64_t, npy_datetimestruct, tzinfo, object, bint) if box == "date": assert (tz is None), "tz should be None when converting to date" @@ -178,7 +179,9 @@ def ints_to_pydatetime( elif box == "datetime": func_create = create_datetime_from_ts else: - raise ValueError("box must be one of 'datetime', 'date', 'time' or 'timestamp'") + raise ValueError( + "box must be one of 'datetime', 'date', 'time' or 'timestamp'" + ) if is_utc(tz) or tz is None: for i in range(n): diff --git a/pandas/_libs/tslibs/timestamps.pxd b/pandas/_libs/tslibs/timestamps.pxd index 27b659980e526..307b6dfc90715 100644 --- a/pandas/_libs/tslibs/timestamps.pxd +++ b/pandas/_libs/tslibs/timestamps.pxd @@ -1,4 +1,4 @@ -from cpython.datetime cimport datetime +from cpython.datetime cimport datetime, tzinfo from numpy cimport int64_t @@ -8,7 +8,7 @@ from pandas._libs.tslibs.np_datetime cimport npy_datetimestruct cdef object create_timestamp_from_ts(int64_t value, npy_datetimestruct dts, - object tz, object freq, bint fold) + tzinfo tz, object freq, bint fold) cdef class _Timestamp(ABCTimestamp): diff --git a/pandas/_libs/tslibs/timestamps.pyx b/pandas/_libs/tslibs/timestamps.pyx index 15fcfc742ecf3..355dc0dbc5820 100644 --- a/pandas/_libs/tslibs/timestamps.pyx +++ b/pandas/_libs/tslibs/timestamps.pyx @@ -69,7 +69,7 @@ _no_input = object() cdef inline object create_timestamp_from_ts(int64_t value, npy_datetimestruct dts, - object tz, object freq, bint fold): + tzinfo tz, object freq, bint fold): """ convenience routine to construct a Timestamp from its parts """ cdef _Timestamp ts_base ts_base = _Timestamp.__new__(Timestamp, dts.year, dts.month, From 18f821cf93f1bb95ff11008900c58939e7796530 Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Tue, 30 Jun 2020 15:49:52 -0700 Subject: [PATCH 014/632] CLN: collect Timestamp methods (#35062) --- pandas/_libs/tslibs/timestamps.pyx | 234 +++++++++++++++-------------- 1 file changed, 119 insertions(+), 115 deletions(-) diff --git a/pandas/_libs/tslibs/timestamps.pyx b/pandas/_libs/tslibs/timestamps.pyx index 355dc0dbc5820..abaf0b4bf6d87 100644 --- a/pandas/_libs/tslibs/timestamps.pyx +++ b/pandas/_libs/tslibs/timestamps.pyx @@ -265,37 +265,6 @@ cdef class _Timestamp(ABCTimestamp): self._assert_tzawareness_compat(ots) return cmp_scalar(self.value, ots.value, op) - def __reduce_ex__(self, protocol): - # python 3.6 compat - # https://bugs.python.org/issue28730 - # now __reduce_ex__ is defined and higher priority than __reduce__ - return self.__reduce__() - - def __repr__(self) -> str: - stamp = self._repr_base - zone = None - - try: - stamp += self.strftime('%z') - if self.tzinfo: - zone = get_timezone(self.tzinfo) - except ValueError: - year2000 = 
self.replace(year=2000) - stamp += year2000.strftime('%z') - if self.tzinfo: - zone = get_timezone(self.tzinfo) - - try: - stamp += zone.strftime(' %%Z') - except AttributeError: - # e.g. tzlocal has no `strftime` - pass - - tz = f", tz='{zone}'" if zone is not None else "" - freq = "" if self.freq is None else f", freq='{self.freqstr}'" - - return f"Timestamp('{stamp}'{tz}{freq})" - cdef bint _compare_outside_nanorange(_Timestamp self, datetime other, int op) except -1: cdef: @@ -312,46 +281,6 @@ cdef class _Timestamp(ABCTimestamp): elif other.tzinfo is None: raise TypeError('Cannot compare tz-naive and tz-aware timestamps') - cpdef datetime to_pydatetime(_Timestamp self, bint warn=True): - """ - Convert a Timestamp object to a native Python datetime object. - - If warn=True, issue a warning if nanoseconds is nonzero. - """ - if self.nanosecond != 0 and warn: - warnings.warn("Discarding nonzero nanoseconds in conversion", - UserWarning, stacklevel=2) - - return datetime(self.year, self.month, self.day, - self.hour, self.minute, self.second, - self.microsecond, self.tzinfo) - - cpdef to_datetime64(self): - """ - Return a numpy.datetime64 object with 'ns' precision. - """ - return np.datetime64(self.value, 'ns') - - def to_numpy(self, dtype=None, copy=False) -> np.datetime64: - """ - Convert the Timestamp to a NumPy datetime64. - - .. versionadded:: 0.25.0 - - This is an alias method for `Timestamp.to_datetime64()`. The dtype and - copy parameters are available here only for compatibility. Their values - will not affect the return value. - - Returns - ------- - numpy.datetime64 - - See Also - -------- - DatetimeIndex.to_numpy : Similar method for DatetimeIndex. - """ - return self.to_datetime64() - def __add__(self, other): cdef: int64_t nanos = 0 @@ -619,9 +548,69 @@ cdef class _Timestamp(ABCTimestamp): """ return ccalendar.get_days_in_month(self.year, self.month) + # ----------------------------------------------------------------- + # Pickle Methods + + def __reduce_ex__(self, protocol): + # python 3.6 compat + # https://bugs.python.org/issue28730 + # now __reduce_ex__ is defined and higher priority than __reduce__ + return self.__reduce__() + + def __setstate__(self, state): + self.value = state[0] + self.freq = state[1] + self.tzinfo = state[2] + + def __reduce__(self): + object_state = self.value, self.freq, self.tzinfo + return (Timestamp, object_state) + # ----------------------------------------------------------------- # Rendering Methods + def isoformat(self, sep: str = "T") -> str: + base = super(_Timestamp, self).isoformat(sep=sep) + if self.nanosecond == 0: + return base + + if self.tzinfo is not None: + base1, base2 = base[:-6], base[-6:] + else: + base1, base2 = base, "" + + if self.microsecond != 0: + base1 += f"{self.nanosecond:03d}" + else: + base1 += f".{self.nanosecond:09d}" + + return base1 + base2 + + def __repr__(self) -> str: + stamp = self._repr_base + zone = None + + try: + stamp += self.strftime('%z') + if self.tzinfo: + zone = get_timezone(self.tzinfo) + except ValueError: + year2000 = self.replace(year=2000) + stamp += year2000.strftime('%z') + if self.tzinfo: + zone = get_timezone(self.tzinfo) + + try: + stamp += zone.strftime(' %%Z') + except AttributeError: + # e.g. 
tzlocal has no `strftime` + pass + + tz = f", tz='{zone}'" if zone is not None else "" + freq = "" if self.freq is None else f", freq='{self.freqstr}'" + + return f"Timestamp('{stamp}'{tz}{freq})" + @property def _repr_base(self) -> str: return f"{self._date_repr} {self._time_repr}" @@ -656,6 +645,7 @@ cdef class _Timestamp(ABCTimestamp): return self._repr_base # ----------------------------------------------------------------- + # Conversion Methods @property def asm8(self) -> np.datetime64: @@ -670,6 +660,64 @@ cdef class _Timestamp(ABCTimestamp): # Note: Naive timestamps will not match datetime.stdlib return round(self.value / 1e9, 6) + cpdef datetime to_pydatetime(_Timestamp self, bint warn=True): + """ + Convert a Timestamp object to a native Python datetime object. + + If warn=True, issue a warning if nanoseconds is nonzero. + """ + if self.nanosecond != 0 and warn: + warnings.warn("Discarding nonzero nanoseconds in conversion", + UserWarning, stacklevel=2) + + return datetime(self.year, self.month, self.day, + self.hour, self.minute, self.second, + self.microsecond, self.tzinfo) + + cpdef to_datetime64(self): + """ + Return a numpy.datetime64 object with 'ns' precision. + """ + return np.datetime64(self.value, "ns") + + def to_numpy(self, dtype=None, copy=False) -> np.datetime64: + """ + Convert the Timestamp to a NumPy datetime64. + + .. versionadded:: 0.25.0 + + This is an alias method for `Timestamp.to_datetime64()`. The dtype and + copy parameters are available here only for compatibility. Their values + will not affect the return value. + + Returns + ------- + numpy.datetime64 + + See Also + -------- + DatetimeIndex.to_numpy : Similar method for DatetimeIndex. + """ + return self.to_datetime64() + + def to_period(self, freq=None): + """ + Return an period of which this timestamp is an observation. + """ + from pandas import Period + + if self.tz is not None: + # GH#21333 + warnings.warn( + "Converting to Period representation will drop timezone information.", + UserWarning, + ) + + if freq is None: + freq = self.freq + + return Period(self, freq=freq) + # ---------------------------------------------------------------------- @@ -1156,33 +1204,6 @@ timedelta}, default 'raise' "Use tz_localize() or tz_convert() as appropriate" ) - def __setstate__(self, state): - self.value = state[0] - self.freq = state[1] - self.tzinfo = state[2] - - def __reduce__(self): - object_state = self.value, self.freq, self.tzinfo - return (Timestamp, object_state) - - def to_period(self, freq=None): - """ - Return an period of which this timestamp is an observation. - """ - from pandas import Period - - if self.tz is not None: - # GH#21333 - warnings.warn( - "Converting to Period representation will drop timezone information.", - UserWarning, - ) - - if freq is None: - freq = self.freq - - return Period(self, freq=freq) - @property def freqstr(self): """ @@ -1404,23 +1425,6 @@ default 'raise' return create_timestamp_from_ts(value, dts, _tzinfo, self.freq, fold) - def isoformat(self, sep='T'): - base = super(_Timestamp, self).isoformat(sep=sep) - if self.nanosecond == 0: - return base - - if self.tzinfo is not None: - base1, base2 = base[:-6], base[-6:] - else: - base1, base2 = base, "" - - if self.microsecond != 0: - base1 += f"{self.nanosecond:03d}" - else: - base1 += f".{self.nanosecond:09d}" - - return base1 + base2 - def to_julian_date(self) -> np.float64: """ Convert TimeStamp to a Julian Date. 
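Taken together, the conversion methods collected above behave roughly like this (illustrative only):

import pandas as pd

ts = pd.Timestamp("2020-06-30 15:30:00.000000001", tz="UTC")

ts.to_datetime64()  # numpy.datetime64 with 'ns' precision
ts.to_numpy()       # alias for to_datetime64(); dtype/copy do not affect the result
ts.to_pydatetime()  # UserWarning: the nonzero nanosecond is discarded
ts.to_period("D")   # UserWarning: timezone information is dropped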
From de82b5e4e7d7304316c80fb6a83c197eed0801a7 Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Tue, 30 Jun 2020 16:26:28 -0700 Subject: [PATCH 015/632] PERF: _maybe_convert_value_to_local (#35070) --- asv_bench/benchmarks/tslibs/timestamp.py | 6 +++--- pandas/_libs/tslibs/timestamps.pyx | 11 ++++++++--- 2 files changed, 11 insertions(+), 6 deletions(-) diff --git a/asv_bench/benchmarks/tslibs/timestamp.py b/asv_bench/benchmarks/tslibs/timestamp.py index 3ef9b814dd79e..b7e11089535d7 100644 --- a/asv_bench/benchmarks/tslibs/timestamp.py +++ b/asv_bench/benchmarks/tslibs/timestamp.py @@ -63,9 +63,6 @@ def time_tz(self, tz, freq): def time_dayofweek(self, tz, freq): self.ts.dayofweek - def time_weekday_name(self, tz, freq): - self.ts.day_name - def time_dayofyear(self, tz, freq): self.ts.dayofyear @@ -108,6 +105,9 @@ def time_microsecond(self, tz, freq): def time_month_name(self, tz, freq): self.ts.month_name() + def time_weekday_name(self, tz, freq): + self.ts.day_name() + class TimestampOps: params = [None, "US/Eastern", pytz.UTC, dateutil.tz.tzutc()] diff --git a/pandas/_libs/tslibs/timestamps.pyx b/pandas/_libs/tslibs/timestamps.pyx index abaf0b4bf6d87..159e4366d1f3f 100644 --- a/pandas/_libs/tslibs/timestamps.pyx +++ b/pandas/_libs/tslibs/timestamps.pyx @@ -47,6 +47,7 @@ from pandas._libs.tslibs.nattype cimport NPY_NAT, c_NaT as NaT from pandas._libs.tslibs.np_datetime cimport ( check_dts_bounds, npy_datetimestruct, dt64_to_dtstruct, cmp_scalar, + pydatetime_to_dt64, ) from pandas._libs.tslibs.np_datetime import OutOfBoundsDatetime from pandas._libs.tslibs.offsets cimport to_offset, is_tick_object, is_offset_object @@ -376,9 +377,13 @@ cdef class _Timestamp(ABCTimestamp): """Convert UTC i8 value to local i8 value if tz exists""" cdef: int64_t val - val = self.value - if self.tz is not None and not is_utc(self.tz): - val = tz_convert_single(self.value, UTC, self.tz) + tzinfo own_tz = self.tzinfo + npy_datetimestruct dts + + if own_tz is not None and not is_utc(own_tz): + val = pydatetime_to_dt64(self, &dts) + self.nanosecond + else: + val = self.value return val cdef bint _get_start_end_field(self, str field): From f968883efecf917d3577923c82ca56199a881fd4 Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Tue, 30 Jun 2020 17:12:54 -0700 Subject: [PATCH 016/632] CLN: remove unnecessary get_timezone calls (#35071) --- pandas/_libs/tslibs/conversion.pyx | 4 ++-- pandas/_libs/tslibs/tzconversion.pyx | 7 +++---- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/pandas/_libs/tslibs/conversion.pyx b/pandas/_libs/tslibs/conversion.pyx index 884715f482cad..c1162ed482048 100644 --- a/pandas/_libs/tslibs/conversion.pyx +++ b/pandas/_libs/tslibs/conversion.pyx @@ -27,7 +27,7 @@ from pandas._libs.tslibs.util cimport ( from pandas._libs.tslibs.timezones cimport ( is_utc, is_tzlocal, is_fixed_offset, get_utcoffset, get_dst_info, - get_timezone, maybe_get_tz, tz_compare, + maybe_get_tz, tz_compare, utc_pytz as UTC, ) from pandas._libs.tslibs.parsing import parse_datetime_string @@ -267,7 +267,7 @@ def datetime_to_datetime64(ndarray[object] values): if not tz_compare(val.tzinfo, inferred_tz): raise ValueError('Array must be all same time zone') else: - inferred_tz = get_timezone(val.tzinfo) + inferred_tz = val.tzinfo _ts = convert_datetime_to_tsobject(val, None) iresult[i] = _ts.value diff --git a/pandas/_libs/tslibs/tzconversion.pyx b/pandas/_libs/tslibs/tzconversion.pyx index 925b3c5615435..a096b2807c640 100644 --- a/pandas/_libs/tslibs/tzconversion.pyx +++ 
b/pandas/_libs/tslibs/tzconversion.pyx @@ -20,8 +20,7 @@ from pandas._libs.tslibs.ccalendar cimport DAY_NANOS, HOUR_NANOS from pandas._libs.tslibs.nattype cimport NPY_NAT from pandas._libs.tslibs.np_datetime cimport ( npy_datetimestruct, dt64_to_dtstruct) -from pandas._libs.tslibs.timezones cimport ( - get_dst_info, is_tzlocal, is_utc, get_timezone) +from pandas._libs.tslibs.timezones cimport get_dst_info, is_tzlocal, is_utc # TODO: cdef scalar version to call from convert_str_to_tsobject @@ -358,13 +357,13 @@ cpdef int64_t tz_convert_single(int64_t val, tzinfo tz1, tzinfo tz2): # Convert to UTC if is_tzlocal(tz1): utc_date = _tz_convert_tzlocal_utc(val, tz1, to_utc=True) - elif not is_utc(get_timezone(tz1)): + elif not is_utc(tz1): arr[0] = val utc_date = _tz_convert_dst(arr, tz1, to_utc=True)[0] else: utc_date = val - if is_utc(get_timezone(tz2)): + if is_utc(tz2): return utc_date elif is_tzlocal(tz2): return _tz_convert_tzlocal_utc(utc_date, tz2, to_utc=False) From 559189ad64ba339b33190452006544881fba3cf7 Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Tue, 30 Jun 2020 17:13:58 -0700 Subject: [PATCH 017/632] REF: re-use month_offset (#35073) --- pandas/_libs/tslibs/ccalendar.pxd | 2 ++ pandas/_libs/tslibs/ccalendar.pyx | 4 ++-- pandas/_libs/tslibs/fields.pyx | 37 +++++++++++++------------------ 3 files changed, 19 insertions(+), 24 deletions(-) diff --git a/pandas/_libs/tslibs/ccalendar.pxd b/pandas/_libs/tslibs/ccalendar.pxd index b55780fe7d5b9..41cc477413607 100644 --- a/pandas/_libs/tslibs/ccalendar.pxd +++ b/pandas/_libs/tslibs/ccalendar.pxd @@ -14,3 +14,5 @@ cpdef int32_t get_day_of_year(int year, int month, int day) nogil cdef int64_t DAY_NANOS cdef int64_t HOUR_NANOS cdef dict c_MONTH_NUMBERS + +cdef int32_t* month_offset diff --git a/pandas/_libs/tslibs/ccalendar.pyx b/pandas/_libs/tslibs/ccalendar.pyx index 2006214169a74..9f8cf6c28adab 100644 --- a/pandas/_libs/tslibs/ccalendar.pyx +++ b/pandas/_libs/tslibs/ccalendar.pyx @@ -27,7 +27,7 @@ cdef int* sakamoto_arr = [0, 3, 2, 5, 0, 3, 5, 1, 4, 6, 2, 4] # The first 13 entries give the month days elapsed as of the first of month N # (or the total number of days in the year for N=13) in non-leap years. # The remaining 13 entries give the days elapsed in leap years. 
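A pure-Python rendering of the lookup this shared table enables; the names here are illustrative:

MONTH_OFFSET = [0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365,
                0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366]

def day_of_year(year: int, month: int, day: int) -> int:
    isleap = year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)
    return MONTH_OFFSET[isleap * 13 + month - 1] + day

assert day_of_year(2019, 3, 1) == 60  # non-leap year
assert day_of_year(2020, 3, 1) == 61  # leap year: February gains a day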
-cdef int32_t* _month_offset = [ +cdef int32_t* month_offset = [ 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365, 0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366] @@ -242,7 +242,7 @@ cpdef int32_t get_day_of_year(int year, int month, int day) nogil: isleap = is_leapyear(year) - mo_off = _month_offset[isleap * 13 + month - 1] + mo_off = month_offset[isleap * 13 + month - 1] day_of_year = mo_off + day return day_of_year diff --git a/pandas/_libs/tslibs/fields.pyx b/pandas/_libs/tslibs/fields.pyx index 0e5a6d3c4db46..126deb67e4189 100644 --- a/pandas/_libs/tslibs/fields.pyx +++ b/pandas/_libs/tslibs/fields.pyx @@ -17,7 +17,9 @@ from pandas._libs.tslibs.ccalendar import ( from pandas._libs.tslibs.ccalendar cimport ( DAY_NANOS, get_days_in_month, is_leapyear, dayofweek, get_week_of_year, - get_day_of_year, get_iso_calendar, iso_calendar_t) + get_day_of_year, get_iso_calendar, iso_calendar_t, + month_offset, +) from pandas._libs.tslibs.np_datetime cimport ( npy_datetimestruct, pandas_timedeltastruct, dt64_to_dtstruct, td64_to_tdstruct) @@ -155,19 +157,10 @@ def get_start_end_field(const int64_t[:] dtindex, str field, int end_month = 12 int start_month = 1 ndarray[int8_t] out - ndarray[int32_t, ndim=2] _month_offset bint isleap npy_datetimestruct dts int mo_off, dom, doy, dow, ldom - _month_offset = np.array( - [ - [0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365], - [0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366], - ], - dtype=np.int32, - ) - out = np.zeros(count, dtype='int8') if freqstr: @@ -226,10 +219,10 @@ def get_start_end_field(const int64_t[:] dtindex, str field, dt64_to_dtstruct(dtindex[i], &dts) isleap = is_leapyear(dts.year) - mo_off = _month_offset[isleap, dts.month - 1] + mo_off = month_offset[isleap * 13 + dts.month - 1] dom = dts.day doy = mo_off + dom - ldom = _month_offset[isleap, dts.month] + ldom = month_offset[isleap * 13 + dts.month] dow = dayofweek(dts.year, dts.month, dts.day) if (ldom == doy and dow < 5) or ( @@ -244,10 +237,10 @@ def get_start_end_field(const int64_t[:] dtindex, str field, dt64_to_dtstruct(dtindex[i], &dts) isleap = is_leapyear(dts.year) - mo_off = _month_offset[isleap, dts.month - 1] + mo_off = month_offset[isleap * 13 + dts.month - 1] dom = dts.day doy = mo_off + dom - ldom = _month_offset[isleap, dts.month] + ldom = month_offset[isleap * 13 + dts.month] if ldom == doy: out[i] = 1 @@ -288,10 +281,10 @@ def get_start_end_field(const int64_t[:] dtindex, str field, dt64_to_dtstruct(dtindex[i], &dts) isleap = is_leapyear(dts.year) - mo_off = _month_offset[isleap, dts.month - 1] + mo_off = month_offset[isleap * 13 + dts.month - 1] dom = dts.day doy = mo_off + dom - ldom = _month_offset[isleap, dts.month] + ldom = month_offset[isleap * 13 + dts.month] dow = dayofweek(dts.year, dts.month, dts.day) if ((dts.month - end_month) % 3 == 0) and ( @@ -307,10 +300,10 @@ def get_start_end_field(const int64_t[:] dtindex, str field, dt64_to_dtstruct(dtindex[i], &dts) isleap = is_leapyear(dts.year) - mo_off = _month_offset[isleap, dts.month - 1] + mo_off = month_offset[isleap * 13 + dts.month - 1] dom = dts.day doy = mo_off + dom - ldom = _month_offset[isleap, dts.month] + ldom = month_offset[isleap * 13 + dts.month] if ((dts.month - end_month) % 3 == 0) and (ldom == doy): out[i] = 1 @@ -352,10 +345,10 @@ def get_start_end_field(const int64_t[:] dtindex, str field, dt64_to_dtstruct(dtindex[i], &dts) isleap = is_leapyear(dts.year) dom = dts.day - mo_off = _month_offset[isleap, dts.month - 1] + mo_off = month_offset[isleap * 13 
+ dts.month - 1] doy = mo_off + dom dow = dayofweek(dts.year, dts.month, dts.day) - ldom = _month_offset[isleap, dts.month] + ldom = month_offset[isleap * 13 + dts.month] if (dts.month == end_month) and ( (ldom == doy and dow < 5) or ( @@ -370,10 +363,10 @@ def get_start_end_field(const int64_t[:] dtindex, str field, dt64_to_dtstruct(dtindex[i], &dts) isleap = is_leapyear(dts.year) - mo_off = _month_offset[isleap, dts.month - 1] + mo_off = month_offset[isleap * 13 + dts.month - 1] dom = dts.day doy = mo_off + dom - ldom = _month_offset[isleap, dts.month] + ldom = month_offset[isleap * 13 + dts.month] if (dts.month == end_month) and (ldom == doy): out[i] = 1 From d9a09cad10d5b70d2e9c2c5162042e6e0058c3d4 Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Tue, 30 Jun 2020 18:40:41 -0700 Subject: [PATCH 018/632] PERF: Timestamp.normalize (#35068) * PERF: Timestamp.normalize * lint fixup --- asv_bench/benchmarks/tslibs/timestamp.py | 25 +++++++++++++++------ pandas/_libs/tslibs/conversion.pxd | 1 + pandas/_libs/tslibs/conversion.pyx | 10 ++++----- pandas/_libs/tslibs/timestamps.pyx | 28 +++++++++++++----------- 4 files changed, 39 insertions(+), 25 deletions(-) diff --git a/asv_bench/benchmarks/tslibs/timestamp.py b/asv_bench/benchmarks/tslibs/timestamp.py index b7e11089535d7..40f8e561f5238 100644 --- a/asv_bench/benchmarks/tslibs/timestamp.py +++ b/asv_bench/benchmarks/tslibs/timestamp.py @@ -1,17 +1,29 @@ -import datetime +from datetime import datetime, timedelta, timezone -import dateutil +from dateutil.tz import gettz, tzlocal, tzutc import numpy as np import pytz from pandas import Timestamp +# One case for each type of tzinfo object that has its own code path +# in tzconversion code. +_tzs = [ + None, + pytz.timezone("Europe/Amsterdam"), + gettz("US/Central"), + pytz.UTC, + tzutc(), + timezone(timedelta(minutes=60)), + tzlocal(), +] + class TimestampConstruction: def setup(self): self.npdatetime64 = np.datetime64("2020-01-01 00:00:00") - self.dttime_unaware = datetime.datetime(2020, 1, 1, 0, 0, 0) - self.dttime_aware = datetime.datetime(2020, 1, 1, 0, 0, 0, 0, pytz.UTC) + self.dttime_unaware = datetime(2020, 1, 1, 0, 0, 0) + self.dttime_aware = datetime(2020, 1, 1, 0, 0, 0, 0, pytz.UTC) self.ts = Timestamp("2020-01-01 00:00:00") def time_parse_iso8601_no_tz(self): @@ -49,7 +61,6 @@ def time_from_pd_timestamp(self): class TimestampProperties: - _tzs = [None, pytz.timezone("Europe/Amsterdam"), pytz.UTC, dateutil.tz.tzutc()] _freqs = [None, "B"] params = [_tzs, _freqs] param_names = ["tz", "freq"] @@ -110,7 +121,7 @@ def time_weekday_name(self, tz, freq): class TimestampOps: - params = [None, "US/Eastern", pytz.UTC, dateutil.tz.tzutc()] + params = _tzs param_names = ["tz"] def setup(self, tz): @@ -148,7 +159,7 @@ def time_ceil(self, tz): class TimestampAcrossDst: def setup(self): - dt = datetime.datetime(2016, 3, 27, 1) + dt = datetime(2016, 3, 27, 1) self.tzinfo = pytz.timezone("CET").localize(dt, is_dst=False).tzinfo self.ts2 = Timestamp(dt) diff --git a/pandas/_libs/tslibs/conversion.pxd b/pandas/_libs/tslibs/conversion.pxd index 94f6d1d9020d2..623d9f14d646b 100644 --- a/pandas/_libs/tslibs/conversion.pxd +++ b/pandas/_libs/tslibs/conversion.pxd @@ -26,3 +26,4 @@ cpdef datetime localize_pydatetime(datetime dt, object tz) cdef int64_t cast_from_unit(object ts, str unit) except? 
-1 cpdef ndarray[int64_t] normalize_i8_timestamps(const int64_t[:] stamps, tzinfo tz) +cdef int64_t normalize_i8_stamp(int64_t local_val) nogil diff --git a/pandas/_libs/tslibs/conversion.pyx b/pandas/_libs/tslibs/conversion.pyx index c1162ed482048..ac24dd546d9d3 100644 --- a/pandas/_libs/tslibs/conversion.pyx +++ b/pandas/_libs/tslibs/conversion.pyx @@ -795,14 +795,14 @@ cpdef ndarray[int64_t] normalize_i8_timestamps(const int64_t[:] stamps, tzinfo t result[i] = NPY_NAT continue local_val = stamps[i] - result[i] = _normalize_i8_stamp(local_val) + result[i] = normalize_i8_stamp(local_val) elif is_tzlocal(tz): for i in range(n): if stamps[i] == NPY_NAT: result[i] = NPY_NAT continue local_val = tz_convert_utc_to_tzlocal(stamps[i], tz) - result[i] = _normalize_i8_stamp(local_val) + result[i] = normalize_i8_stamp(local_val) else: # Adjust datetime64 timestamp, recompute datetimestruct trans, deltas, typ = get_dst_info(tz) @@ -815,7 +815,7 @@ cpdef ndarray[int64_t] normalize_i8_timestamps(const int64_t[:] stamps, tzinfo t result[i] = NPY_NAT continue local_val = stamps[i] + delta - result[i] = _normalize_i8_stamp(local_val) + result[i] = normalize_i8_stamp(local_val) else: pos = trans.searchsorted(stamps, side='right') - 1 for i in range(n): @@ -823,13 +823,13 @@ cpdef ndarray[int64_t] normalize_i8_timestamps(const int64_t[:] stamps, tzinfo t result[i] = NPY_NAT continue local_val = stamps[i] + deltas[pos[i]] - result[i] = _normalize_i8_stamp(local_val) + result[i] = normalize_i8_stamp(local_val) return result.base # `.base` to access underlying ndarray @cython.cdivision -cdef inline int64_t _normalize_i8_stamp(int64_t local_val) nogil: +cdef inline int64_t normalize_i8_stamp(int64_t local_val) nogil: """ Round the localized nanosecond timestamp down to the previous midnight. diff --git a/pandas/_libs/tslibs/timestamps.pyx b/pandas/_libs/tslibs/timestamps.pyx index 159e4366d1f3f..ba6cee3d7ad8e 100644 --- a/pandas/_libs/tslibs/timestamps.pyx +++ b/pandas/_libs/tslibs/timestamps.pyx @@ -40,7 +40,7 @@ from pandas._libs.tslibs.conversion cimport ( _TSObject, convert_to_tsobject, convert_datetime_to_tsobject, - normalize_i8_timestamps, + normalize_i8_stamp, ) from pandas._libs.tslibs.fields import get_start_end_field, get_date_name_field from pandas._libs.tslibs.nattype cimport NPY_NAT, c_NaT as NaT @@ -553,6 +553,20 @@ cdef class _Timestamp(ABCTimestamp): """ return ccalendar.get_days_in_month(self.year, self.month) + # ----------------------------------------------------------------- + # Transformation Methods + + def normalize(self) -> "Timestamp": + """ + Normalize Timestamp to midnight, preserving tz information. + """ + cdef: + local_val = self._maybe_convert_value_to_local() + int64_t normalized + + normalized = normalize_i8_stamp(local_val) + return Timestamp(normalized).tz_localize(self.tzinfo) + # ----------------------------------------------------------------- # Pickle Methods @@ -1455,18 +1469,6 @@ default 'raise' self.nanosecond / 3600.0 / 1e+9 ) / 24.0) - def normalize(self): - """ - Normalize Timestamp to midnight, preserving tz information. 
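The scalar fast path amounts to flooring the local nanosecond value to the day boundary; a hypothetical pure-Python equivalent, plus the public result:

import pandas as pd

DAY_NANOS = 24 * 3600 * 1_000_000_000

def normalize_i8(local_val: int) -> int:
    # round down to the previous midnight, mirroring normalize_i8_stamp
    return local_val - (local_val % DAY_NANOS)

pd.Timestamp("2020-06-30 15:30", tz="US/Eastern").normalize()
# Timestamp('2020-06-30 00:00:00-0400', tz='US/Eastern')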
- """ - cdef: - ndarray[int64_t] normalized - tzinfo own_tz = self.tzinfo # could be None - - normalized = normalize_i8_timestamps( - np.array([self.value], dtype="i8"), tz=own_tz) - return Timestamp(normalized[0]).tz_localize(own_tz) - # Aliases Timestamp.weekofyear = Timestamp.week From bf12604c847e87d89fd91cc17b443a7688d0fefa Mon Sep 17 00:00:00 2001 From: Tom Augspurger Date: Tue, 30 Jun 2020 20:43:25 -0500 Subject: [PATCH 019/632] BUG: Fixed concat with reindex and extension types (#33522) * BUG: Fixed concat with reindex and extension types Closes https://github.com/pandas-dev/pandas/issues/27692 Closes https://github.com/pandas-dev/pandas/issues/33027 * rebase * fixup * cleanup * fixups --- doc/source/whatsnew/v1.1.0.rst | 1 + pandas/core/internals/concat.py | 31 +++++++++++++++++----- pandas/core/reshape/concat.py | 2 +- pandas/tests/extension/base/reshaping.py | 13 +++++++++ pandas/tests/extension/test_categorical.py | 3 ++- 5 files changed, 41 insertions(+), 9 deletions(-) diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst index d32eeb493b2c2..2fac8d1a8f63f 100644 --- a/doc/source/whatsnew/v1.1.0.rst +++ b/doc/source/whatsnew/v1.1.0.rst @@ -1123,6 +1123,7 @@ ExtensionArray ^^^^^^^^^^^^^^ - Fixed bug where :meth:`Series.value_counts` would raise on empty input of ``Int64`` dtype (:issue:`33317`) +- Fixed bug in :func:`concat` when concatenating DataFrames with non-overlaping columns resulting in object-dtype columns rather than preserving the extension dtype (:issue:`27692`, :issue:`33027`) - Fixed bug where :meth:`StringArray.isna` would return ``False`` for NA values when ``pandas.options.mode.use_inf_as_na`` was set to ``True`` (:issue:`33655`) - Fixed bug in :class:`Series` construction with EA dtype and index but no data or scalar data fails (:issue:`26469`) - Fixed bug that caused :meth:`Series.__repr__()` to crash for extension types whose elements are multidimensional arrays (:issue:`33770`). diff --git a/pandas/core/internals/concat.py b/pandas/core/internals/concat.py index fd8c5f5e27c02..2cc7461986c8f 100644 --- a/pandas/core/internals/concat.py +++ b/pandas/core/internals/concat.py @@ -29,7 +29,7 @@ def concatenate_block_managers( - mgrs_indexers, axes, concat_axis: int, copy: bool + mgrs_indexers, axes, concat_axis: int, copy: bool, ) -> BlockManager: """ Concatenate block managers into one. 
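With this fix, non-overlapping columns are reindexed with the array's own missing value instead of falling back to object dtype; for example:

import pandas as pd

a = pd.DataFrame({"a": pd.array([1, 2], dtype="Int64")})
b = pd.DataFrame({"b": pd.array([3, 4], dtype="Int64")})

out = pd.concat([a, b], ignore_index=True)
out.dtypes  # both columns stay Int64; the gaps are filled with <NA>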
@@ -76,7 +76,7 @@ def concatenate_block_managers( b = make_block(values, placement=placement, ndim=blk.ndim) else: b = make_block( - _concatenate_join_units(join_units, concat_axis, copy=copy), + _concatenate_join_units(join_units, concat_axis, copy=copy,), placement=placement, ) blocks.append(b) @@ -260,6 +260,16 @@ def get_reindexed_values(self, empty_dtype, upcasted_na): pass elif getattr(self.block, "is_extension", False): pass + elif is_extension_array_dtype(empty_dtype): + missing_arr = empty_dtype.construct_array_type()._from_sequence( + [], dtype=empty_dtype + ) + ncols, nrows = self.shape + assert ncols == 1, ncols + empty_arr = -1 * np.ones((nrows,), dtype=np.intp) + return missing_arr.take( + empty_arr, allow_fill=True, fill_value=fill_value + ) else: missing_arr = np.empty(self.shape, dtype=empty_dtype) missing_arr.fill(fill_value) @@ -329,7 +339,7 @@ def _concatenate_join_units(join_units, concat_axis, copy): # 2D to put it a non-EA Block concat_values = np.atleast_2d(concat_values) else: - concat_values = concat_compat(to_concat, axis=concat_axis) + concat_values = concat_compat(to_concat, axis=concat_axis,) return concat_values @@ -374,6 +384,10 @@ def _get_empty_dtype_and_na(join_units): upcast_cls = "category" elif is_datetime64tz_dtype(dtype): upcast_cls = "datetimetz" + + elif is_extension_array_dtype(dtype): + upcast_cls = "extension" + elif issubclass(dtype.type, np.bool_): upcast_cls = "bool" elif issubclass(dtype.type, np.object_): @@ -384,8 +398,6 @@ def _get_empty_dtype_and_na(join_units): upcast_cls = "timedelta" elif is_sparse(dtype): upcast_cls = dtype.subtype.name - elif is_extension_array_dtype(dtype): - upcast_cls = "object" elif is_float_dtype(dtype) or is_numeric_dtype(dtype): upcast_cls = dtype.name else: @@ -401,10 +413,15 @@ def _get_empty_dtype_and_na(join_units): if not upcast_classes: upcast_classes = null_upcast_classes - # TODO: de-duplicate with maybe_promote? 
# create the result - if "object" in upcast_classes: + if "extension" in upcast_classes: + if len(upcast_classes) == 1: + cls = upcast_classes["extension"][0] + return cls, cls.na_value + else: + return np.dtype("object"), np.nan + elif "object" in upcast_classes: return np.dtype(np.object_), np.nan elif "bool" in upcast_classes: if has_none_blocks: diff --git a/pandas/core/reshape/concat.py b/pandas/core/reshape/concat.py index 299b68c6e71e0..9e8fb643791f2 100644 --- a/pandas/core/reshape/concat.py +++ b/pandas/core/reshape/concat.py @@ -500,7 +500,7 @@ def get_result(self): mgrs_indexers.append((obj._mgr, indexers)) new_data = concatenate_block_managers( - mgrs_indexers, self.new_axes, concat_axis=self.bm_axis, copy=self.copy + mgrs_indexers, self.new_axes, concat_axis=self.bm_axis, copy=self.copy, ) if not self.copy: new_data._consolidate_inplace() diff --git a/pandas/tests/extension/base/reshaping.py b/pandas/tests/extension/base/reshaping.py index cd932e842e00c..3774e018a8e51 100644 --- a/pandas/tests/extension/base/reshaping.py +++ b/pandas/tests/extension/base/reshaping.py @@ -107,6 +107,19 @@ def test_concat_extension_arrays_copy_false(self, data, na_value): result = pd.concat([df1, df2], axis=1, copy=False) self.assert_frame_equal(result, expected) + def test_concat_with_reindex(self, data): + # GH-33027 + a = pd.DataFrame({"a": data[:5]}) + b = pd.DataFrame({"b": data[:5]}) + result = pd.concat([a, b], ignore_index=True) + expected = pd.DataFrame( + { + "a": data.take(list(range(5)) + ([-1] * 5), allow_fill=True), + "b": data.take(([-1] * 5) + list(range(5)), allow_fill=True), + } + ) + self.assert_frame_equal(result, expected) + def test_align(self, data, na_value): a = data[:3] b = data[2:5] diff --git a/pandas/tests/extension/test_categorical.py b/pandas/tests/extension/test_categorical.py index d1211e477fe3e..f7b572a70073a 100644 --- a/pandas/tests/extension/test_categorical.py +++ b/pandas/tests/extension/test_categorical.py @@ -93,7 +93,8 @@ class TestConstructors(base.BaseConstructorsTests): class TestReshaping(base.BaseReshapingTests): - pass + def test_concat_with_reindex(self, data): + pytest.xfail(reason="Deliberately upcast to object?") class TestGetitem(base.BaseGetitemTests): From f451a2386148b1d7f89bc1aabad78cc776c42900 Mon Sep 17 00:00:00 2001 From: William Ayd Date: Wed, 1 Jul 2020 08:22:48 -0700 Subject: [PATCH 020/632] Assorted ujson Cleanups (#35064) --- pandas/_libs/src/ujson/python/objToJSON.c | 47 ++++++----------------- 1 file changed, 11 insertions(+), 36 deletions(-) diff --git a/pandas/_libs/src/ujson/python/objToJSON.c b/pandas/_libs/src/ujson/python/objToJSON.c index 1de9642761961..e841f00489887 100644 --- a/pandas/_libs/src/ujson/python/objToJSON.c +++ b/pandas/_libs/src/ujson/python/objToJSON.c @@ -324,19 +324,6 @@ static npy_float64 total_seconds(PyObject *td) { return double_val; } -static PyObject *get_item(PyObject *obj, Py_ssize_t i) { - PyObject *tmp = PyLong_FromSsize_t(i); - PyObject *ret; - - if (tmp == 0) { - return 0; - } - ret = PyObject_GetItem(obj, tmp); - Py_DECREF(tmp); - - return ret; -} - static char *PyBytesToUTF8(JSOBJ _obj, JSONTypeContext *Py_UNUSED(tc), size_t *_outLen) { PyObject *obj = (PyObject *)_obj; @@ -704,7 +691,6 @@ void PdBlock_iterBegin(JSOBJ _obj, JSONTypeContext *tc) { PdBlockContext *blkCtxt; NpyArrContext *npyarr; Py_ssize_t i; - PyArray_Descr *dtype; NpyIter *iter; NpyIter_IterNextFunc *iternext; npy_int64 **dataptr; @@ -712,10 +698,6 @@ void PdBlock_iterBegin(JSOBJ _obj, JSONTypeContext *tc) { npy_intp idx; 
PRINTMARK(); - - i = 0; - blocks = NULL; - dtype = PyArray_DescrFromType(NPY_INT64); obj = (PyObject *)_obj; GET_TC(tc)->iterGetName = GET_TC(tc)->transpose @@ -726,7 +708,7 @@ void PdBlock_iterBegin(JSOBJ _obj, JSONTypeContext *tc) { if (!blkCtxt) { PyErr_NoMemory(); GET_TC(tc)->iterNext = NpyArr_iterNextNone; - goto BLKRET; + return; } GET_TC(tc)->pdblock = blkCtxt; @@ -739,7 +721,7 @@ void PdBlock_iterBegin(JSOBJ _obj, JSONTypeContext *tc) { blkCtxt->cindices = NULL; GET_TC(tc)->iterNext = NpyArr_iterNextNone; - goto BLKRET; + return; } blkCtxt->npyCtxts = @@ -747,30 +729,30 @@ void PdBlock_iterBegin(JSOBJ _obj, JSONTypeContext *tc) { if (!blkCtxt->npyCtxts) { PyErr_NoMemory(); GET_TC(tc)->iterNext = NpyArr_iterNextNone; - goto BLKRET; - } - for (i = 0; i < blkCtxt->ncols; i++) { - blkCtxt->npyCtxts[i] = NULL; + return; } blkCtxt->cindices = PyObject_Malloc(sizeof(int) * blkCtxt->ncols); if (!blkCtxt->cindices) { PyErr_NoMemory(); GET_TC(tc)->iterNext = NpyArr_iterNextNone; - goto BLKRET; + return; } blocks = get_sub_attr(obj, "_mgr", "blocks"); if (!blocks) { GET_TC(tc)->iterNext = NpyArr_iterNextNone; - goto BLKRET; + return; + } else if (!PyTuple_Check(blocks)) { + PyErr_SetString(PyExc_TypeError, "blocks must be a tuple!"); + goto BLKRET; } // force transpose so each NpyArrContext strides down its column GET_TC(tc)->transpose = 1; for (i = 0; i < PyObject_Length(blocks); i++) { - block = get_item(blocks, i); + block = PyTuple_GET_ITEM(blocks, i); if (!block) { GET_TC(tc)->iterNext = NpyArr_iterNextNone; goto BLKRET; @@ -779,7 +761,6 @@ void PdBlock_iterBegin(JSOBJ _obj, JSONTypeContext *tc) { tmp = PyObject_CallMethod(block, "get_block_values_for_json", NULL); if (!tmp) { ((JSONObjectEncoder *)tc->encoder)->errorMsg = ""; - Py_DECREF(block); GET_TC(tc)->iterNext = NpyArr_iterNextNone; goto BLKRET; } @@ -787,23 +768,20 @@ void PdBlock_iterBegin(JSOBJ _obj, JSONTypeContext *tc) { values = PyArray_Transpose((PyArrayObject *)tmp, NULL); Py_DECREF(tmp); if (!values) { - Py_DECREF(block); GET_TC(tc)->iterNext = NpyArr_iterNextNone; goto BLKRET; } locs = (PyArrayObject *)get_sub_attr(block, "mgr_locs", "as_array"); if (!locs) { - Py_DECREF(block); Py_DECREF(values); GET_TC(tc)->iterNext = NpyArr_iterNextNone; goto BLKRET; } iter = NpyIter_New(locs, NPY_ITER_READONLY, NPY_KEEPORDER, - NPY_NO_CASTING, dtype); + NPY_NO_CASTING, NULL); if (!iter) { - Py_DECREF(block); Py_DECREF(values); Py_DECREF(locs); GET_TC(tc)->iterNext = NpyArr_iterNextNone; @@ -812,7 +790,6 @@ void PdBlock_iterBegin(JSOBJ _obj, JSONTypeContext *tc) { iternext = NpyIter_GetIterNext(iter, NULL); if (!iternext) { NpyIter_Deallocate(iter); - Py_DECREF(block); Py_DECREF(values); Py_DECREF(locs); GET_TC(tc)->iterNext = NpyArr_iterNextNone; @@ -846,15 +823,13 @@ void PdBlock_iterBegin(JSOBJ _obj, JSONTypeContext *tc) { } while (iternext(iter)); NpyIter_Deallocate(iter); - Py_DECREF(block); Py_DECREF(values); Py_DECREF(locs); } GET_TC(tc)->npyarr = blkCtxt->npyCtxts[0]; BLKRET: - Py_XDECREF(dtype); - Py_XDECREF(blocks); + Py_DECREF(blocks); } void PdBlock_iterEnd(JSOBJ obj, JSONTypeContext *tc) { From 18b3af6c5f8574cb991d59290096f380f9bbb747 Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Wed, 1 Jul 2020 08:28:45 -0700 Subject: [PATCH 021/632] CLN: typing in libtimezones (#35079) --- pandas/_libs/tslibs/timezones.pxd | 2 +- pandas/_libs/tslibs/timezones.pyx | 28 +++++++++++++++------------- 2 files changed, 16 insertions(+), 14 deletions(-) diff --git a/pandas/_libs/tslibs/timezones.pxd b/pandas/_libs/tslibs/timezones.pxd 
index 14c0523787422..2428993c45f56 100644
--- a/pandas/_libs/tslibs/timezones.pxd
+++ b/pandas/_libs/tslibs/timezones.pxd
@@ -14,4 +14,4 @@ cpdef object maybe_get_tz(object tz)
 cdef get_utcoffset(tzinfo tz, obj)
 cdef bint is_fixed_offset(tzinfo tz)
 
-cdef object get_dst_info(object tz)
+cdef object get_dst_info(tzinfo tz)
diff --git a/pandas/_libs/tslibs/timezones.pyx b/pandas/_libs/tslibs/timezones.pyx
index 7fbb50fcbfd41..4b8244c269828 100644
--- a/pandas/_libs/tslibs/timezones.pyx
+++ b/pandas/_libs/tslibs/timezones.pyx
@@ -116,7 +116,7 @@ def _p_tz_cache_key(tz):
 dst_cache = {}
 
 
-cdef inline object tz_cache_key(object tz):
+cdef inline object tz_cache_key(tzinfo tz):
     """
     Return the key in the cache for the timezone info object or None
     if unknown.
@@ -210,13 +210,16 @@ cdef int64_t[:] unbox_utcoffsets(object transinfo):
 # Daylight Savings
 
 
-cdef object get_dst_info(object tz):
+cdef object get_dst_info(tzinfo tz):
     """
-    return a tuple of :
-      (UTC times of DST transitions,
-       UTC offsets in microseconds corresponding to DST transitions,
-       string of type of transitions)
-
+    Returns
+    -------
+    ndarray[int64_t]
+        Nanosecond UTC times of DST transitions.
+    ndarray[int64_t]
+        Nanosecond UTC offsets corresponding to DST transitions.
+    str
+        Describing the type of tzinfo object.
     """
     cache_key = tz_cache_key(tz)
     if cache_key is None:
@@ -225,7 +228,7 @@ cdef object get_dst_info(tzinfo tz):
             num = int(get_utcoffset(tz, None).total_seconds()) * 1_000_000_000
             return (np.array([NPY_NAT + 1], dtype=np.int64),
                     np.array([num], dtype=np.int64),
-                    None)
+                    "unknown")
 
     if cache_key not in dst_cache:
         if treat_tz_as_pytz(tz):
@@ -267,14 +270,13 @@ cdef object get_dst_info(tzinfo tz):
             # (under the just-deleted code that returned empty arrays)
             raise AssertionError("dateutil tzinfo is not a FixedOffset "
                                  "and has an empty `_trans_list`.", tz)
-        else:
-            # static tzinfo
-            # TODO: This case is not hit in tests (2018-07-17); is it possible?
+ # static tzinfo, we can get here with pytz.StaticTZInfo + # which are not caught by treat_tz_as_pytz trans = np.array([NPY_NAT + 1], dtype=np.int64) - num = int(get_utcoffset(tz, None).total_seconds()) * 1000000000 + num = int(get_utcoffset(tz, None).total_seconds()) * 1_000_000_000 deltas = np.array([num], dtype=np.int64) - typ = 'static' + typ = "static" dst_cache[cache_key] = (trans, deltas, typ) From c616c6853c5c4e77a5e5a61e60e8af5d88481200 Mon Sep 17 00:00:00 2001 From: Graham Wetzler Date: Wed, 1 Jul 2020 11:53:34 -0500 Subject: [PATCH 022/632] TST: Add test for df.apply(lambda x: x.dtype) (#35072) --- pandas/tests/frame/test_apply.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/pandas/tests/frame/test_apply.py b/pandas/tests/frame/test_apply.py index 8f0d3d9fbc734..114b3c0d0a3fc 100644 --- a/pandas/tests/frame/test_apply.py +++ b/pandas/tests/frame/test_apply.py @@ -1501,3 +1501,12 @@ def test_consistency_of_aggregates_of_columns_with_missing_values(self, df, meth tm.assert_series_equal( none_in_first_column_result, none_in_second_column_result ) + + @pytest.mark.parametrize("col", [1, 1.0, True, "a", np.nan]) + def test_apply_dtype(self, col): + # GH 31466 + df = pd.DataFrame([[1.0, col]], columns=["a", "b"]) + result = df.apply(lambda x: x.dtype) + expected = df.dtypes + + tm.assert_series_equal(result, expected) From eb4bbe22e8bf56afae931d6a1e1a56cec6d446fa Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Wed, 1 Jul 2020 10:07:39 -0700 Subject: [PATCH 023/632] CLN: stronger typing in libtimezones (#35082) --- pandas/_libs/tslibs/timestamps.pyx | 2 +- pandas/_libs/tslibs/timezones.pxd | 6 +++--- pandas/_libs/tslibs/timezones.pyx | 17 ++++++++++------- 3 files changed, 14 insertions(+), 11 deletions(-) diff --git a/pandas/_libs/tslibs/timestamps.pyx b/pandas/_libs/tslibs/timestamps.pyx index ba6cee3d7ad8e..1328bc8a5175f 100644 --- a/pandas/_libs/tslibs/timestamps.pyx +++ b/pandas/_libs/tslibs/timestamps.pyx @@ -991,7 +991,7 @@ class Timestamp(_Timestamp): "Timestamp from components." ) - if tz is not None and treat_tz_as_pytz(tz): + if tz is not None and PyTZInfo_Check(tz) and treat_tz_as_pytz(tz): raise ValueError( "pytz timezones do not support fold. Please use dateutil " "timezones." 
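A sketch of what the tightened guard above permits and rejects; Europe/Amsterdam has an ambiguous 02:30 on 2020-10-25, and the exact constructor behavior is assumed from the error message:

import pandas as pd
import pytz
from dateutil.tz import gettz

pd.Timestamp(year=2020, month=10, day=25, hour=2, minute=30,
             tz=gettz("Europe/Amsterdam"), fold=1)  # ok with dateutil
pd.Timestamp(year=2020, month=10, day=25, hour=2, minute=30,
             tz=pytz.timezone("Europe/Amsterdam"), fold=1)
# ValueError: pytz timezones do not support fold. Please use dateutil timezones.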
diff --git a/pandas/_libs/tslibs/timezones.pxd b/pandas/_libs/tslibs/timezones.pxd index 2428993c45f56..78483bd6fe09e 100644 --- a/pandas/_libs/tslibs/timezones.pxd +++ b/pandas/_libs/tslibs/timezones.pxd @@ -2,10 +2,10 @@ from cpython.datetime cimport tzinfo cdef tzinfo utc_pytz -cpdef bint is_utc(object tz) -cdef bint is_tzlocal(object tz) +cpdef bint is_utc(tzinfo tz) +cdef bint is_tzlocal(tzinfo tz) -cdef bint treat_tz_as_pytz(object tz) +cdef bint treat_tz_as_pytz(tzinfo tz) cpdef bint tz_compare(object start, object end) cpdef object get_timezone(object tz) diff --git a/pandas/_libs/tslibs/timezones.pyx b/pandas/_libs/tslibs/timezones.pyx index 4b8244c269828..7d8aabcc47835 100644 --- a/pandas/_libs/tslibs/timezones.pyx +++ b/pandas/_libs/tslibs/timezones.pyx @@ -1,5 +1,6 @@ -from cpython.datetime cimport tzinfo from datetime import timezone +from cpython.datetime cimport tzinfo, PyTZInfo_Check, PyDateTime_IMPORT +PyDateTime_IMPORT # dateutil compat from dateutil.tz import ( @@ -29,20 +30,20 @@ cdef tzinfo utc_pytz = UTC # ---------------------------------------------------------------------- -cpdef inline bint is_utc(object tz): +cpdef inline bint is_utc(tzinfo tz): return tz is utc_pytz or tz is utc_stdlib or isinstance(tz, _dateutil_tzutc) -cdef inline bint is_tzlocal(object tz): +cdef inline bint is_tzlocal(tzinfo tz): return isinstance(tz, _dateutil_tzlocal) -cdef inline bint treat_tz_as_pytz(object tz): +cdef inline bint treat_tz_as_pytz(tzinfo tz): return (hasattr(tz, '_utc_transition_times') and hasattr(tz, '_transition_info')) -cdef inline bint treat_tz_as_dateutil(object tz): +cdef inline bint treat_tz_as_dateutil(tzinfo tz): return hasattr(tz, '_trans_list') and hasattr(tz, '_trans_idx') @@ -59,7 +60,9 @@ cpdef inline object get_timezone(object tz): the tz name. It needs to be a string so that we can serialize it with UJSON/pytables. maybe_get_tz (below) is the inverse of this process. 
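The module tells the two third-party implementations apart purely by duck typing on private attributes, as the helpers above show:

import pytz
from dateutil.tz import gettz

hasattr(pytz.timezone("US/Central"), "_utc_transition_times")  # True -> pytz path
hasattr(gettz("US/Central"), "_trans_list")                    # True -> dateutil path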
""" - if is_utc(tz): + if not PyTZInfo_Check(tz): + return tz + elif is_utc(tz): return tz else: if treat_tz_as_dateutil(tz): @@ -327,7 +330,7 @@ cpdef bint tz_compare(object start, object end): return get_timezone(start) == get_timezone(end) -def tz_standardize(tz: object): +def tz_standardize(tz: tzinfo): """ If the passed tz is a pytz timezone object, "normalize" it to the a consistent version From c78c537a64159da70f968a3d3a484aa978b32957 Mon Sep 17 00:00:00 2001 From: Erfan Nariman <34067903+erfannariman@users.noreply.github.com> Date: Wed, 1 Jul 2020 20:25:39 +0200 Subject: [PATCH 024/632] TST: Test for groupby transform on categorical column (#35074) * GH29037 Added test for groupby transform on categorical column * GH29037 - changes for black * GH29037 - moved test to test_categorical --- pandas/tests/groupby/test_categorical.py | 50 ++++++++++++++++++++++++ 1 file changed, 50 insertions(+) diff --git a/pandas/tests/groupby/test_categorical.py b/pandas/tests/groupby/test_categorical.py index 60c82bf1fb71c..4de61f719dfbb 100644 --- a/pandas/tests/groupby/test_categorical.py +++ b/pandas/tests/groupby/test_categorical.py @@ -1509,3 +1509,53 @@ def test_aggregate_categorical_with_isnan(): index=index, ) tm.assert_frame_equal(result, expected) + + +def test_categorical_transform(): + # GH 29037 + df = pd.DataFrame( + { + "package_id": [1, 1, 1, 2, 2, 3], + "status": [ + "Waiting", + "OnTheWay", + "Delivered", + "Waiting", + "OnTheWay", + "Waiting", + ], + } + ) + + delivery_status_type = pd.CategoricalDtype( + categories=["Waiting", "OnTheWay", "Delivered"], ordered=True + ) + df["status"] = df["status"].astype(delivery_status_type) + df["last_status"] = df.groupby("package_id")["status"].transform(max) + result = df.copy() + + expected = pd.DataFrame( + { + "package_id": [1, 1, 1, 2, 2, 3], + "status": [ + "Waiting", + "OnTheWay", + "Delivered", + "Waiting", + "OnTheWay", + "Waiting", + ], + "last_status": [ + "Delivered", + "Delivered", + "Delivered", + "OnTheWay", + "OnTheWay", + "Waiting", + ], + } + ) + + expected["status"] = expected["status"].astype(delivery_status_type) + + tm.assert_frame_equal(result, expected) From 5507452c4a675ad5453bc5f02cd68b65fe3977df Mon Sep 17 00:00:00 2001 From: Andrew Wieteska <48889395+arw2019@users.noreply.github.com> Date: Wed, 1 Jul 2020 14:28:31 -0400 Subject: [PATCH 025/632] Test for pd.to_sql column error if data contains -np.inf (#34493) * TST: pd.to_sql for dataframes with -np.inf (#34431) * DOC: updated what's new (#34431) * DOC: improved entry (#34431) * TST: moved to _TestSQLAlchemy + added round trips * TST: rename + add comment with GH issue # * TST: rewrote using pytest.mark.parametrize for arg to DataFrame * TST: removed underscore from _input * DOC: added double backtick to np.inf & removed extraneous space * TST: pd.to_sql for dataframes with -np.inf (#34431) * DOC: updated what's new (#34431) * DOC: improved entry (#34431) * TST: moved to _TestSQLAlchemy + added round trips * TST: rename + add comment with GH issue # * TST: rewrote using pytest.mark.parametrize for arg to DataFrame * TST: removed underscore from _input * DOC: added double backtick to np.inf & removed extraneous space * BUG: add catch for MySQL error with np.inf * use regex for string match + add runtime import * clean up regex * TST: update to catch error for -np.inf with MySQL * DOC: resolved conflict in whatsnew * TST: update test_to_sql_with_neg_npinf * fixed error handler syntax in SQLDatabase.to_sql * fixed error handler syntax in SQLDatabase.to_sql * TST: 
added an xfail test for npinf entries with mysql * fixed imports * added reference to GH issue * fixed test_to_sql_with_npinf error catch * fixed spelling error in message (can not -> cannot) * DOC: added info re MySQL ValueError to whatsnew * fixed variable name in to_sql * replaced sqlalchemy's dialect.name with flavor * fixed typo in test_to_sql-with-npinf --- doc/source/whatsnew/v1.1.0.rst | 1 + pandas/io/sql.py | 15 ++++++++++++++- pandas/tests/io/test_sql.py | 18 ++++++++++++++++++ 3 files changed, 33 insertions(+), 1 deletion(-) diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst index 2fac8d1a8f63f..8c6add92e658b 100644 --- a/doc/source/whatsnew/v1.1.0.rst +++ b/doc/source/whatsnew/v1.1.0.rst @@ -1044,6 +1044,7 @@ I/O - Bug in :meth:`HDFStore.append_to_multiple` was raising a ``ValueError`` when the min_itemsize parameter is set (:issue:`11238`) - Bug in :meth:`~HDFStore.create_table` now raises an error when `column` argument was not specified in `data_columns` on input (:issue:`28156`) - :meth:`read_json` now could read line-delimited json file from a file url while `lines` and `chunksize` are set. +- Bug in :meth:`DataFrame.to_sql` when reading DataFrames with ``-np.inf`` entries with MySQL now has a more explicit ``ValueError`` (:issue:`34431`) Plotting ^^^^^^^^ diff --git a/pandas/io/sql.py b/pandas/io/sql.py index b137608475b3d..9177696ca13d6 100644 --- a/pandas/io/sql.py +++ b/pandas/io/sql.py @@ -1391,7 +1391,20 @@ def to_sql( dtype=dtype, ) table.create() - table.insert(chunksize, method=method) + + from sqlalchemy import exc + + try: + table.insert(chunksize, method=method) + except exc.SQLAlchemyError as err: + # GH34431 + msg = "(1054, \"Unknown column 'inf' in 'field list'\")" + err_text = str(err.orig) + if re.search(msg, err_text): + raise ValueError("inf cannot be used with MySQL") from err + else: + raise err + if not name.isdigit() and not name.islower(): # check for potentially case sensitivity issues (GH7815) # Only check when name is not a number and name is not lower case diff --git a/pandas/tests/io/test_sql.py b/pandas/tests/io/test_sql.py index a07e7a74b7573..0991fae39138e 100644 --- a/pandas/tests/io/test_sql.py +++ b/pandas/tests/io/test_sql.py @@ -1813,6 +1813,24 @@ def main(connectable): DataFrame({"test_foo_data": [0, 1, 2]}).to_sql("test_foo_data", self.conn) main(self.conn) + @pytest.mark.parametrize( + "input", + [{"foo": [np.inf]}, {"foo": [-np.inf]}, {"foo": [-np.inf], "infe0": ["bar"]}], + ) + def test_to_sql_with_negative_npinf(self, input): + # GH 34431 + + df = pd.DataFrame(input) + + if self.flavor == "mysql": + msg = "inf cannot be used with MySQL" + with pytest.raises(ValueError, match=msg): + df.to_sql("foobar", self.conn, index=False) + else: + df.to_sql("foobar", self.conn, index=False) + res = sql.read_sql_table("foobar", self.conn) + tm.assert_equal(df, res) + def test_temporary_table(self): test_data = "Hello, World!" 
expected = DataFrame({"spam": [test_data]}) From 8d10bfb6ff10a1199b33cd06a347c1187c4b3e94 Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Wed, 1 Jul 2020 17:16:05 -0700 Subject: [PATCH 026/632] TYP: annotations, typing for infer_tzinfo (#35084) --- pandas/_libs/tslibs/conversion.pyx | 3 ++- pandas/_libs/tslibs/timezones.pyx | 4 ++-- pandas/core/arrays/datetimes.py | 13 ++++++++----- pandas/io/pytables.py | 2 +- 4 files changed, 13 insertions(+), 9 deletions(-) diff --git a/pandas/_libs/tslibs/conversion.pyx b/pandas/_libs/tslibs/conversion.pyx index ac24dd546d9d3..9ee76a8c291a8 100644 --- a/pandas/_libs/tslibs/conversion.pyx +++ b/pandas/_libs/tslibs/conversion.pyx @@ -246,11 +246,12 @@ def datetime_to_datetime64(ndarray[object] values): """ cdef: Py_ssize_t i, n = len(values) - object val, inferred_tz = None + object val int64_t[:] iresult npy_datetimestruct dts _TSObject _ts bint found_naive = False + tzinfo inferred_tz = None result = np.empty(n, dtype='M8[ns]') iresult = result.view('i8') diff --git a/pandas/_libs/tslibs/timezones.pyx b/pandas/_libs/tslibs/timezones.pyx index 7d8aabcc47835..112da2eb5b624 100644 --- a/pandas/_libs/tslibs/timezones.pyx +++ b/pandas/_libs/tslibs/timezones.pyx @@ -1,5 +1,5 @@ from datetime import timezone -from cpython.datetime cimport tzinfo, PyTZInfo_Check, PyDateTime_IMPORT +from cpython.datetime cimport datetime, tzinfo, PyTZInfo_Check, PyDateTime_IMPORT PyDateTime_IMPORT # dateutil compat @@ -286,7 +286,7 @@ cdef object get_dst_info(tzinfo tz): return dst_cache[cache_key] -def infer_tzinfo(start, end): +def infer_tzinfo(datetime start, datetime end): if start is not None and end is not None: tz = start.tzinfo if not tz_compare(tz, end.tzinfo): diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index 461f71ff821fa..296f11583c372 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -1,4 +1,4 @@ -from datetime import datetime, time, timedelta +from datetime import datetime, time, timedelta, tzinfo from typing import Optional, Union import warnings @@ -1908,6 +1908,7 @@ def sequence_to_dt64ns( inferred_freq = None dtype = _validate_dt64_dtype(dtype) + tz = timezones.maybe_get_tz(tz) if not hasattr(data, "dtype"): # e.g. list, tuple @@ -1950,14 +1951,14 @@ def sequence_to_dt64ns( data, inferred_tz = objects_to_datetime64ns( data, dayfirst=dayfirst, yearfirst=yearfirst ) - tz = maybe_infer_tz(tz, inferred_tz) + tz = _maybe_infer_tz(tz, inferred_tz) data_dtype = data.dtype # `data` may have originally been a Categorical[datetime64[ns, tz]], # so we need to handle these types. if is_datetime64tz_dtype(data_dtype): # DatetimeArray -> ndarray - tz = maybe_infer_tz(tz, data.tz) + tz = _maybe_infer_tz(tz, data.tz) result = data._data elif is_datetime64_dtype(data_dtype): @@ -2144,7 +2145,9 @@ def maybe_convert_dtype(data, copy): # Validation and Inference -def maybe_infer_tz(tz, inferred_tz): +def _maybe_infer_tz( + tz: Optional[tzinfo], inferred_tz: Optional[tzinfo] +) -> Optional[tzinfo]: """ If a timezone is inferred from data, check that it is compatible with the user-provided timezone, if any. 
@@ -2216,7 +2219,7 @@ def _validate_dt64_dtype(dtype): return dtype -def validate_tz_from_dtype(dtype, tz): +def validate_tz_from_dtype(dtype, tz: Optional[tzinfo]) -> Optional[tzinfo]: """ If the given dtype is a DatetimeTZDtype, extract the implied tzinfo object from it and check that it does not conflict with the given diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index 981b380f8b5e9..b67a1c5781d91 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -4714,7 +4714,7 @@ def _set_tz( if tz is not None: name = getattr(values, "name", None) values = values.ravel() - tz = timezones.get_timezone(_ensure_decoded(tz)) + tz = _ensure_decoded(tz) values = DatetimeIndex(values, name=name) values = values.tz_localize("UTC").tz_convert(tz) elif coerce: From 2b7b975f5869982ae0280df20ba2dd2ff46acbaf Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Thu, 2 Jul 2020 07:42:05 -0700 Subject: [PATCH 027/632] TYP: types for tz_compare (#35093) --- pandas/_libs/lib.pyx | 4 ++-- pandas/_libs/tslibs/timezones.pxd | 2 +- pandas/_libs/tslibs/timezones.pyx | 3 +-- pandas/core/arrays/datetimes.py | 4 +++- 4 files changed, 7 insertions(+), 6 deletions(-) diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx index ea97bab2198eb..37d83a73c6597 100644 --- a/pandas/_libs/lib.pyx +++ b/pandas/_libs/lib.pyx @@ -73,7 +73,7 @@ from pandas._libs.tslibs.nattype cimport ( ) from pandas._libs.tslibs.conversion cimport convert_to_tsobject from pandas._libs.tslibs.timedeltas cimport convert_to_timedelta64 -from pandas._libs.tslibs.timezones cimport get_timezone, tz_compare +from pandas._libs.tslibs.timezones cimport tz_compare from pandas._libs.tslibs.period cimport is_period_object from pandas._libs.tslibs.offsets cimport is_offset_object @@ -1789,7 +1789,7 @@ def is_datetime_with_singletz_array(values: ndarray) -> bool: for i in range(n): base_val = values[i] if base_val is not NaT: - base_tz = get_timezone(getattr(base_val, 'tzinfo', None)) + base_tz = getattr(base_val, 'tzinfo', None) break for j in range(i, n): diff --git a/pandas/_libs/tslibs/timezones.pxd b/pandas/_libs/tslibs/timezones.pxd index 78483bd6fe09e..0179be3cdd8e6 100644 --- a/pandas/_libs/tslibs/timezones.pxd +++ b/pandas/_libs/tslibs/timezones.pxd @@ -7,7 +7,7 @@ cdef bint is_tzlocal(tzinfo tz) cdef bint treat_tz_as_pytz(tzinfo tz) -cpdef bint tz_compare(object start, object end) +cpdef bint tz_compare(tzinfo start, tzinfo end) cpdef object get_timezone(object tz) cpdef object maybe_get_tz(object tz) diff --git a/pandas/_libs/tslibs/timezones.pyx b/pandas/_libs/tslibs/timezones.pyx index 112da2eb5b624..5bf47efeccfb0 100644 --- a/pandas/_libs/tslibs/timezones.pyx +++ b/pandas/_libs/tslibs/timezones.pyx @@ -301,7 +301,7 @@ def infer_tzinfo(datetime start, datetime end): return tz -cpdef bint tz_compare(object start, object end): +cpdef bint tz_compare(tzinfo start, tzinfo end): """ Compare string representations of timezones @@ -324,7 +324,6 @@ cpdef bint tz_compare(object start, object end): Returns: ------- bool - """ # GH 18523 return get_timezone(start) == get_timezone(end) diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index 296f11583c372..12eefec0c149b 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -2266,7 +2266,9 @@ def validate_tz_from_dtype(dtype, tz: Optional[tzinfo]) -> Optional[tzinfo]: return tz -def _infer_tz_from_endpoints(start, end, tz): +def _infer_tz_from_endpoints( + start: Timestamp, end: Timestamp, tz: Optional[tzinfo] +) -> 
Optional[tzinfo]: """ If a timezone is not explicitly given via `tz`, see if one can be inferred from the `start` and `end` endpoints. If more than one From 5309a5ca1568e91b4047655a4bed3e826e64f34c Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Thu, 2 Jul 2020 07:42:54 -0700 Subject: [PATCH 028/632] BENCH: implement asvs for ints_to_pydatetime (#35091) --- asv_bench/benchmarks/tslibs/tslib.py | 55 ++++++++++++++++++++++++++++ 1 file changed, 55 insertions(+) create mode 100644 asv_bench/benchmarks/tslibs/tslib.py diff --git a/asv_bench/benchmarks/tslibs/tslib.py b/asv_bench/benchmarks/tslibs/tslib.py new file mode 100644 index 0000000000000..eacf5a5731dc2 --- /dev/null +++ b/asv_bench/benchmarks/tslibs/tslib.py @@ -0,0 +1,55 @@ +""" +ipython analogue: + +tr = TimeIntsToPydatetime() +mi = pd.MultiIndex.from_product( + tr.params[:-1] + ([str(x) for x in tr.params[-1]],) +) +df = pd.DataFrame(np.nan, index=mi, columns=["mean", "stdev"]) +for box in tr.params[0]: + for size in tr.params[1]: + for tz in tr.params[2]: + tr.setup(box, size, tz) + key = (box, size, str(tz)) + print(key) + val = %timeit -o tr.time_ints_to_pydatetime(box, size, tz) + df.loc[key] = (val.average, val.stdev) +""" +from datetime import timedelta, timezone + +from dateutil.tz import gettz, tzlocal +import numpy as np +import pytz + +from pandas._libs.tslib import ints_to_pydatetime + +_tzs = [ + None, + timezone.utc, + timezone(timedelta(minutes=60)), + pytz.timezone("US/Pacific"), + gettz("Asia/Tokyo"), + tzlocal(), +] +_sizes = [0, 1, 100, 10 ** 4, 10 ** 6] + + +class TimeIntsToPydatetime: + params = ( + ["time", "date", "datetime", "timestamp"], + _sizes, + _tzs, + ) + param_names = ["box", "size", "tz"] + # TODO: fold? freq? + + def setup(self, box, size, tz): + arr = np.random.randint(0, 10, size=size, dtype="i8") + self.i8data = arr + + def time_ints_to_pydatetime(self, box, size, tz): + if box == "date": + # ints_to_pydatetime does not allow non-None tz with date; + # this will mean doing some duplicate benchmarks + tz = None + ints_to_pydatetime(self.i8data, tz, box=box) From efa7ebd16082c2a9558efb28916faa79e1dbcb94 Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Thu, 2 Jul 2020 07:44:09 -0700 Subject: [PATCH 029/632] CLN: tighter typing in libconversion (#35088) --- pandas/_libs/tslibs/conversion.pxd | 4 +- pandas/_libs/tslibs/conversion.pyx | 36 ++++++------- pandas/_libs/tslibs/timestamps.pyx | 50 ++++++++++--------- .../scalar/timestamp/test_constructors.py | 5 +- 4 files changed, 46 insertions(+), 49 deletions(-) diff --git a/pandas/_libs/tslibs/conversion.pxd b/pandas/_libs/tslibs/conversion.pxd index 623d9f14d646b..2cf75944a8196 100644 --- a/pandas/_libs/tslibs/conversion.pxd +++ b/pandas/_libs/tslibs/conversion.pxd @@ -13,11 +13,11 @@ cdef class _TSObject: bint fold -cdef convert_to_tsobject(object ts, object tz, object unit, +cdef convert_to_tsobject(object ts, tzinfo tz, object unit, bint dayfirst, bint yearfirst, int32_t nanos=*) -cdef _TSObject convert_datetime_to_tsobject(datetime ts, object tz, +cdef _TSObject convert_datetime_to_tsobject(datetime ts, tzinfo tz, int32_t nanos=*) cdef int64_t get_datetime64_nanos(object val) except? 
-1 diff --git a/pandas/_libs/tslibs/conversion.pyx b/pandas/_libs/tslibs/conversion.pyx index 9ee76a8c291a8..95500f66db156 100644 --- a/pandas/_libs/tslibs/conversion.pyx +++ b/pandas/_libs/tslibs/conversion.pyx @@ -307,7 +307,7 @@ cdef class _TSObject: return self.value -cdef convert_to_tsobject(object ts, object tz, object unit, +cdef convert_to_tsobject(object ts, tzinfo tz, object unit, bint dayfirst, bint yearfirst, int32_t nanos=0): """ Extract datetime and int64 from any of: @@ -326,13 +326,10 @@ cdef convert_to_tsobject(object ts, object tz, object unit, cdef: _TSObject obj - if tz is not None: - tz = maybe_get_tz(tz) - obj = _TSObject() if isinstance(ts, str): - return convert_str_to_tsobject(ts, tz, unit, dayfirst, yearfirst) + return _convert_str_to_tsobject(ts, tz, unit, dayfirst, yearfirst) if ts is None or ts is NaT: obj.value = NPY_NAT @@ -374,16 +371,16 @@ cdef convert_to_tsobject(object ts, object tz, object unit, f'Timestamp') if tz is not None: - localize_tso(obj, tz) + _localize_tso(obj, tz) if obj.value != NPY_NAT: - # check_overflows needs to run after localize_tso + # check_overflows needs to run after _localize_tso check_dts_bounds(&obj.dts) check_overflows(obj) return obj -cdef _TSObject convert_datetime_to_tsobject(datetime ts, object tz, +cdef _TSObject convert_datetime_to_tsobject(datetime ts, tzinfo tz, int32_t nanos=0): """ Convert a datetime (or Timestamp) input `ts`, along with optional timezone @@ -446,8 +443,8 @@ cdef _TSObject convert_datetime_to_tsobject(datetime ts, object tz, return obj -cdef _TSObject create_tsobject_tz_using_offset(npy_datetimestruct dts, - int tzoffset, tzinfo tz=None): +cdef _TSObject _create_tsobject_tz_using_offset(npy_datetimestruct dts, + int tzoffset, tzinfo tz=None): """ Convert a datetimestruct `dts`, along with initial timezone offset `tzoffset` to a _TSObject (with timezone object `tz` - optional). @@ -500,9 +497,9 @@ cdef _TSObject create_tsobject_tz_using_offset(npy_datetimestruct dts, return obj -cdef _TSObject convert_str_to_tsobject(object ts, object tz, object unit, - bint dayfirst=False, - bint yearfirst=False): +cdef _TSObject _convert_str_to_tsobject(object ts, tzinfo tz, object unit, + bint dayfirst=False, + bint yearfirst=False): """ Convert a string input `ts`, along with optional timezone object`tz` to a _TSObject. @@ -532,11 +529,6 @@ cdef _TSObject convert_str_to_tsobject(object ts, object tz, object unit, int out_local = 0, out_tzoffset = 0 bint do_parse_datetime_string = False - if tz is not None: - tz = maybe_get_tz(tz) - - assert isinstance(ts, str) - if len(ts) == 0 or ts in nat_strings: ts = NaT elif ts == 'now': @@ -557,12 +549,12 @@ cdef _TSObject convert_str_to_tsobject(object ts, object tz, object unit, if not string_to_dts_failed: check_dts_bounds(&dts) if out_local == 1: - return create_tsobject_tz_using_offset(dts, - out_tzoffset, tz) + return _create_tsobject_tz_using_offset(dts, + out_tzoffset, tz) else: ts = dtstruct_to_dt64(&dts) if tz is not None: - # shift for localize_tso + # shift for _localize_tso ts = tz_localize_to_utc(np.array([ts], dtype='i8'), tz, ambiguous='raise')[0] @@ -613,7 +605,7 @@ cdef inline check_overflows(_TSObject obj): # ---------------------------------------------------------------------- # Localization -cdef inline void localize_tso(_TSObject obj, tzinfo tz): +cdef inline void _localize_tso(_TSObject obj, tzinfo tz): """ Given the UTC nanosecond timestamp in obj.value, find the wall-clock representation of that timestamp in the given timezone. 
diff --git a/pandas/_libs/tslibs/timestamps.pyx b/pandas/_libs/tslibs/timestamps.pyx index 1328bc8a5175f..e104b722ea119 100644 --- a/pandas/_libs/tslibs/timestamps.pyx +++ b/pandas/_libs/tslibs/timestamps.pyx @@ -20,6 +20,7 @@ from cpython.datetime cimport ( datetime, time, tzinfo, + tzinfo as tzinfo_type, # alias bc `tzinfo` is a kwarg below PyDateTime_Check, PyDelta_Check, PyTZInfo_Check, @@ -932,7 +933,7 @@ class Timestamp(_Timestamp): second=None, microsecond=None, nanosecond=None, - tzinfo=None, + tzinfo_type tzinfo=None, *, fold=None ): @@ -957,18 +958,17 @@ class Timestamp(_Timestamp): # # Mixing pydatetime positional and keyword arguments is forbidden! - cdef _TSObject ts + cdef: + _TSObject ts + tzinfo_type tzobj _date_attributes = [year, month, day, hour, minute, second, microsecond, nanosecond] if tzinfo is not None: - if not PyTZInfo_Check(tzinfo): - # tzinfo must be a datetime.tzinfo object, GH#17690 - raise TypeError( - f"tzinfo must be a datetime.tzinfo object, not {type(tzinfo)}" - ) - elif tz is not None: + # GH#17690 tzinfo must be a datetime.tzinfo object, ensured + # by the cython annotation. + if tz is not None: raise ValueError('Can provide at most one of tz, tzinfo') # User passed tzinfo instead of tz; avoid silently ignoring @@ -1055,7 +1055,8 @@ class Timestamp(_Timestamp): raise ValueError("Cannot pass a datetime or Timestamp with tzinfo with " "the tz parameter. Use tz_convert instead.") - ts = convert_to_tsobject(ts_input, tz, unit, 0, 0, nanosecond or 0) + tzobj = maybe_get_tz(tz) + ts = convert_to_tsobject(ts_input, tzobj, unit, 0, 0, nanosecond or 0) if ts.value == NPY_NAT: return NaT @@ -1378,15 +1379,16 @@ default 'raise' cdef: npy_datetimestruct dts - int64_t value, value_tz, offset - object _tzinfo, result, k, v + int64_t value, value_tz + object k, v datetime ts_input + tzinfo_type tzobj # set to naive if needed - _tzinfo = self.tzinfo + tzobj = self.tzinfo value = self.value - if _tzinfo is not None: - value_tz = tz_convert_single(value, _tzinfo, UTC) + if tzobj is not None: + value_tz = tz_convert_single(value, tzobj, UTC) value += value - value_tz # setup components @@ -1419,30 +1421,30 @@ default 'raise' if nanosecond is not None: dts.ps = validate('nanosecond', nanosecond) * 1000 if tzinfo is not object: - _tzinfo = tzinfo + tzobj = tzinfo # reconstruct & check bounds - if _tzinfo is not None and treat_tz_as_pytz(_tzinfo): + if tzobj is not None and treat_tz_as_pytz(tzobj): # replacing across a DST boundary may induce a new tzinfo object # see GH#18319 - ts_input = _tzinfo.localize(datetime(dts.year, dts.month, dts.day, - dts.hour, dts.min, dts.sec, - dts.us), - is_dst=not bool(fold)) - _tzinfo = ts_input.tzinfo + ts_input = tzobj.localize(datetime(dts.year, dts.month, dts.day, + dts.hour, dts.min, dts.sec, + dts.us), + is_dst=not bool(fold)) + tzobj = ts_input.tzinfo else: kwargs = {'year': dts.year, 'month': dts.month, 'day': dts.day, 'hour': dts.hour, 'minute': dts.min, 'second': dts.sec, - 'microsecond': dts.us, 'tzinfo': _tzinfo, + 'microsecond': dts.us, 'tzinfo': tzobj, 'fold': fold} ts_input = datetime(**kwargs) - ts = convert_datetime_to_tsobject(ts_input, _tzinfo) + ts = convert_datetime_to_tsobject(ts_input, tzobj) value = ts.value + (dts.ps // 1000) if value != NPY_NAT: check_dts_bounds(&dts) - return create_timestamp_from_ts(value, dts, _tzinfo, self.freq, fold) + return create_timestamp_from_ts(value, dts, tzobj, self.freq, fold) def to_julian_date(self) -> np.float64: """ diff --git a/pandas/tests/scalar/timestamp/test_constructors.py 
b/pandas/tests/scalar/timestamp/test_constructors.py index 770753f42a4c8..316a299ba1cbb 100644 --- a/pandas/tests/scalar/timestamp/test_constructors.py +++ b/pandas/tests/scalar/timestamp/test_constructors.py @@ -174,7 +174,10 @@ def test_constructor_invalid(self): def test_constructor_invalid_tz(self): # GH#17690 - msg = "must be a datetime.tzinfo" + msg = ( + "Argument 'tzinfo' has incorrect type " + r"\(expected datetime.tzinfo, got str\)" + ) with pytest.raises(TypeError, match=msg): Timestamp("2017-10-22", tzinfo="US/Eastern") From c23228cbb484c25ed91baf8089b4cc2efa09b3d6 Mon Sep 17 00:00:00 2001 From: Tom Date: Thu, 2 Jul 2020 15:51:27 +0100 Subject: [PATCH 030/632] CLN: Removed unreached else block GH33478 (#35086) --- pandas/core/groupby/generic.py | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index dab8475d9580c..6f956a3dcc9b6 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -1218,7 +1218,7 @@ def _wrap_applied_output(self, keys, values, not_indexed_same=False): return self.obj._constructor() elif isinstance(first_not_none, DataFrame): return self._concat_objects(keys, values, not_indexed_same=not_indexed_same) - elif self.grouper.groupings is not None: + else: if len(self.grouper.groupings) > 1: key_index = self.grouper.result_index @@ -1373,10 +1373,6 @@ def _wrap_applied_output(self, keys, values, not_indexed_same=False): # of columns return self.obj._constructor_sliced(values, index=key_index) - else: - # Handle cases like BinGrouper - return self._concat_objects(keys, values, not_indexed_same=not_indexed_same) - def _transform_general( self, func, *args, engine="cython", engine_kwargs=None, **kwargs ): From cac4bfe82fab3549d2140b8eb90e6accd45fdc27 Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Thu, 2 Jul 2020 07:57:18 -0700 Subject: [PATCH 031/632] BENCH: implement asvs for get_resolution (#35075) --- asv_bench/benchmarks/tslibs/resolution.py | 50 +++++++++++++++++++++++ 1 file changed, 50 insertions(+) create mode 100644 asv_bench/benchmarks/tslibs/resolution.py diff --git a/asv_bench/benchmarks/tslibs/resolution.py b/asv_bench/benchmarks/tslibs/resolution.py new file mode 100644 index 0000000000000..274aa1ad6d4a9 --- /dev/null +++ b/asv_bench/benchmarks/tslibs/resolution.py @@ -0,0 +1,50 @@ +""" +ipython analogue: + +tr = TimeResolution() +mi = pd.MultiIndex.from_product(tr.params[:-1] + ([str(x) for x in tr.params[-1]],)) +df = pd.DataFrame(np.nan, index=mi, columns=["mean", "stdev"]) + +for unit in tr.params[0]: + for size in tr.params[1]: + for tz in tr.params[2]: + tr.setup(unit, size, tz) + key = (unit, size, str(tz)) + print(key) + + val = %timeit -o tr.time_get_resolution(unit, size, tz) + + df.loc[key] = (val.average, val.stdev) + +""" +from datetime import timedelta, timezone + +from dateutil.tz import gettz, tzlocal +import numpy as np +import pytz + +from pandas._libs.tslibs.resolution import get_resolution + + +class TimeResolution: + params = ( + ["D", "h", "m", "s", "us", "ns"], + [1, 100, 10 ** 4, 10 ** 6], + [ + None, + timezone.utc, + timezone(timedelta(minutes=60)), + pytz.timezone("US/Pacific"), + gettz("Asia/Tokyo"), + tzlocal(), + ], + ) + param_names = ["unit", "size", "tz"] + + def setup(self, unit, size, tz): + arr = np.random.randint(0, 10, size=size, dtype="i8") + arr = arr.view(f"M8[{unit}]").astype("M8[ns]").view("i8") + self.i8data = arr + + def time_get_resolution(self, unit, size, tz): + get_resolution(self.i8data, tz) 
From 1d6f2bfe7966fa667a50b7ca2b1876fb145f6b9a Mon Sep 17 00:00:00 2001 From: SanthoshBala18 Date: Thu, 2 Jul 2020 20:31:39 +0530 Subject: [PATCH 032/632] Fixed #34859: Added support for '0' and '1' in BooleanArray._from_sequence_of_strings method (#35061) --- doc/source/whatsnew/v1.1.0.rst | 1 + pandas/core/arrays/boolean.py | 4 ++-- .../tests/arrays/boolean/test_construction.py | 5 +++-- pandas/tests/io/parser/test_dtypes.py | 22 ++++++++++++++++++- 4 files changed, 27 insertions(+), 5 deletions(-) diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst index 8c6add92e658b..40839f6c70d6e 100644 --- a/doc/source/whatsnew/v1.1.0.rst +++ b/doc/source/whatsnew/v1.1.0.rst @@ -328,6 +328,7 @@ Other enhancements - :meth:`DataFrame.to_html` and :meth:`DataFrame.to_string`'s ``col_space`` parameter now accepts a list or dict to change only some specific columns' width (:issue:`28917`). - :meth:`DataFrame.to_excel` can now also write OpenOffice spreadsheet (.ods) files (:issue:`27222`) - :meth:`~Series.explode` now accepts ``ignore_index`` to reset the index, similarly to :meth:`pd.concat` or :meth:`DataFrame.sort_values` (:issue:`34932`). +- :meth:`read_csv` now accepts string values like "0", "0.0", "1", "1.0" as convertible to the nullable boolean dtype (:issue:`34859`) .. --------------------------------------------------------------------------- diff --git a/pandas/core/arrays/boolean.py b/pandas/core/arrays/boolean.py index 9f1c2c6e668ad..dbce71b77a425 100644 --- a/pandas/core/arrays/boolean.py +++ b/pandas/core/arrays/boolean.py @@ -286,9 +286,9 @@ def _from_sequence_of_strings( def map_string(s): if isna(s): return s - elif s in ["True", "TRUE", "true"]: + elif s in ["True", "TRUE", "true", "1", "1.0"]: return True - elif s in ["False", "FALSE", "false"]: + elif s in ["False", "FALSE", "false", "0", "0.0"]: return False else: raise ValueError(f"{s} cannot be cast to bool") diff --git a/pandas/tests/arrays/boolean/test_construction.py b/pandas/tests/arrays/boolean/test_construction.py index f7354a089df3b..2f5c61304d415 100644 --- a/pandas/tests/arrays/boolean/test_construction.py +++ b/pandas/tests/arrays/boolean/test_construction.py @@ -247,10 +247,11 @@ def test_coerce_to_numpy_array(): def test_to_boolean_array_from_strings(): result = BooleanArray._from_sequence_of_strings( - np.array(["True", "False", np.nan], dtype=object) + np.array(["True", "False", "1", "1.0", "0", "0.0", np.nan], dtype=object) ) expected = BooleanArray( - np.array([True, False, False]), np.array([False, False, True]) + np.array([True, False, True, True, False, False, False]), + np.array([False, False, False, False, False, False, True]), ) tm.assert_extension_array_equal(result, expected) diff --git a/pandas/tests/io/parser/test_dtypes.py b/pandas/tests/io/parser/test_dtypes.py index 6298d1e5498f3..6ac310e3b2227 100644 --- a/pandas/tests/io/parser/test_dtypes.py +++ b/pandas/tests/io/parser/test_dtypes.py @@ -561,9 +561,13 @@ def test_boolean_dtype(all_parsers): "True", "TRUE", "true", + "1", + "1.0", "False", "FALSE", "false", + "0", + "0.0", "NaN", "nan", "NA", @@ -576,7 +580,23 @@ def test_boolean_dtype(all_parsers): expected = pd.DataFrame( { "a": pd.array( - [True, True, True, False, False, False, None, None, None, None, None], + [ + True, + True, + True, + True, + True, + False, + False, + False, + False, + False, + None, + None, + None, + None, + None, + ], dtype="boolean", ) } From 997c9144c739fc0ddecf60da7153154f2971d61f Mon Sep 17 00:00:00 2001 From: pizzathief Date: Fri, 3 Jul 2020 
01:26:43 +1000 Subject: [PATCH 033/632] BUG: DataFrame.melt gives unexpected result when column "value" already exists (#35003) --- doc/source/whatsnew/v1.1.0.rst | 1 + pandas/core/reshape/melt.py | 11 +++++++++++ pandas/tests/reshape/test_melt.py | 14 ++++++++++++++ 3 files changed, 26 insertions(+) diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst index 40839f6c70d6e..9bd4ddbb624d9 100644 --- a/doc/source/whatsnew/v1.1.0.rst +++ b/doc/source/whatsnew/v1.1.0.rst @@ -819,6 +819,7 @@ Deprecations - :meth:`util.testing.assert_almost_equal` now accepts both relative and absolute precision through the ``rtol``, and ``atol`` parameters, thus deprecating the ``check_less_precise`` parameter. (:issue:`13357`). +- :func:`DataFrame.melt` accepting a value_name that already exists is deprecated, and will be removed in a future version (:issue:`34731`) .. --------------------------------------------------------------------------- diff --git a/pandas/core/reshape/melt.py b/pandas/core/reshape/melt.py index cd0619738677d..923b9e7462d8b 100644 --- a/pandas/core/reshape/melt.py +++ b/pandas/core/reshape/melt.py @@ -1,5 +1,6 @@ import re from typing import TYPE_CHECKING, List, cast +import warnings import numpy as np @@ -40,6 +41,16 @@ def melt( else: cols = list(frame.columns) + if value_name in frame.columns: + warnings.warn( + "This dataframe has a column name that matches the 'value_name' column " + "name of the resultiing Dataframe. " + "In the future this will raise an error, please set the 'value_name' " + "parameter of DataFrame.melt to a unique name.", + FutureWarning, + stacklevel=3, + ) + if id_vars is not None: if not is_list_like(id_vars): id_vars = [id_vars] diff --git a/pandas/tests/reshape/test_melt.py b/pandas/tests/reshape/test_melt.py index 000a6354277ab..a0fa10802f860 100644 --- a/pandas/tests/reshape/test_melt.py +++ b/pandas/tests/reshape/test_melt.py @@ -1014,3 +1014,17 @@ def test_col_substring_of_stubname(self): ) result = pd.wide_to_long(wide_df, stubnames="PA", i=["node_id", "A"], j="time") tm.assert_frame_equal(result, expected) + + def test_warn_of_column_name_value(self): + # GH34731 + # raise a warning if the resultant value column name matches + # a name in the dataframe already (default name is "value") + df = pd.DataFrame({"col": list("ABC"), "value": range(10, 16, 2)}) + expected = pd.DataFrame( + [["A", "col", "A"], ["B", "col", "B"], ["C", "col", "C"]], + columns=["value", "variable", "value"], + ) + + with tm.assert_produces_warning(FutureWarning): + result = df.melt(id_vars="value") + tm.assert_frame_equal(result, expected) From 7412c5ace136cb65915ac5ea10cfb1921a3cb099 Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Thu, 2 Jul 2020 09:59:10 -0700 Subject: [PATCH 034/632] PERF: tz_convert/tz_convert_single (#35087) --- pandas/_libs/tslibs/tzconversion.pyx | 48 +++++++++++++------------- pandas/tests/tslibs/test_conversion.py | 7 ++-- 2 files changed, 28 insertions(+), 27 deletions(-) diff --git a/pandas/_libs/tslibs/tzconversion.pyx b/pandas/_libs/tslibs/tzconversion.pyx index a096b2807c640..d1d6bc40ef288 100644 --- a/pandas/_libs/tslibs/tzconversion.pyx +++ b/pandas/_libs/tslibs/tzconversion.pyx @@ -345,36 +345,28 @@ cpdef int64_t tz_convert_single(int64_t val, tzinfo tz1, tzinfo tz2): converted: int64 """ cdef: - int64_t utc_date int64_t arr[1] + bint to_utc = is_utc(tz2) + tzinfo tz # See GH#17734 We should always be converting either from UTC or to UTC - assert is_utc(tz1) or is_utc(tz2) + assert is_utc(tz1) or to_utc if val == 
NPY_NAT: return val - # Convert to UTC - if is_tzlocal(tz1): - utc_date = _tz_convert_tzlocal_utc(val, tz1, to_utc=True) - elif not is_utc(tz1): - arr[0] = val - utc_date = _tz_convert_dst(arr, tz1, to_utc=True)[0] + if to_utc: + tz = tz1 else: - utc_date = val + tz = tz2 - if is_utc(tz2): - return utc_date - elif is_tzlocal(tz2): - return _tz_convert_tzlocal_utc(utc_date, tz2, to_utc=False) + if is_utc(tz): + return val + elif is_tzlocal(tz): + return _tz_convert_tzlocal_utc(val, tz, to_utc=to_utc) else: - # Convert UTC to other timezone - arr[0] = utc_date - # Note: at least with cython 0.28.3, doing a lookup `[0]` in the next - # line is sensitive to the declared return type of _tz_convert_dst; - # if it is declared as returning ndarray[int64_t], a compile-time error - # is raised. - return _tz_convert_dst(arr, tz2, to_utc=False)[0] + arr[0] = val + return _tz_convert_dst(arr, tz, to_utc=to_utc)[0] def tz_convert(int64_t[:] vals, tzinfo tz1, tzinfo tz2): @@ -392,14 +384,22 @@ def tz_convert(int64_t[:] vals, tzinfo tz1, tzinfo tz2): int64 ndarray of converted """ cdef: - int64_t[:] utc_dates, converted + int64_t[:] converted + bint to_utc = is_utc(tz2) + tzinfo tz + + # See GH#17734 We should always be converting either from UTC or to UTC + assert is_utc(tz1) or to_utc if len(vals) == 0: return np.array([], dtype=np.int64) - # Convert to UTC - utc_dates = _tz_convert_one_way(vals, tz1, to_utc=True) - converted = _tz_convert_one_way(utc_dates, tz2, to_utc=False) + if to_utc: + tz = tz1 + else: + tz = tz2 + + converted = _tz_convert_one_way(vals, tz, to_utc=to_utc) return np.array(converted, dtype=np.int64) diff --git a/pandas/tests/tslibs/test_conversion.py b/pandas/tests/tslibs/test_conversion.py index fd8c9df026674..3a7e06fb14a5f 100644 --- a/pandas/tests/tslibs/test_conversion.py +++ b/pandas/tests/tslibs/test_conversion.py @@ -57,9 +57,10 @@ def test_tz_convert_single_matches_tz_convert(tz_aware_fixture, freq): ], ) def test_tz_convert_corner(arr): - result = tzconversion.tz_convert( - arr, timezones.maybe_get_tz("US/Eastern"), timezones.maybe_get_tz("Asia/Tokyo") - ) + result = tzconversion.tz_convert(arr, timezones.maybe_get_tz("US/Eastern"), UTC) + tm.assert_numpy_array_equal(result, arr) + + result = tzconversion.tz_convert(arr, UTC, timezones.maybe_get_tz("Asia/Tokyo")) tm.assert_numpy_array_equal(result, arr) From a4e19fa521221a30747bda1e0e72768e071f6d17 Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Thu, 2 Jul 2020 10:34:51 -0700 Subject: [PATCH 035/632] TYP: type for get_timezone (#35096) --- pandas/_libs/tslibs/timezones.pxd | 2 +- pandas/_libs/tslibs/timezones.pyx | 9 +++------ pandas/core/arrays/datetimes.py | 17 ++++++----------- pandas/core/indexes/datetimes.py | 4 +--- 4 files changed, 11 insertions(+), 21 deletions(-) diff --git a/pandas/_libs/tslibs/timezones.pxd b/pandas/_libs/tslibs/timezones.pxd index 0179be3cdd8e6..0784b090b3edb 100644 --- a/pandas/_libs/tslibs/timezones.pxd +++ b/pandas/_libs/tslibs/timezones.pxd @@ -8,7 +8,7 @@ cdef bint is_tzlocal(tzinfo tz) cdef bint treat_tz_as_pytz(tzinfo tz) cpdef bint tz_compare(tzinfo start, tzinfo end) -cpdef object get_timezone(object tz) +cpdef object get_timezone(tzinfo tz) cpdef object maybe_get_tz(object tz) cdef get_utcoffset(tzinfo tz, obj) diff --git a/pandas/_libs/tslibs/timezones.pyx b/pandas/_libs/tslibs/timezones.pyx index 5bf47efeccfb0..0460591048801 100644 --- a/pandas/_libs/tslibs/timezones.pyx +++ b/pandas/_libs/tslibs/timezones.pyx @@ -1,6 +1,5 @@ from datetime import timezone -from cpython.datetime 
cimport datetime, tzinfo, PyTZInfo_Check, PyDateTime_IMPORT -PyDateTime_IMPORT +from cpython.datetime cimport datetime, tzinfo # dateutil compat from dateutil.tz import ( @@ -47,7 +46,7 @@ cdef inline bint treat_tz_as_dateutil(tzinfo tz): return hasattr(tz, '_trans_list') and hasattr(tz, '_trans_idx') -cpdef inline object get_timezone(object tz): +cpdef inline object get_timezone(tzinfo tz): """ We need to do several things here: 1) Distinguish between pytz and dateutil timezones @@ -60,9 +59,7 @@ cpdef inline object get_timezone(object tz): the tz name. It needs to be a string so that we can serialize it with UJSON/pytables. maybe_get_tz (below) is the inverse of this process. """ - if not PyTZInfo_Check(tz): - return tz - elif is_utc(tz): + if is_utc(tz): return tz else: if treat_tz_as_dateutil(tz): diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index 12eefec0c149b..64e8582baaa98 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -522,13 +522,6 @@ def tzinfo(self): """ return self.tz - @property # NB: override with cache_readonly in immutable subclasses - def _timezone(self): - """ - Comparable timezone both for pytz / dateutil - """ - return timezones.get_timezone(self.tzinfo) - @property # NB: override with cache_readonly in immutable subclasses def is_normalized(self): """ @@ -617,15 +610,17 @@ def _format_native_types(self, na_rep="NaT", date_format=None, **kwargs): # ----------------------------------------------------------------- # Comparison Methods - def _has_same_tz(self, other): - zzone = self._timezone + def _has_same_tz(self, other) -> bool: # vzone shouldn't be None if value is non-datetime like if isinstance(other, np.datetime64): # convert to Timestamp as np.datetime64 doesn't have tz attr other = Timestamp(other) - vzone = timezones.get_timezone(getattr(other, "tzinfo", "__no_tz__")) - return zzone == vzone + + if not hasattr(other, "tzinfo"): + return False + other_tz = other.tzinfo + return timezones.tz_compare(self.tzinfo, other_tz) def _assert_tzawareness_compat(self, other): # adapted from _Timestamp._assert_tzawareness_compat diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index f3c96db0a8d6e..86c6cdf5b15c7 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -74,9 +74,7 @@ def _new_DatetimeIndex(cls, d): DatetimeArray, wrap=True, ) -@inherit_names( - ["_timezone", "is_normalized", "_resolution_obj"], DatetimeArray, cache=True -) +@inherit_names(["is_normalized", "_resolution_obj"], DatetimeArray, cache=True) @inherit_names( [ "_bool_ops", From a6b5e167a4d3ac6e8bd6e041df85f85294cd972b Mon Sep 17 00:00:00 2001 From: Steffen Rehberg Date: Fri, 3 Jul 2020 16:14:18 +0200 Subject: [PATCH 036/632] DOC: Fix code formatting and typos in Series.tz_localize (#35110) --- pandas/core/generic.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/pandas/core/generic.py b/pandas/core/generic.py index a66cade3b81b0..d892e2487b31c 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -9580,8 +9580,9 @@ def tz_localize( dtype: int64 If the DST transition causes nonexistent times, you can shift these - dates forward or backwards with a timedelta object or `'shift_forward'` - or `'shift_backwards'`. + dates forward or backward with a timedelta object or `'shift_forward'` + or `'shift_backward'`. + >>> s = pd.Series(range(2), ... index=pd.DatetimeIndex(['2015-03-29 02:30:00', ... 
'2015-03-29 03:30:00'])) From 7e25af85fa6fc91bc606d130a4f81b1af8fd4d91 Mon Sep 17 00:00:00 2001 From: Tom Augspurger Date: Fri, 3 Jul 2020 10:07:31 -0500 Subject: [PATCH 037/632] PERF: Fix quantile perf regression (#35101) --- pandas/util/_validators.py | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/pandas/util/_validators.py b/pandas/util/_validators.py index bb6c6de441558..fa7201a5188a5 100644 --- a/pandas/util/_validators.py +++ b/pandas/util/_validators.py @@ -371,14 +371,13 @@ def validate_percentile(q: Union[float, Iterable[float]]) -> np.ndarray: ValueError if percentiles are not in given interval([0, 1]). """ q_arr = np.asarray(q) - msg = ( - "percentiles should all be in the interval [0, 1]." - f"Try {q_arr / 100.0} instead." - ) + # Don't change this to an f-string. The string formatting + # is too expensive for cases where we don't need it. + msg = "percentiles should all be in the interval [0, 1]. Try {} instead." if q_arr.ndim == 0: if not 0 <= q_arr <= 1: - raise ValueError(msg) + raise ValueError(msg.format(q_arr / 100.0)) else: if not all(0 <= qs <= 1 for qs in q_arr): - raise ValueError(msg) + raise ValueError(msg.format(q_arr / 100.0)) return q_arr From 19e8fcc2719575ff0d6a444d035645d009aa2bfa Mon Sep 17 00:00:00 2001 From: Terji Petersen Date: Sat, 4 Jul 2020 16:30:15 +0100 Subject: [PATCH 038/632] CLN: convert lambda to function (#35117) --- pandas/io/formats/printing.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/pandas/io/formats/printing.py b/pandas/io/formats/printing.py index 36e774305b577..1cf79dc105901 100644 --- a/pandas/io/formats/printing.py +++ b/pandas/io/formats/printing.py @@ -276,9 +276,13 @@ class TableSchemaFormatter(BaseFormatter): formatters[mimetype].enabled = False -default_pprint = lambda x, max_seq_items=None: pprint_thing( - x, escape_chars=("\t", "\r", "\n"), quote_strings=True, max_seq_items=max_seq_items -) +def default_pprint(thing: Any, max_seq_items: Optional[int] = None) -> str: + return pprint_thing( + thing, + escape_chars=("\t", "\r", "\n"), + quote_strings=True, + max_seq_items=max_seq_items, + ) def format_object_summary( From 77f4a4d673e2d2b3f7396a94818771d332bd913c Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Sun, 5 Jul 2020 17:12:46 -0700 Subject: [PATCH 039/632] REF: dont consolidate in BlockManager.equals (#34962) * REF: dont consolidate in BlockManager.equals * doctest fixup * Remove Block.equals * simplify, comments --- pandas/core/internals/blocks.py | 27 +------------ pandas/core/internals/managers.py | 48 +++++++++++++++--------- pandas/tests/internals/test_internals.py | 4 +- 3 files changed, 33 insertions(+), 46 deletions(-) diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index 6207785fb2975..d8779dae7c384 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -56,12 +56,7 @@ ABCPandasArray, ABCSeries, ) -from pandas.core.dtypes.missing import ( - _isna_compat, - array_equivalent, - is_valid_nat_for_dtype, - isna, -) +from pandas.core.dtypes.missing import _isna_compat, is_valid_nat_for_dtype, isna import pandas.core.algorithms as algos from pandas.core.array_algos.transforms import shift @@ -1383,11 +1378,6 @@ def where_func(cond, values, other): return result_blocks - def equals(self, other) -> bool: - if self.dtype != other.dtype or self.shape != other.shape: - return False - return array_equivalent(self.values, other.values) - def _unstack(self, unstacker, fill_value, new_placement): """ 
Return a list of unstacked blocks of self @@ -1881,9 +1871,6 @@ def where( return [self.make_block_same_class(result, placement=self.mgr_locs)] - def equals(self, other) -> bool: - return self.values.equals(other.values) - def _unstack(self, unstacker, fill_value, new_placement): # ExtensionArray-safe unstack. # We override ObjectBlock._unstack, which unstacks directly on the @@ -1929,12 +1916,6 @@ class NumericBlock(Block): class FloatOrComplexBlock(NumericBlock): __slots__ = () - def equals(self, other) -> bool: - if self.dtype != other.dtype or self.shape != other.shape: - return False - left, right = self.values, other.values - return ((left == right) | (np.isnan(left) & np.isnan(right))).all() - class FloatBlock(FloatOrComplexBlock): __slots__ = () @@ -2298,12 +2279,6 @@ def setitem(self, indexer, value): ) return newb.setitem(indexer, value) - def equals(self, other) -> bool: - # override for significant performance improvement - if self.dtype != other.dtype or self.shape != other.shape: - return False - return (self.values.view("i8") == other.values.view("i8")).all() - def quantile(self, qs, interpolation="linear", axis=0): naive = self.values.view("M8[ns]") diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py index b2f2277d9a7dc..c82670106d3b6 100644 --- a/pandas/core/internals/managers.py +++ b/pandas/core/internals/managers.py @@ -19,6 +19,7 @@ from pandas.core.dtypes.common import ( DT64NS_DTYPE, is_datetimelike_v_numeric, + is_dtype_equal, is_extension_array_dtype, is_list_like, is_numeric_v_string_like, @@ -27,9 +28,10 @@ from pandas.core.dtypes.concat import concat_compat from pandas.core.dtypes.dtypes import ExtensionDtype from pandas.core.dtypes.generic import ABCDataFrame, ABCSeries -from pandas.core.dtypes.missing import isna +from pandas.core.dtypes.missing import array_equivalent, isna import pandas.core.algorithms as algos +from pandas.core.arrays import ExtensionArray from pandas.core.arrays.sparse import SparseDtype from pandas.core.base import PandasObject import pandas.core.common as com @@ -1409,29 +1411,39 @@ def take(self, indexer, axis: int = 1, verify: bool = True, convert: bool = True new_axis=new_labels, indexer=indexer, axis=axis, allow_dups=True ) - def equals(self, other) -> bool: + def equals(self, other: "BlockManager") -> bool: self_axes, other_axes = self.axes, other.axes if len(self_axes) != len(other_axes): return False if not all(ax1.equals(ax2) for ax1, ax2 in zip(self_axes, other_axes)): return False - self._consolidate_inplace() - other._consolidate_inplace() - if len(self.blocks) != len(other.blocks): - return False - # canonicalize block order, using a tuple combining the mgr_locs - # then type name because there might be unconsolidated - # blocks (say, Categorical) which can only be distinguished by - # the iteration order - def canonicalize(block): - return (block.mgr_locs.as_array.tolist(), block.dtype.name) - - self_blocks = sorted(self.blocks, key=canonicalize) - other_blocks = sorted(other.blocks, key=canonicalize) - return all( - block.equals(oblock) for block, oblock in zip(self_blocks, other_blocks) - ) + if self.ndim == 1: + # For SingleBlockManager (i.e.Series) + if other.ndim != 1: + return False + left = self.blocks[0].values + right = other.blocks[0].values + if not is_dtype_equal(left.dtype, right.dtype): + return False + elif isinstance(left, ExtensionArray): + return left.equals(right) + else: + return array_equivalent(left, right) + + for i in range(len(self.items)): + # Check column-wise, return 
False if any column doesnt match + left = self.iget_values(i) + right = other.iget_values(i) + if not is_dtype_equal(left.dtype, right.dtype): + return False + elif isinstance(left, ExtensionArray): + if not left.equals(right): + return False + else: + if not array_equivalent(left, right): + return False + return True def unstack(self, unstacker, fill_value) -> "BlockManager": """ diff --git a/pandas/tests/internals/test_internals.py b/pandas/tests/internals/test_internals.py index 5fd44d7cd74a9..06ccdd2484a2a 100644 --- a/pandas/tests/internals/test_internals.py +++ b/pandas/tests/internals/test_internals.py @@ -377,7 +377,7 @@ def test_copy(self, mgr): for blk, cp_blk in zip(mgr.blocks, cp.blocks): # view assertion - assert cp_blk.equals(blk) + tm.assert_equal(cp_blk.values, blk.values) if isinstance(blk.values, np.ndarray): assert cp_blk.values.base is blk.values.base else: @@ -389,7 +389,7 @@ def test_copy(self, mgr): # copy assertion we either have a None for a base or in case of # some blocks it is an array (e.g. datetimetz), but was copied - assert cp_blk.equals(blk) + tm.assert_equal(cp_blk.values, blk.values) if not isinstance(cp_blk.values, np.ndarray): assert cp_blk.values._data.base is not blk.values._data.base else: From 8f21cb5135929ad4e23082b9cff18b2ea38414db Mon Sep 17 00:00:00 2001 From: Alex Kirko Date: Mon, 6 Jul 2020 17:19:43 +0300 Subject: [PATCH 040/632] CI: pin isort version (#35136) --- environment.yml | 2 +- requirements-dev.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/environment.yml b/environment.yml index 2429f4ab3d699..24c0832b6fb4c 100644 --- a/environment.yml +++ b/environment.yml @@ -20,7 +20,7 @@ dependencies: - flake8<3.8.0 # temporary pin, GH#34150 - flake8-comprehensions>=3.1.0 # used by flake8, linting of unnecessary comprehensions - flake8-rst>=0.6.0,<=0.7.0 # linting of code blocks in rst files - - isort # check that imports are in the right order + - isort=4.3.21 # check that imports are in the right order - mypy=0.730 - pycodestyle # used by flake8 diff --git a/requirements-dev.txt b/requirements-dev.txt index 44c975a3b3cfb..eda0fa8f32b19 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -11,7 +11,7 @@ cpplint flake8<3.8.0 flake8-comprehensions>=3.1.0 flake8-rst>=0.6.0,<=0.7.0 -isort +isort==4.3.21 mypy==0.730 pycodestyle gitpython From fdaa5e4f35dd8ee84666476b7a2567cc08e7229f Mon Sep 17 00:00:00 2001 From: Joris Van den Bossche Date: Mon, 6 Jul 2020 16:30:35 +0200 Subject: [PATCH 041/632] CI: pin sphinx <= 3.1.1 for autodoc failure (#35139) --- environment.yml | 2 +- requirements-dev.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/environment.yml b/environment.yml index 24c0832b6fb4c..80dbffebf6b9d 100644 --- a/environment.yml +++ b/environment.yml @@ -27,7 +27,7 @@ dependencies: # documentation - gitpython # obtain contributors from git for whatsnew - gitdb2=2.0.6 # GH-32060 - - sphinx + - sphinx<=3.1.1 # documentation (jupyter notebooks) - nbconvert>=5.4.1 diff --git a/requirements-dev.txt b/requirements-dev.txt index eda0fa8f32b19..886f400caf44f 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -16,7 +16,7 @@ mypy==0.730 pycodestyle gitpython gitdb2==2.0.6 -sphinx +sphinx<=3.1.1 nbconvert>=5.4.1 nbsphinx pandoc From 15884f3a907f13ce3306799560dfc707596e3dff Mon Sep 17 00:00:00 2001 From: Tom Augspurger Date: Mon, 6 Jul 2020 10:19:36 -0500 Subject: [PATCH 042/632] Fix numpy warning (#35085) --- pandas/conftest.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) 
diff --git a/pandas/conftest.py b/pandas/conftest.py index d74c43069574f..5fe4cc45b0006 100644 --- a/pandas/conftest.py +++ b/pandas/conftest.py @@ -256,9 +256,7 @@ def nselect_method(request): # ---------------------------------------------------------------- # Missing values & co. # ---------------------------------------------------------------- -@pytest.fixture( - params=[None, np.nan, pd.NaT, float("nan"), np.float("NaN"), pd.NA], ids=str -) +@pytest.fixture(params=[None, np.nan, pd.NaT, float("nan"), pd.NA], ids=str) def nulls_fixture(request): """ Fixture for each null type in pandas. From 74538104bc04238f69ea6c3a6fbc5468fe02ed12 Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Mon, 6 Jul 2020 11:01:30 -0700 Subject: [PATCH 043/632] TYP: type unit as str (#35099) --- pandas/_libs/tslib.pyx | 6 +++--- pandas/_libs/tslibs/conversion.pxd | 2 +- pandas/_libs/tslibs/conversion.pyx | 20 ++++++++++++++++---- pandas/_libs/tslibs/timedeltas.pxd | 2 +- pandas/_libs/tslibs/timedeltas.pyx | 6 +++--- pandas/_libs/tslibs/timestamps.pyx | 1 + 6 files changed, 25 insertions(+), 12 deletions(-) diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx index f494e74bde55f..8d8a62a58f25f 100644 --- a/pandas/_libs/tslib.pyx +++ b/pandas/_libs/tslib.pyx @@ -363,8 +363,8 @@ def format_array_from_datetime( def array_with_unit_to_datetime( ndarray values, - object unit, - str errors='coerce' + str unit, + str errors="coerce" ): """ Convert the ndarray to datetime according to the time unit. @@ -384,7 +384,7 @@ def array_with_unit_to_datetime( ---------- values : ndarray of object Date-like objects to convert. - unit : object + unit : str Time unit to use during conversion. errors : str, default 'raise' Error behavior when parsing. diff --git a/pandas/_libs/tslibs/conversion.pxd b/pandas/_libs/tslibs/conversion.pxd index 2cf75944a8196..0eb94fecf7d6b 100644 --- a/pandas/_libs/tslibs/conversion.pxd +++ b/pandas/_libs/tslibs/conversion.pxd @@ -13,7 +13,7 @@ cdef class _TSObject: bint fold -cdef convert_to_tsobject(object ts, tzinfo tz, object unit, +cdef convert_to_tsobject(object ts, tzinfo tz, str unit, bint dayfirst, bint yearfirst, int32_t nanos=*) diff --git a/pandas/_libs/tslibs/conversion.pyx b/pandas/_libs/tslibs/conversion.pyx index 95500f66db156..a1f074b1b29a8 100644 --- a/pandas/_libs/tslibs/conversion.pyx +++ b/pandas/_libs/tslibs/conversion.pyx @@ -56,8 +56,19 @@ TD64NS_DTYPE = np.dtype('m8[ns]') # Unit Conversion Helpers cdef inline int64_t cast_from_unit(object ts, str unit) except? -1: - """ return a casting of the unit represented to nanoseconds - round the fractional part of a float to our precision, p """ + """ + Return a casting of the unit represented to nanoseconds + round the fractional part of a float to our precision, p. 
+ + Parameters + ---------- + ts : int, float, or None + unit : str + + Returns + ------- + int64_t + """ cdef: int64_t m int p @@ -307,7 +318,7 @@ cdef class _TSObject: return self.value -cdef convert_to_tsobject(object ts, tzinfo tz, object unit, +cdef convert_to_tsobject(object ts, tzinfo tz, str unit, bint dayfirst, bint yearfirst, int32_t nanos=0): """ Extract datetime and int64 from any of: @@ -497,7 +508,7 @@ cdef _TSObject _create_tsobject_tz_using_offset(npy_datetimestruct dts, return obj -cdef _TSObject _convert_str_to_tsobject(object ts, tzinfo tz, object unit, +cdef _TSObject _convert_str_to_tsobject(object ts, tzinfo tz, str unit, bint dayfirst=False, bint yearfirst=False): """ @@ -513,6 +524,7 @@ cdef _TSObject _convert_str_to_tsobject(object ts, tzinfo tz, object unit, Value to be converted to _TSObject tz : tzinfo or None timezone for the timezone-aware output + unit : str or None dayfirst : bool, default False When parsing an ambiguous date string, interpret e.g. "3/4/1975" as April 3, as opposed to the standard US interpretation March 4. diff --git a/pandas/_libs/tslibs/timedeltas.pxd b/pandas/_libs/tslibs/timedeltas.pxd index 70a418d7803d1..4142861e9ad38 100644 --- a/pandas/_libs/tslibs/timedeltas.pxd +++ b/pandas/_libs/tslibs/timedeltas.pxd @@ -3,7 +3,7 @@ from numpy cimport int64_t # Exposed for tslib, not intended for outside use. cpdef int64_t delta_to_nanoseconds(delta) except? -1 -cdef convert_to_timedelta64(object ts, object unit) +cdef convert_to_timedelta64(object ts, str unit) cdef bint is_any_td_scalar(object obj) diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx index 2862e62e3d522..8f3a599bf107c 100644 --- a/pandas/_libs/tslibs/timedeltas.pyx +++ b/pandas/_libs/tslibs/timedeltas.pyx @@ -160,7 +160,7 @@ cpdef int64_t delta_to_nanoseconds(delta) except? -1: raise TypeError(type(delta)) -cdef convert_to_timedelta64(object ts, object unit): +cdef convert_to_timedelta64(object ts, str unit): """ Convert an incoming object to a timedelta64 if possible. Before calling, unit must be standardized to avoid repeated unit conversion @@ -218,7 +218,7 @@ cdef convert_to_timedelta64(object ts, object unit): @cython.boundscheck(False) @cython.wraparound(False) -def array_to_timedelta64(object[:] values, unit=None, errors='raise'): +def array_to_timedelta64(object[:] values, str unit=None, str errors="raise"): """ Convert an ndarray to an array of timedeltas. If errors == 'coerce', coerce non-convertible objects to NaT. Otherwise, raise. 
@@ -470,7 +470,7 @@ cdef inline timedelta_from_spec(object number, object frac, object unit): return cast_from_unit(float(n), unit) -cpdef inline str parse_timedelta_unit(object unit): +cpdef inline str parse_timedelta_unit(str unit): """ Parameters ---------- diff --git a/pandas/_libs/tslibs/timestamps.pyx b/pandas/_libs/tslibs/timestamps.pyx index e104b722ea119..b79a5c49bdd10 100644 --- a/pandas/_libs/tslibs/timestamps.pyx +++ b/pandas/_libs/tslibs/timestamps.pyx @@ -1050,6 +1050,7 @@ class Timestamp(_Timestamp): nanosecond = hour tz = minute freq = None + unit = None if getattr(ts_input, 'tzinfo', None) is not None and tz is not None: raise ValueError("Cannot pass a datetime or Timestamp with tzinfo with " From efcf4b424f759c7785e54dbf67c5ab0c53c34be4 Mon Sep 17 00:00:00 2001 From: Simon Hawkins Date: Mon, 6 Jul 2020 21:55:02 +0100 Subject: [PATCH 044/632] BUG: reset_index is passing a bad dtype to NumPy (#35111) --- pandas/core/frame.py | 10 +++++++--- pandas/tests/frame/methods/test_reset_index.py | 17 ++++++++++++++--- pandas/tests/series/methods/test_reset_index.py | 17 ++++++++++++++--- 3 files changed, 35 insertions(+), 9 deletions(-) diff --git a/pandas/core/frame.py b/pandas/core/frame.py index b6993e9ed851a..87041341ac3a6 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -75,6 +75,7 @@ from pandas.core.dtypes.cast import ( cast_scalar_to_array, coerce_to_dtypes, + construct_1d_arraylike_from_scalar, find_common_type, infer_dtype_from_scalar, invalidate_string_dtypes, @@ -109,7 +110,7 @@ needs_i8_conversion, pandas_dtype, ) -from pandas.core.dtypes.missing import isna, notna +from pandas.core.dtypes.missing import isna, na_value_for_dtype, notna from pandas.core import algorithms, common as com, nanops, ops from pandas.core.accessor import CachedAccessor @@ -4731,8 +4732,11 @@ def _maybe_casted_values(index, labels=None): # we can have situations where the whole mask is -1, # meaning there is nothing found in labels, so make all nan's if mask.all(): - values = np.empty(len(mask), dtype=index.dtype) - values.fill(np.nan) + dtype = index.dtype + fill_value = na_value_for_dtype(dtype) + values = construct_1d_arraylike_from_scalar( + fill_value, len(mask), dtype + ) else: values = values.take(labels) diff --git a/pandas/tests/frame/methods/test_reset_index.py b/pandas/tests/frame/methods/test_reset_index.py index 79442acccb326..cf0bbe144caa5 100644 --- a/pandas/tests/frame/methods/test_reset_index.py +++ b/pandas/tests/frame/methods/test_reset_index.py @@ -3,6 +3,7 @@ import numpy as np import pytest +import pandas as pd from pandas import ( DataFrame, Index, @@ -299,9 +300,19 @@ def test_reset_index_range(self): tm.assert_frame_equal(result, expected) -def test_reset_index_dtypes_on_empty_frame_with_multiindex(): +@pytest.mark.parametrize( + "array, dtype", + [ + (["a", "b"], object), + ( + pd.period_range("12-1-2000", periods=2, freq="Q-DEC"), + pd.PeriodDtype(freq="Q-DEC"), + ), + ], +) +def test_reset_index_dtypes_on_empty_frame_with_multiindex(array, dtype): # GH 19602 - Preserve dtype on empty DataFrame with MultiIndex - idx = MultiIndex.from_product([[0, 1], [0.5, 1.0], ["a", "b"]]) + idx = MultiIndex.from_product([[0, 1], [0.5, 1.0], array]) result = DataFrame(index=idx)[:0].reset_index().dtypes - expected = Series({"level_0": np.int64, "level_1": np.float64, "level_2": object}) + expected = Series({"level_0": np.int64, "level_1": np.float64, "level_2": dtype}) tm.assert_series_equal(result, expected) diff --git a/pandas/tests/series/methods/test_reset_index.py 
b/pandas/tests/series/methods/test_reset_index.py index a11590d42552d..597b43a370ef5 100644 --- a/pandas/tests/series/methods/test_reset_index.py +++ b/pandas/tests/series/methods/test_reset_index.py @@ -1,6 +1,7 @@ import numpy as np import pytest +import pandas as pd from pandas import DataFrame, Index, MultiIndex, RangeIndex, Series import pandas._testing as tm @@ -110,11 +111,21 @@ def test_reset_index_drop_errors(self): s.reset_index("wrong", drop=True) -def test_reset_index_dtypes_on_empty_series_with_multiindex(): +@pytest.mark.parametrize( + "array, dtype", + [ + (["a", "b"], object), + ( + pd.period_range("12-1-2000", periods=2, freq="Q-DEC"), + pd.PeriodDtype(freq="Q-DEC"), + ), + ], +) +def test_reset_index_dtypes_on_empty_series_with_multiindex(array, dtype): # GH 19602 - Preserve dtype on empty Series with MultiIndex - idx = MultiIndex.from_product([[0, 1], [0.5, 1.0], ["a", "b"]]) + idx = MultiIndex.from_product([[0, 1], [0.5, 1.0], array]) result = Series(dtype=object, index=idx)[:0].reset_index().dtypes expected = Series( - {"level_0": np.int64, "level_1": np.float64, "level_2": object, 0: object} + {"level_0": np.int64, "level_1": np.float64, "level_2": dtype, 0: object} ) tm.assert_series_equal(result, expected) From 9e4461811a191c328b073642355f70ceb163197e Mon Sep 17 00:00:00 2001 From: Gabriel Tutui Date: Mon, 6 Jul 2020 18:28:07 -0300 Subject: [PATCH 045/632] TST: Add test for category equalness on applies (#21239) (#35125) --- pandas/tests/frame/test_apply.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/pandas/tests/frame/test_apply.py b/pandas/tests/frame/test_apply.py index 114b3c0d0a3fc..3a32278e2a4b1 100644 --- a/pandas/tests/frame/test_apply.py +++ b/pandas/tests/frame/test_apply.py @@ -793,6 +793,18 @@ def test_apply_with_byte_string(self): result = df.apply(lambda x: x.astype("object")) tm.assert_frame_equal(result, expected) + @pytest.mark.parametrize("val", ["asd", 12, None, np.NaN]) + def test_apply_category_equalness(self, val): + # Check if categorical comparisons on apply, GH 21239 + df_values = ["asd", None, 12, "asd", "cde", np.NaN] + df = pd.DataFrame({"a": df_values}, dtype="category") + + result = df.a.apply(lambda x: x == val) + expected = pd.Series( + [np.NaN if pd.isnull(x) else x == val for x in df_values], name="a" + ) + tm.assert_series_equal(result, expected) + class TestInferOutputShape: # the user has supplied an opaque UDF where From 7e9f66e7285d5bb16eada54c68ad4e8976ee6c6c Mon Sep 17 00:00:00 2001 From: Matthew Roeschke Date: Mon, 6 Jul 2020 15:37:56 -0700 Subject: [PATCH 046/632] ENH: Add support for calculating EWMA with a time component (#34839) --- asv_bench/benchmarks/rolling.py | 7 ++ doc/source/user_guide/computation.rst | 19 +++++ doc/source/whatsnew/v1.1.0.rst | 1 + pandas/_libs/window/aggregations.pyx | 61 ++++++++++++++--- pandas/core/generic.py | 2 + pandas/core/window/ewm.py | 99 +++++++++++++++++++++++---- pandas/tests/window/conftest.py | 8 ++- pandas/tests/window/test_ewm.py | 60 +++++++++++++++- 8 files changed, 233 insertions(+), 24 deletions(-) diff --git a/asv_bench/benchmarks/rolling.py b/asv_bench/benchmarks/rolling.py index b1f6d052919bd..f0dd908f81043 100644 --- a/asv_bench/benchmarks/rolling.py +++ b/asv_bench/benchmarks/rolling.py @@ -91,11 +91,18 @@ class EWMMethods: def setup(self, constructor, window, dtype, method): N = 10 ** 5 arr = (100 * np.random.random(N)).astype(dtype) + times = pd.date_range("1900", periods=N, freq="23s") self.ewm = getattr(pd, constructor)(arr).ewm(halflife=window) + 
self.ewm_times = getattr(pd, constructor)(arr).ewm(
+            halflife="1 Day", times=times
+        )
 
     def time_ewm(self, constructor, window, dtype, method):
         getattr(self.ewm, method)()
 
+    def time_ewm_times(self, constructor, window, dtype, method):
+        self.ewm_times.mean()
+
 
 class VariableWindowMethods(Methods):
     params = (
diff --git a/doc/source/user_guide/computation.rst b/doc/source/user_guide/computation.rst
index f36c6e06044f2..d7875e5b8d861 100644
--- a/doc/source/user_guide/computation.rst
+++ b/doc/source/user_guide/computation.rst
@@ -1095,6 +1095,25 @@ and **alpha** to the EW functions:
    one half.
 * **Alpha** specifies the smoothing factor directly.
 
+.. versionadded:: 1.1.0
+
+You can also specify ``halflife`` in terms of a timedelta convertible unit to specify the amount of
+time it takes for an observation to decay to half its value when also specifying a sequence
+of ``times``.
+
+.. ipython:: python
+
+   df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]})
+   df
+   times = ['2020-01-01', '2020-01-03', '2020-01-10', '2020-01-15', '2020-01-17']
+   df.ewm(halflife='4 days', times=pd.DatetimeIndex(times)).mean()
+
+The following formula is used to compute the exponentially weighted mean with an input vector of times:
+
+.. math::
+
+    y_t = \frac{\sum_{i=0}^t 0.5^\frac{t_{t} - t_{i}}{\lambda} x_{t-i}}{\sum_{i=0}^t 0.5^\frac{t_{t} - t_{i}}{\lambda}}
+
 Here is an example for a univariate time series:
 
 .. ipython:: python
diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst
index 9bd4ddbb624d9..563ae6d1d2ed1 100644
--- a/doc/source/whatsnew/v1.1.0.rst
+++ b/doc/source/whatsnew/v1.1.0.rst
@@ -329,6 +329,7 @@ Other enhancements
 - :meth:`DataFrame.to_excel` can now also write OpenOffice spreadsheet (.ods) files (:issue:`27222`)
 - :meth:`~Series.explode` now accepts ``ignore_index`` to reset the index, similarly to :meth:`pd.concat` or :meth:`DataFrame.sort_values` (:issue:`34932`).
 - :meth:`read_csv` now accepts string values like "0", "0.0", "1", "1.0" as convertible to the nullable boolean dtype (:issue:`34859`)
+- :class:`pandas.core.window.ExponentialMovingWindow` now supports a ``times`` argument that allows ``mean`` to be calculated with observations spaced by the timestamps in ``times`` (:issue:`34839`)
 
 .. ---------------------------------------------------------------------------
 
diff --git a/pandas/_libs/window/aggregations.pyx b/pandas/_libs/window/aggregations.pyx
index ec4a412b5adc7..362d0e6263697 100644
--- a/pandas/_libs/window/aggregations.pyx
+++ b/pandas/_libs/window/aggregations.pyx
@@ -8,7 +8,7 @@ from libc.stdlib cimport malloc, free
 
 import numpy as np
 cimport numpy as cnp
-from numpy cimport ndarray, int64_t, float64_t, float32_t
+from numpy cimport ndarray, int64_t, float64_t, float32_t, uint8_t
 
 cnp.import_array()
 
@@ -1752,6 +1752,51 @@ def roll_weighted_var(float64_t[:] values, float64_t[:] weights,
 # ----------------------------------------------------------------------
 # Exponentially weighted moving average
 
+def ewma_time(ndarray[float64_t] vals, int minp, ndarray[int64_t] times,
+              int64_t halflife):
+    """
+    Compute exponentially-weighted moving average using halflife and time
+    distances.
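
As a cross-check on the formula above, here is a plain-NumPy reference
implementation (an illustrative O(n**2) sketch; the Cython ``ewma_time`` being
added here is the optimized equivalent)::

    import numpy as np

    def ewma_at_times(values, times_ns, halflife_ns):
        # Each output point is a weighted mean of all non-NaN observations
        # so far, weighted by 0.5 ** (elapsed_time / halflife).
        out = np.empty(len(values))
        for i in range(len(values)):
            obs = values[: i + 1]
            mask = ~np.isnan(obs)
            w = 0.5 ** ((times_ns[i] - times_ns[: i + 1][mask]) / halflife_ns)
            out[i] = np.sum(w * obs[mask]) / np.sum(w)
        return out

    values = np.array([0.0, 1.0, 2.0, np.nan, 4.0])
    times = np.array(
        ["2020-01-01", "2020-01-03", "2020-01-10", "2020-01-15", "2020-01-17"],
        dtype="datetime64[ns]",
    )
    halflife_ns = np.timedelta64(4, "D").astype("timedelta64[ns]").astype(np.int64)
    print(ewma_at_times(values, times.astype(np.int64), halflife_ns))
    # [0.0, 0.585786..., 1.523889..., 1.523889..., 3.233686...]
    # matching df.ewm(halflife='4 days', times=...).mean() from the docs above
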
+ + Parameters + ---------- + vals : ndarray[float_64] + minp : int + times : ndarray[int64] + halflife : int64 + + Returns + ------- + ndarray + """ + cdef: + Py_ssize_t i, num_not_nan = 0, N = len(vals) + bint is_not_nan + float64_t last_result + ndarray[uint8_t] mask = np.zeros(N, dtype=np.uint8) + ndarray[float64_t] weights, observations, output = np.empty(N, dtype=np.float64) + + if N == 0: + return output + + last_result = vals[0] + + for i in range(N): + is_not_nan = vals[i] == vals[i] + num_not_nan += is_not_nan + if is_not_nan: + mask[i] = 1 + weights = 0.5 ** ((times[i] - times[mask.view(np.bool_)]) / halflife) + observations = vals[mask.view(np.bool_)] + last_result = np.sum(weights * observations) / np.sum(weights) + + if num_not_nan >= minp: + output[i] = last_result + else: + output[i] = NaN + + return output + def ewma(float64_t[:] vals, float64_t com, bint adjust, bint ignore_na, int minp): """ @@ -1761,9 +1806,9 @@ def ewma(float64_t[:] vals, float64_t com, bint adjust, bint ignore_na, int minp ---------- vals : ndarray (float64 type) com : float64 - adjust: int - ignore_na: bool - minp: int + adjust : int + ignore_na : bool + minp : int Returns ------- @@ -1831,10 +1876,10 @@ def ewmcov(float64_t[:] input_x, float64_t[:] input_y, input_x : ndarray (float64 type) input_y : ndarray (float64 type) com : float64 - adjust: int - ignore_na: bool - minp: int - bias: int + adjust : int + ignore_na : bool + minp : int + bias : int Returns ------- diff --git a/pandas/core/generic.py b/pandas/core/generic.py index d892e2487b31c..0b76960558721 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -10518,6 +10518,7 @@ def ewm( adjust=True, ignore_na=False, axis=0, + times=None, ): axis = self._get_axis_number(axis) return ExponentialMovingWindow( @@ -10530,6 +10531,7 @@ def ewm( adjust=adjust, ignore_na=ignore_na, axis=axis, + times=times, ) cls.ewm = ewm diff --git a/pandas/core/window/ewm.py b/pandas/core/window/ewm.py index ee80f80b320e4..7a2d8e84bec76 100644 --- a/pandas/core/window/ewm.py +++ b/pandas/core/window/ewm.py @@ -1,18 +1,21 @@ +import datetime from functools import partial from textwrap import dedent from typing import Optional, Union import numpy as np +from pandas._libs.tslibs import Timedelta import pandas._libs.window.aggregations as window_aggregations -from pandas._typing import FrameOrSeries +from pandas._typing import FrameOrSeries, TimedeltaConvertibleTypes from pandas.compat.numpy import function as nv from pandas.util._decorators import Appender, Substitution, doc +from pandas.core.dtypes.common import is_datetime64_ns_dtype from pandas.core.dtypes.generic import ABCDataFrame from pandas.core.base import DataError -import pandas.core.common as com +import pandas.core.common as common from pandas.core.window.common import _doc_template, _shared_docs, zsqrt from pandas.core.window.rolling import _flex_binary_moment, _Rolling @@ -32,7 +35,7 @@ def get_center_of_mass( halflife: Optional[float], alpha: Optional[float], ) -> float: - valid_count = com.count_not_none(comass, span, halflife, alpha) + valid_count = common.count_not_none(comass, span, halflife, alpha) if valid_count > 1: raise ValueError("comass, span, halflife, and alpha are mutually exclusive") @@ -76,10 +79,17 @@ class ExponentialMovingWindow(_Rolling): span : float, optional Specify decay in terms of span, :math:`\alpha = 2 / (span + 1)`, for :math:`span \geq 1`. 
- halflife : float, optional + halflife : float, str, timedelta, optional Specify decay in terms of half-life, :math:`\alpha = 1 - \exp\left(-\ln(2) / halflife\right)`, for :math:`halflife > 0`. + + If ``times`` is specified, the time unit (str or timedelta) over which an + observation decays to half its value. Only applicable to ``mean()`` + and halflife value will not apply to the other functions. + + .. versionadded:: 1.1.0 + alpha : float, optional Specify smoothing factor :math:`\alpha` directly, :math:`0 < \alpha \leq 1`. @@ -124,6 +134,18 @@ class ExponentialMovingWindow(_Rolling): axis : {0, 1}, default 0 The axis to use. The value 0 identifies the rows, and 1 identifies the columns. + times : str, np.ndarray, Series, default None + + .. versionadded:: 1.1.0 + + Times corresponding to the observations. Must be monotonically increasing and + ``datetime64[ns]`` dtype. + + If str, the name of the column in the DataFrame representing the times. + + If 1-D array like, a sequence with the same shape as the observations. + + Only applicable to ``mean()``. Returns ------- @@ -159,6 +181,17 @@ class ExponentialMovingWindow(_Rolling): 2 1.615385 3 1.615385 4 3.670213 + + Specifying ``times`` with a timedelta ``halflife`` when computing mean. + + >>> times = ['2020-01-01', '2020-01-03', '2020-01-10', '2020-01-15', '2020-01-17'] + >>> df.ewm(halflife='4 days', times=pd.DatetimeIndex(times)).mean() + B + 0 0.000000 + 1 0.585786 + 2 1.523889 + 3 1.523889 + 4 3.233686 """ _attributes = ["com", "min_periods", "adjust", "ignore_na", "axis"] @@ -168,20 +201,49 @@ def __init__( obj, com: Optional[float] = None, span: Optional[float] = None, - halflife: Optional[float] = None, + halflife: Optional[Union[float, TimedeltaConvertibleTypes]] = None, alpha: Optional[float] = None, min_periods: int = 0, adjust: bool = True, ignore_na: bool = False, axis: int = 0, + times: Optional[Union[str, np.ndarray, FrameOrSeries]] = None, ): + self.com: Optional[float] self.obj = obj - self.com = get_center_of_mass(com, span, halflife, alpha) self.min_periods = max(int(min_periods), 1) self.adjust = adjust self.ignore_na = ignore_na self.axis = axis self.on = None + if times is not None: + if isinstance(times, str): + times = self._selected_obj[times] + if not is_datetime64_ns_dtype(times): + raise ValueError("times must be datetime64[ns] dtype.") + if len(times) != len(obj): + raise ValueError("times must be the same length as the object.") + if not isinstance(halflife, (str, datetime.timedelta)): + raise ValueError( + "halflife must be a string or datetime.timedelta object" + ) + self.times = np.asarray(times.astype(np.int64)) + self.halflife = Timedelta(halflife).value + # Halflife is no longer applicable when calculating COM + # But allow COM to still be calculated if the user passes other decay args + if common.count_not_none(com, span, alpha) > 0: + self.com = get_center_of_mass(com, span, None, alpha) + else: + self.com = None + else: + if halflife is not None and isinstance(halflife, (str, datetime.timedelta)): + raise ValueError( + "halflife can only be a timedelta convertible argument if " + "times is not None." + ) + self.times = None + self.halflife = None + self.com = get_center_of_mass(com, span, halflife, alpha) @property def _constructor(self): @@ -277,14 +339,23 @@ def mean(self, *args, **kwargs): Arguments and keyword arguments to be passed into func. 
""" nv.validate_window_func("mean", args, kwargs) - window_func = self._get_roll_func("ewma") - window_func = partial( - window_func, - com=self.com, - adjust=self.adjust, - ignore_na=self.ignore_na, - minp=self.min_periods, - ) + if self.times is not None: + window_func = self._get_roll_func("ewma_time") + window_func = partial( + window_func, + minp=self.min_periods, + times=self.times, + halflife=self.halflife, + ) + else: + window_func = self._get_roll_func("ewma") + window_func = partial( + window_func, + com=self.com, + adjust=self.adjust, + ignore_na=self.ignore_na, + minp=self.min_periods, + ) return self._apply(window_func) @Substitution(name="ewm", func_name="std") diff --git a/pandas/tests/window/conftest.py b/pandas/tests/window/conftest.py index 74f3406d30225..eb8252d5731be 100644 --- a/pandas/tests/window/conftest.py +++ b/pandas/tests/window/conftest.py @@ -1,4 +1,4 @@ -from datetime import datetime +from datetime import datetime, timedelta import numpy as np from numpy.random import randn @@ -302,3 +302,9 @@ def series(): def which(request): """Turn parametrized which as fixture for series and frame""" return request.param + + +@pytest.fixture(params=["1 day", timedelta(days=1)]) +def halflife_with_times(request): + """Halflife argument for EWM when times is specified.""" + return request.param diff --git a/pandas/tests/window/test_ewm.py b/pandas/tests/window/test_ewm.py index 44015597ddb19..12c314d5e9ec9 100644 --- a/pandas/tests/window/test_ewm.py +++ b/pandas/tests/window/test_ewm.py @@ -3,7 +3,8 @@ from pandas.errors import UnsupportedFunctionCall -from pandas import DataFrame, Series +from pandas import DataFrame, DatetimeIndex, Series, date_range +import pandas._testing as tm from pandas.core.window import ExponentialMovingWindow @@ -69,3 +70,60 @@ def test_numpy_compat(method): getattr(e, method)(1, 2, 3) with pytest.raises(UnsupportedFunctionCall, match=msg): getattr(e, method)(dtype=np.float64) + + +def test_ewma_times_not_datetime_type(): + msg = r"times must be datetime64\[ns\] dtype." + with pytest.raises(ValueError, match=msg): + Series(range(5)).ewm(times=np.arange(5)) + + +def test_ewma_times_not_same_length(): + msg = "times must be the same length as the object." + with pytest.raises(ValueError, match=msg): + Series(range(5)).ewm(times=np.arange(4).astype("datetime64[ns]")) + + +def test_ewma_halflife_not_correct_type(): + msg = "halflife must be a string or datetime.timedelta object" + with pytest.raises(ValueError, match=msg): + Series(range(5)).ewm(halflife=1, times=np.arange(5).astype("datetime64[ns]")) + + +def test_ewma_halflife_without_times(halflife_with_times): + msg = "halflife can only be a timedelta convertible argument if times is not None." 
+ with pytest.raises(ValueError, match=msg): + Series(range(5)).ewm(halflife=halflife_with_times) + + +@pytest.mark.parametrize( + "times", + [ + np.arange(10).astype("datetime64[D]").astype("datetime64[ns]"), + date_range("2000", freq="D", periods=10), + date_range("2000", freq="D", periods=10).tz_localize("UTC"), + "time_col", + ], +) +@pytest.mark.parametrize("min_periods", [0, 2]) +def test_ewma_with_times_equal_spacing(halflife_with_times, times, min_periods): + halflife = halflife_with_times + data = np.arange(10) + data[::2] = np.nan + df = DataFrame({"A": data, "time_col": date_range("2000", freq="D", periods=10)}) + result = df.ewm(halflife=halflife, min_periods=min_periods, times=times).mean() + expected = df.ewm(halflife=1.0, min_periods=min_periods).mean() + tm.assert_frame_equal(result, expected) + + +def test_ewma_with_times_variable_spacing(tz_aware_fixture): + tz = tz_aware_fixture + halflife = "23 days" + times = DatetimeIndex( + ["2020-01-01", "2020-01-10T00:04:05", "2020-02-23T05:00:23"] + ).tz_localize(tz) + data = np.arange(3) + df = DataFrame(data) + result = df.ewm(halflife=halflife, times=times).mean() + expected = DataFrame([0.0, 0.5674161888241773, 1.545239952073459]) + tm.assert_frame_equal(result, expected) From 1d26e3ad4ba12e4e02c4f463f4489d0e8b809419 Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Mon, 6 Jul 2020 15:51:43 -0700 Subject: [PATCH 047/632] TYP: get_utcoffset (#35107) --- pandas/_libs/tslibs/timezones.pxd | 4 ++-- pandas/_libs/tslibs/timezones.pyx | 4 ++-- pandas/core/arrays/datetimes.py | 3 +-- pandas/tseries/frequencies.py | 5 ++--- 4 files changed, 7 insertions(+), 9 deletions(-) diff --git a/pandas/_libs/tslibs/timezones.pxd b/pandas/_libs/tslibs/timezones.pxd index 0784b090b3edb..f51ee41cb99a6 100644 --- a/pandas/_libs/tslibs/timezones.pxd +++ b/pandas/_libs/tslibs/timezones.pxd @@ -1,4 +1,4 @@ -from cpython.datetime cimport tzinfo +from cpython.datetime cimport datetime, timedelta, tzinfo cdef tzinfo utc_pytz @@ -11,7 +11,7 @@ cpdef bint tz_compare(tzinfo start, tzinfo end) cpdef object get_timezone(tzinfo tz) cpdef object maybe_get_tz(object tz) -cdef get_utcoffset(tzinfo tz, obj) +cdef timedelta get_utcoffset(tzinfo tz, datetime obj) cdef bint is_fixed_offset(tzinfo tz) cdef object get_dst_info(tzinfo tz) diff --git a/pandas/_libs/tslibs/timezones.pyx b/pandas/_libs/tslibs/timezones.pyx index 0460591048801..3b2104f75956a 100644 --- a/pandas/_libs/tslibs/timezones.pyx +++ b/pandas/_libs/tslibs/timezones.pyx @@ -1,5 +1,5 @@ from datetime import timezone -from cpython.datetime cimport datetime, tzinfo +from cpython.datetime cimport datetime, timedelta, tzinfo # dateutil compat from dateutil.tz import ( @@ -153,7 +153,7 @@ cdef inline object tz_cache_key(tzinfo tz): # UTC Offsets -cdef get_utcoffset(tzinfo tz, obj): +cdef timedelta get_utcoffset(tzinfo tz, datetime obj): try: return tz._utcoffset except AttributeError: diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index 64e8582baaa98..fcfbaa4ac2a1c 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -3,7 +3,6 @@ import warnings import numpy as np -from pytz import utc from pandas._libs import lib, tslib from pandas._libs.tslibs import ( @@ -725,7 +724,7 @@ def _local_timestamps(self): This is used to calculate time-of-day information as if the timestamps were timezone-naive. 
""" - return tzconversion.tz_convert(self.asi8, utc, self.tz) + return tzconversion.tz_convert(self.asi8, timezones.UTC, self.tz) def tz_convert(self, tz): """ diff --git a/pandas/tseries/frequencies.py b/pandas/tseries/frequencies.py index 7516d9748c18f..f94c8ef6550a5 100644 --- a/pandas/tseries/frequencies.py +++ b/pandas/tseries/frequencies.py @@ -4,7 +4,7 @@ import numpy as np from pandas._libs.algos import unique_deltas -from pandas._libs.tslibs import Timestamp +from pandas._libs.tslibs import Timestamp, tzconversion from pandas._libs.tslibs.ccalendar import ( DAYS, MONTH_ALIASES, @@ -22,7 +22,6 @@ from pandas._libs.tslibs.parsing import get_rule_month from pandas._libs.tslibs.resolution import month_position_check from pandas._libs.tslibs.timezones import UTC -from pandas._libs.tslibs.tzconversion import tz_convert from pandas.util._decorators import cache_readonly from pandas.core.dtypes.common import ( @@ -199,7 +198,7 @@ def __init__(self, index, warn: bool = True): # the timezone so they are in local time if hasattr(index, "tz"): if index.tz is not None: - self.i8values = tz_convert(self.i8values, UTC, index.tz) + self.i8values = tzconversion.tz_convert(self.i8values, UTC, index.tz) self.warn = warn From 8f29758412ca2a110a7b2939e6a8737d9f7f4ec0 Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Mon, 6 Jul 2020 15:59:07 -0700 Subject: [PATCH 048/632] PERF: ints_to_pydatetime (#35113) --- pandas/_libs/tslib.pyx | 91 +++++++++++++++++------------------------- 1 file changed, 36 insertions(+), 55 deletions(-) diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx index 8d8a62a58f25f..4fda44e766109 100644 --- a/pandas/_libs/tslib.pyx +++ b/pandas/_libs/tslib.pyx @@ -15,7 +15,7 @@ PyDateTime_IMPORT cimport numpy as cnp -from numpy cimport float64_t, int64_t, ndarray, uint8_t +from numpy cimport float64_t, int64_t, ndarray, uint8_t, intp_t import numpy as np cnp.import_array() @@ -157,13 +157,15 @@ def ints_to_pydatetime( Py_ssize_t i, n = len(arr) ndarray[int64_t] trans int64_t[:] deltas - Py_ssize_t pos + intp_t[:] pos npy_datetimestruct dts object dt, new_tz str typ - int64_t value, delta, local_value + int64_t value, local_value, delta = NPY_NAT # dummy for delta ndarray[object] result = np.empty(n, dtype=object) object (*func_create)(int64_t, npy_datetimestruct, tzinfo, object, bint) + bint use_utc = False, use_tzlocal = False, use_fixed = False + bint use_pytz = False if box == "date": assert (tz is None), "tz should be None when converting to date" @@ -184,66 +186,45 @@ def ints_to_pydatetime( ) if is_utc(tz) or tz is None: - for i in range(n): - value = arr[i] - if value == NPY_NAT: - result[i] = NaT - else: - dt64_to_dtstruct(value, &dts) - result[i] = func_create(value, dts, tz, freq, fold) + use_utc = True elif is_tzlocal(tz): - for i in range(n): - value = arr[i] - if value == NPY_NAT: - result[i] = NaT - else: - # Python datetime objects do not support nanosecond - # resolution (yet, PEP 564). Need to compute new value - # using the i8 representation. 
- local_value = tz_convert_utc_to_tzlocal(value, tz) - dt64_to_dtstruct(local_value, &dts) - result[i] = func_create(value, dts, tz, freq, fold) + use_tzlocal = True else: trans, deltas, typ = get_dst_info(tz) - - if typ not in ['pytz', 'dateutil']: + if typ not in ["pytz", "dateutil"]: # static/fixed; in this case we know that len(delta) == 1 + use_fixed = True delta = deltas[0] - for i in range(n): - value = arr[i] - if value == NPY_NAT: - result[i] = NaT - else: - # Adjust datetime64 timestamp, recompute datetimestruct - dt64_to_dtstruct(value + delta, &dts) - result[i] = func_create(value, dts, tz, freq, fold) + else: + pos = trans.searchsorted(arr, side="right") - 1 + use_pytz = typ == "pytz" - elif typ == 'dateutil': - # no zone-name change for dateutil tzs - dst etc - # represented in single object. - for i in range(n): - value = arr[i] - if value == NPY_NAT: - result[i] = NaT - else: - # Adjust datetime64 timestamp, recompute datetimestruct - pos = trans.searchsorted(value, side='right') - 1 - dt64_to_dtstruct(value + deltas[pos], &dts) - result[i] = func_create(value, dts, tz, freq, fold) + for i in range(n): + new_tz = tz + value = arr[i] + + if value == NPY_NAT: + result[i] = NaT else: - # pytz - for i in range(n): - value = arr[i] - if value == NPY_NAT: - result[i] = NaT - else: - # Adjust datetime64 timestamp, recompute datetimestruct - pos = trans.searchsorted(value, side='right') - 1 - # find right representation of dst etc in pytz timezone - new_tz = tz._tzinfos[tz._transition_info[pos]] + if use_utc: + local_value = value + elif use_tzlocal: + local_value = tz_convert_utc_to_tzlocal(value, tz) + elif use_fixed: + local_value = value + delta + elif not use_pytz: + # i.e. dateutil + # no zone-name change for dateutil tzs - dst etc + # represented in single object. 
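
The key performance idea in this rewrite is hoisting the transition lookup out
of the per-element branchy code: one vectorized binary search finds, for every
stamp, the last DST transition at or before it. A toy illustration with made-up
transition data (not a real timezone)::

    import numpy as np

    # trans: sorted UTC instants (ns) where the zone's offset changes;
    # deltas: the UTC offset (ns) in force from each transition onward.
    trans = np.array([0, 1_000, 5_000], dtype=np.int64)
    deltas = np.array([3_600, 7_200, 3_600], dtype=np.int64)

    stamps = np.array([250, 999, 1_000, 4_999, 6_000], dtype=np.int64)

    # One bulk searchsorted replaces a search per element (GH#24603).
    pos = trans.searchsorted(stamps, side="right") - 1
    local = stamps + deltas[pos]
    print(pos)    # [0 0 1 1 2]
    print(local)  # [ 3850  4599  8200 12199  9600]
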
+ local_value = value + deltas[pos[i]] + else: + # pytz + # find right representation of dst etc in pytz timezone + new_tz = tz._tzinfos[tz._transition_info[pos[i]]] + local_value = value + deltas[pos[i]] - dt64_to_dtstruct(value + deltas[pos], &dts) - result[i] = func_create(value, dts, new_tz, freq, fold) + dt64_to_dtstruct(local_value, &dts) + result[i] = func_create(value, dts, new_tz, freq, fold) return result From 8590bd5b83c1c5a60c7de2ced072fc962d23a254 Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Mon, 6 Jul 2020 16:00:00 -0700 Subject: [PATCH 049/632] CLN: unused imports in tslibs (#35133) --- pandas/_libs/tslibs/offsets.pyx | 16 +--------------- pandas/_libs/tslibs/timestamps.pyx | 4 ++-- 2 files changed, 3 insertions(+), 17 deletions(-) diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx index df43ebcfd9df2..e4d05e0d70e2f 100644 --- a/pandas/_libs/tslibs/offsets.pyx +++ b/pandas/_libs/tslibs/offsets.pyx @@ -51,7 +51,6 @@ from pandas._libs.tslibs.timezones cimport utc_pytz as UTC from pandas._libs.tslibs.tzconversion cimport tz_convert_single from .dtypes cimport PeriodDtypeCode -from .fields import get_start_end_field from .timedeltas cimport delta_to_nanoseconds from .timedeltas import Timedelta from .timestamps cimport _Timestamp @@ -99,12 +98,6 @@ def apply_index_wraps(func): # do @functools.wraps(func) manually since it doesn't work on cdef funcs wrapper.__name__ = func.__name__ wrapper.__doc__ = func.__doc__ - try: - wrapper.__module__ = func.__module__ - except AttributeError: - # AttributeError: 'method_descriptor' object has no - # attribute '__module__' - pass return wrapper @@ -159,12 +152,6 @@ def apply_wraps(func): # do @functools.wraps(func) manually since it doesn't work on cdef funcs wrapper.__name__ = func.__name__ wrapper.__doc__ = func.__doc__ - try: - wrapper.__module__ = func.__module__ - except AttributeError: - # AttributeError: 'method_descriptor' object has no - # attribute '__module__' - pass return wrapper @@ -355,8 +342,7 @@ class ApplyTypeError(TypeError): cdef class BaseOffset: """ - Base class for DateOffset methods that are not overridden by subclasses - and will (after pickle errors are resolved) go into a cdef class. + Base class for DateOffset methods that are not overridden by subclasses. 
""" _day_opt = None _attributes = tuple(["n", "normalize"]) diff --git a/pandas/_libs/tslibs/timestamps.pyx b/pandas/_libs/tslibs/timestamps.pyx index b79a5c49bdd10..9035d1bb2f643 100644 --- a/pandas/_libs/tslibs/timestamps.pyx +++ b/pandas/_libs/tslibs/timestamps.pyx @@ -14,7 +14,7 @@ from numpy cimport int64_t, int8_t, uint8_t, ndarray cnp.import_array() from cpython.object cimport (PyObject_RichCompareBool, PyObject_RichCompare, - Py_GT, Py_GE, Py_EQ, Py_NE, Py_LT, Py_LE) + Py_EQ, Py_NE) from cpython.datetime cimport ( datetime, @@ -51,7 +51,7 @@ from pandas._libs.tslibs.np_datetime cimport ( pydatetime_to_dt64, ) from pandas._libs.tslibs.np_datetime import OutOfBoundsDatetime -from pandas._libs.tslibs.offsets cimport to_offset, is_tick_object, is_offset_object +from pandas._libs.tslibs.offsets cimport to_offset, is_offset_object from pandas._libs.tslibs.timedeltas cimport is_any_td_scalar, delta_to_nanoseconds from pandas._libs.tslibs.timedeltas import Timedelta from pandas._libs.tslibs.timezones cimport ( From 85c02bcad5a87cb137602054a365d90c5cda98dc Mon Sep 17 00:00:00 2001 From: Tom Augspurger Date: Mon, 6 Jul 2020 18:25:27 -0500 Subject: [PATCH 050/632] DEPR: Deprecate n-dim indexing for Series (#35141) --- doc/source/whatsnew/v1.1.0.rst | 1 + pandas/core/indexers.py | 10 +++++----- pandas/core/series.py | 12 ++++-------- pandas/tests/indexes/common.py | 2 +- pandas/tests/indexes/datetimes/test_indexing.py | 4 ++-- pandas/tests/indexes/interval/test_base.py | 2 +- pandas/tests/indexes/test_base.py | 4 ++-- pandas/tests/series/indexing/test_getitem.py | 12 +++++++----- 8 files changed, 23 insertions(+), 24 deletions(-) diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst index 563ae6d1d2ed1..2bb933f6bdb60 100644 --- a/doc/source/whatsnew/v1.1.0.rst +++ b/doc/source/whatsnew/v1.1.0.rst @@ -789,6 +789,7 @@ Deprecations - :meth:`Categorical.to_dense` is deprecated and will be removed in a future version, use ``np.asarray(cat)`` instead (:issue:`32639`) - The ``fastpath`` keyword in the ``SingleBlockManager`` constructor is deprecated and will be removed in a future version (:issue:`33092`) - Providing ``suffixes`` as a ``set`` in :func:`pandas.merge` is deprecated. Provide a tuple instead (:issue:`33740`, :issue:`34741`). +- Indexing a series with a multi-dimensional indexer like ``[:, None]`` to return an ndarray now raises a ``FutureWarning``. Convert to a NumPy array before indexing instead (:issue:`27837`) - :meth:`Index.is_mixed` is deprecated and will be removed in a future version, check ``index.inferred_type`` directly instead (:issue:`32922`) - Passing any arguments but the first one to :func:`read_html` as diff --git a/pandas/core/indexers.py b/pandas/core/indexers.py index 6dbcfef46fa98..d9aa02db3e42a 100644 --- a/pandas/core/indexers.py +++ b/pandas/core/indexers.py @@ -295,7 +295,7 @@ def length_of_indexer(indexer, target=None) -> int: raise AssertionError("cannot find the length of the indexer") -def deprecate_ndim_indexing(result): +def deprecate_ndim_indexing(result, stacklevel=3): """ Helper function to raise the deprecation warning for multi-dimensional indexing on 1D Series/Index. @@ -306,11 +306,11 @@ def deprecate_ndim_indexing(result): """ if np.ndim(result) > 1: warnings.warn( - "Support for multi-dimensional indexing (e.g. `index[:, None]`) " - "on an Index is deprecated and will be removed in a future " + "Support for multi-dimensional indexing (e.g. `obj[:, None]`) " + "is deprecated and will be removed in a future " "version. 
Convert to a numpy array before indexing instead.", - DeprecationWarning, - stacklevel=3, + FutureWarning, + stacklevel=stacklevel, ) diff --git a/pandas/core/series.py b/pandas/core/series.py index be4099d56d43a..6c1d21e4526cf 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -78,7 +78,7 @@ sanitize_array, ) from pandas.core.generic import NDFrame -from pandas.core.indexers import unpack_1tuple +from pandas.core.indexers import deprecate_ndim_indexing, unpack_1tuple from pandas.core.indexes.accessors import CombinedDatetimelikeProperties from pandas.core.indexes.api import Float64Index, Index, MultiIndex, ensure_index import pandas.core.indexes.base as ibase @@ -950,13 +950,9 @@ def _get_with(self, key): def _get_values_tuple(self, key): # mpl hackaround if com.any_none(*key): - # suppress warning from slicing the index with a 2d indexer. - # eventually we'll want Series itself to warn. - with warnings.catch_warnings(): - warnings.filterwarnings( - "ignore", "Support for multi-dim", DeprecationWarning - ) - return self._get_values(key) + result = self._get_values(key) + deprecate_ndim_indexing(result, stacklevel=5) + return result if not isinstance(self.index, MultiIndex): raise ValueError("Can only tuple-index with a MultiIndex") diff --git a/pandas/tests/indexes/common.py b/pandas/tests/indexes/common.py index 30c58506f619d..c8b780455f862 100644 --- a/pandas/tests/indexes/common.py +++ b/pandas/tests/indexes/common.py @@ -855,7 +855,7 @@ def test_engine_reference_cycle(self): def test_getitem_2d_deprecated(self): # GH#30588 idx = self.create_index() - with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False): + with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): res = idx[:, None] assert isinstance(res, np.ndarray), type(res) diff --git a/pandas/tests/indexes/datetimes/test_indexing.py b/pandas/tests/indexes/datetimes/test_indexing.py index b1faaa2115f55..6d6193ceaf27d 100644 --- a/pandas/tests/indexes/datetimes/test_indexing.py +++ b/pandas/tests/indexes/datetimes/test_indexing.py @@ -95,7 +95,7 @@ def test_dti_business_getitem(self): def test_dti_business_getitem_matplotlib_hackaround(self): rng = pd.bdate_range(START, END) - with tm.assert_produces_warning(DeprecationWarning): + with tm.assert_produces_warning(FutureWarning): # GH#30588 multi-dimensional indexing deprecated values = rng[:, None] expected = rng.values[:, None] @@ -122,7 +122,7 @@ def test_dti_custom_getitem(self): def test_dti_custom_getitem_matplotlib_hackaround(self): rng = pd.bdate_range(START, END, freq="C") - with tm.assert_produces_warning(DeprecationWarning): + with tm.assert_produces_warning(FutureWarning): # GH#30588 multi-dimensional indexing deprecated values = rng[:, None] expected = rng.values[:, None] diff --git a/pandas/tests/indexes/interval/test_base.py b/pandas/tests/indexes/interval/test_base.py index 891640234d26e..c316655fbda8a 100644 --- a/pandas/tests/indexes/interval/test_base.py +++ b/pandas/tests/indexes/interval/test_base.py @@ -84,5 +84,5 @@ def test_getitem_2d_deprecated(self): # GH#30588 multi-dim indexing is deprecated, but raising is also acceptable idx = self.create_index() with pytest.raises(ValueError, match="multi-dimensional indexing not allowed"): - with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False): + with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): idx[:, None] diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py index 099c7ced5e2ce..eaf48421dc071 
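
What the new warning means for downstream code, sketched below (assumes
pandas >= 1.1)::

    import warnings
    import pandas as pd

    ser = pd.Series([0, 1])

    # Deprecated: 2-D indexing on a 1-D Series now emits a FutureWarning
    # (it still returns a (2, 1) ndarray for matplotlib compatibility).
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        res = ser[:, None]
    print(type(res), res.shape, caught[0].category)

    # Forward-compatible spelling: convert first, then index.
    arr = ser.to_numpy()[:, None]
    print(arr.shape)  # (2, 1)
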
100644
--- a/pandas/tests/indexes/test_base.py
+++ b/pandas/tests/indexes/test_base.py
@@ -56,7 +56,7 @@ def test_can_hold_identifiers(self):
 
     @pytest.mark.parametrize("index", ["datetime"], indirect=True)
     def test_new_axis(self, index):
-        with tm.assert_produces_warning(DeprecationWarning):
+        with tm.assert_produces_warning(FutureWarning):
             # GH#30588 multi-dimensional indexing deprecated
             new_index = index[None, :]
             assert new_index.ndim == 2
@@ -2531,7 +2531,7 @@ def test_shape_of_invalid_index():
     # that the returned shape is consistent with this underlying array for
     # compat with matplotlib (see https://github.com/pandas-dev/pandas/issues/27775)
     idx = pd.Index([0, 1, 2, 3])
-    with tm.assert_produces_warning(DeprecationWarning):
+    with tm.assert_produces_warning(FutureWarning):
         # GH#30588 multi-dimensional indexing deprecated
         assert idx[:, None].shape == (4, 1)
 
diff --git a/pandas/tests/series/indexing/test_getitem.py b/pandas/tests/series/indexing/test_getitem.py
index 164c63483f71f..6b7cda89a4714 100644
--- a/pandas/tests/series/indexing/test_getitem.py
+++ b/pandas/tests/series/indexing/test_getitem.py
@@ -51,11 +51,7 @@ class TestSeriesGetitemSlices:
     def test_getitem_slice_2d(self, datetime_series):
         # GH#30588 multi-dimensional indexing deprecated
 
-        # This is currently failing because the test was relying on
-        # the DeprecationWarning coming through Index.__getitem__.
-        # We want to implement a warning specifically for Series.__getitem__
-        # at which point this will become a Deprecation/FutureWarning
-        with tm.assert_produces_warning(None):
+        with tm.assert_produces_warning(FutureWarning):
             # GH#30867 Don't want to support this long-term, but
             # for now ensure that the warning from Index
             # doesn't come through via Series.__getitem__.
@@ -135,3 +131,9 @@ def test_getitem_generator(string_series):
     expected = string_series[string_series > 0]
     tm.assert_series_equal(result, expected)
     tm.assert_series_equal(result2, expected)
+
+
+def test_getitem_ndim_deprecated():
+    s = pd.Series([0, 1])
+    with tm.assert_produces_warning(FutureWarning):
+        s[:, None]
 
From 78de4ce02f89bcc21acf30f33b68037e7a5aaf4a Mon Sep 17 00:00:00 2001
From: MBrouns
Date: Tue, 7 Jul 2020 01:27:26 +0200
Subject: [PATCH 051/632] TST: Add test to verify align behaviour on
 CategoricalIndex (#34880)

---
 pandas/tests/frame/methods/test_align.py | 33 ++++++++++++++++++++++++
 1 file changed, 33 insertions(+)

diff --git a/pandas/tests/frame/methods/test_align.py b/pandas/tests/frame/methods/test_align.py
index 5dae719283d17..d19b59debfdea 100644
--- a/pandas/tests/frame/methods/test_align.py
+++ b/pandas/tests/frame/methods/test_align.py
@@ -129,6 +129,39 @@ def test_align_mixed_int(self, mixed_int_frame):
         )
         tm.assert_index_equal(bf.index, Index([]))
 
+    @pytest.mark.parametrize(
+        "l_ordered,r_ordered,expected",
+        [
+            [True, True, pd.CategoricalIndex],
+            [True, False, pd.Index],
+            [False, True, pd.Index],
+            [False, False, pd.CategoricalIndex],
+        ],
+    )
+    def test_align_categorical(self, l_ordered, r_ordered, expected):
+        # GH-28397
+        df_1 = DataFrame(
+            {
+                "A": np.arange(6, dtype="int64"),
+                "B": Series(list("aabbca")).astype(
+                    pd.CategoricalDtype(list("cab"), ordered=l_ordered)
+                ),
+            }
+        ).set_index("B")
+        df_2 = DataFrame(
+            {
+                "A": np.arange(5, dtype="int64"),
+                "B": Series(list("babca")).astype(
+                    pd.CategoricalDtype(list("cab"), ordered=r_ordered)
+                ),
+            }
+        ).set_index("B")
+
+        aligned_1, aligned_2 = df_1.align(df_2)
+        assert isinstance(aligned_1.index, expected)
+        assert isinstance(aligned_2.index, expected)
+
tm.assert_index_equal(aligned_1.index, aligned_2.index) + def test_align_multiindex(self): # GH#10665 # same test cases as test_align_multiindex in test_series.py From 9fcebee889cda05c90a61ff306b8ede01aa3d1af Mon Sep 17 00:00:00 2001 From: Tom Augspurger Date: Mon, 6 Jul 2020 18:28:05 -0500 Subject: [PATCH 052/632] Move mark registration (#35146) --- pandas/conftest.py | 13 +++++++++++++ setup.cfg | 7 ------- 2 files changed, 13 insertions(+), 7 deletions(-) diff --git a/pandas/conftest.py b/pandas/conftest.py index 5fe4cc45b0006..e0adb37e7d2f5 100644 --- a/pandas/conftest.py +++ b/pandas/conftest.py @@ -44,6 +44,19 @@ # Configuration / Settings # ---------------------------------------------------------------- # pytest +def pytest_configure(config): + # Register marks to avoid warnings in pandas.test() + # sync with setup.cfg + config.addinivalue_line("markers", "single: mark a test as single cpu only") + config.addinivalue_line("markers", "slow: mark a test as slow") + config.addinivalue_line("markers", "network: mark a test as network") + config.addinivalue_line( + "markers", "db: tests requiring a database (mysql or postgres)" + ) + config.addinivalue_line("markers", "high_memory: mark a test as a high-memory only") + config.addinivalue_line("markers", "clipboard: mark a pd.read_clipboard test") + + def pytest_addoption(parser): parser.addoption("--skip-slow", action="store_true", help="skip slow tests") parser.addoption("--skip-network", action="store_true", help="skip network tests") diff --git a/setup.cfg b/setup.cfg index 49a57b7a525f0..074b0b6bdff71 100644 --- a/setup.cfg +++ b/setup.cfg @@ -54,13 +54,6 @@ exclude = # sync minversion with setup.cfg & install.rst minversion = 4.0.2 testpaths = pandas -markers = - single: mark a test as single cpu only - slow: mark a test as slow - network: mark a test as network - db: tests requiring a database (mysql or postgres) - high_memory: mark a test as a high-memory only - clipboard: mark a pd.read_clipboard test doctest_optionflags = NORMALIZE_WHITESPACE IGNORE_EXCEPTION_DETAIL ELLIPSIS addopts = --strict-data-files xfail_strict = True From 10024cd10a37e4cb8f962a0730dc2981fda11808 Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Mon, 6 Jul 2020 17:07:34 -0700 Subject: [PATCH 053/632] REF: standardize tz_convert_single usage (#35102) --- pandas/_libs/tslibs/timestamps.pyx | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/pandas/_libs/tslibs/timestamps.pyx b/pandas/_libs/tslibs/timestamps.pyx index 9035d1bb2f643..b670491063616 100644 --- a/pandas/_libs/tslibs/timestamps.pyx +++ b/pandas/_libs/tslibs/timestamps.pyx @@ -1380,7 +1380,7 @@ default 'raise' cdef: npy_datetimestruct dts - int64_t value, value_tz + int64_t value object k, v datetime ts_input tzinfo_type tzobj @@ -1389,8 +1389,7 @@ default 'raise' tzobj = self.tzinfo value = self.value if tzobj is not None: - value_tz = tz_convert_single(value, tzobj, UTC) - value += value - value_tz + value = tz_convert_single(value, UTC, tzobj) # setup components dt64_to_dtstruct(value, &dts) From 79700694ecbf418b3a641443b03408ec38573b1d Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Mon, 6 Jul 2020 17:11:59 -0700 Subject: [PATCH 054/632] CLN: tz_convert is always from UTC (#35104) --- pandas/_libs/tslibs/tzconversion.pyx | 5 ++-- pandas/tests/tslibs/test_conversion.py | 33 +++++++++++++++++--------- 2 files changed, 25 insertions(+), 13 deletions(-) diff --git a/pandas/_libs/tslibs/tzconversion.pyx b/pandas/_libs/tslibs/tzconversion.pyx index d1d6bc40ef288..dc01210f2789f 
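
The conftest.py change above is a pattern worth copying in other projects:
marks registered in code travel with the installed package, so ``pandas.test()``
works without the repo's setup.cfg. A minimal standalone sketch::

    # conftest.py
    import pytest

    def pytest_configure(config):
        # Register custom marks programmatically so pytest does not warn
        # (or error under --strict-markers) about unknown marks.
        config.addinivalue_line("markers", "slow: mark a test as slow")

    # test_example.py
    @pytest.mark.slow
    def test_heavy_computation():
        assert sum(range(10)) == 45
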
100644 --- a/pandas/_libs/tslibs/tzconversion.pyx +++ b/pandas/_libs/tslibs/tzconversion.pyx @@ -388,8 +388,9 @@ def tz_convert(int64_t[:] vals, tzinfo tz1, tzinfo tz2): bint to_utc = is_utc(tz2) tzinfo tz - # See GH#17734 We should always be converting either from UTC or to UTC - assert is_utc(tz1) or to_utc + # See GH#17734 We should always be converting from UTC; otherwise + # should use tz_localize_to_utc. + assert is_utc(tz1) if len(vals) == 0: return np.array([], dtype=np.int64) diff --git a/pandas/tests/tslibs/test_conversion.py b/pandas/tests/tslibs/test_conversion.py index 3a7e06fb14a5f..5a16fea47e90d 100644 --- a/pandas/tests/tslibs/test_conversion.py +++ b/pandas/tests/tslibs/test_conversion.py @@ -20,33 +20,47 @@ def f(x): tm.assert_numpy_array_equal(result, expected) -def _compare_local_to_utc(tz_didx, utc_didx): +def _compare_local_to_utc(tz_didx, naive_didx): + # Check that tz_localize behaves the same vectorized and pointwise. def f(x): return tzconversion.tz_convert_single(x, tz_didx.tz, UTC) - result = tzconversion.tz_convert(utc_didx.asi8, tz_didx.tz, UTC) - expected = np.vectorize(f)(utc_didx.asi8) + err1 = err2 = None + try: + result = tzconversion.tz_localize_to_utc(naive_didx.asi8, tz_didx.tz) + err1 = None + except Exception as err: + err1 = err - tm.assert_numpy_array_equal(result, expected) + try: + expected = naive_didx.map(lambda x: x.tz_localize(tz_didx.tz)).asi8 + except Exception as err: + err2 = err + + if err1 is not None: + assert type(err1) == type(err2) + else: + assert err2 is None + tm.assert_numpy_array_equal(result, expected) def test_tz_convert_single_matches_tz_convert_hourly(tz_aware_fixture): tz = tz_aware_fixture tz_didx = date_range("2014-03-01", "2015-01-10", freq="H", tz=tz) - utc_didx = date_range("2014-03-01", "2015-01-10", freq="H") + naive_didx = date_range("2014-03-01", "2015-01-10", freq="H") _compare_utc_to_local(tz_didx) - _compare_local_to_utc(tz_didx, utc_didx) + _compare_local_to_utc(tz_didx, naive_didx) @pytest.mark.parametrize("freq", ["D", "A"]) def test_tz_convert_single_matches_tz_convert(tz_aware_fixture, freq): tz = tz_aware_fixture tz_didx = date_range("2000-01-01", "2020-01-01", freq=freq, tz=tz) - utc_didx = date_range("2000-01-01", "2020-01-01", freq=freq) + naive_didx = date_range("2000-01-01", "2020-01-01", freq=freq) _compare_utc_to_local(tz_didx) - _compare_local_to_utc(tz_didx, utc_didx) + _compare_local_to_utc(tz_didx, naive_didx) @pytest.mark.parametrize( @@ -57,9 +71,6 @@ def test_tz_convert_single_matches_tz_convert(tz_aware_fixture, freq): ], ) def test_tz_convert_corner(arr): - result = tzconversion.tz_convert(arr, timezones.maybe_get_tz("US/Eastern"), UTC) - tm.assert_numpy_array_equal(result, arr) - result = tzconversion.tz_convert(arr, UTC, timezones.maybe_get_tz("Asia/Tokyo")) tm.assert_numpy_array_equal(result, arr) From d90ca450709b1ce2aa6ed25fcedc38aae6f8c234 Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Mon, 6 Jul 2020 17:13:26 -0700 Subject: [PATCH 055/632] PERF: tz_localize(None) from dst, asvs (#35106) * PERF: avoid copy in tz_convert dst cases * ASVs * asv fixup --- asv_bench/benchmarks/tslibs/tz_convert.py | 30 +++++++++++++ pandas/_libs/tslibs/tzconversion.pyx | 55 +++++++++++++++-------- 2 files changed, 67 insertions(+), 18 deletions(-) create mode 100644 asv_bench/benchmarks/tslibs/tz_convert.py diff --git a/asv_bench/benchmarks/tslibs/tz_convert.py b/asv_bench/benchmarks/tslibs/tz_convert.py new file mode 100644 index 0000000000000..2a1f559bdf6d4 --- /dev/null +++ 
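
The tightened assertion encodes a useful invariant: wall times go *to* UTC via
localization (which must resolve DST ambiguity), while stored values are always
UTC and are only ever converted *from* UTC for display. In user terms::

    import pandas as pd

    wall = pd.Timestamp("2020-07-06 15:00")  # naive wall time
    utc = wall.tz_localize("US/Eastern").tz_convert("UTC")  # wall -> UTC
    back = utc.tz_convert("US/Eastern")  # UTC -> wall, never ambiguous
    assert back.tz_localize(None) == wall
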
b/asv_bench/benchmarks/tslibs/tz_convert.py @@ -0,0 +1,30 @@ +import numpy as np +from pytz import UTC + +from pandas._libs.tslibs.tzconversion import tz_convert, tz_localize_to_utc + +from .tslib import _sizes, _tzs + + +class TimeTZConvert: + params = ( + _sizes, + [x for x in _tzs if x is not None], + ) + param_names = ["size", "tz"] + + def setup(self, size, tz): + arr = np.random.randint(0, 10, size=size, dtype="i8") + self.i8data = arr + + def time_tz_convert_from_utc(self, size, tz): + # effectively: + # dti = DatetimeIndex(self.i8data, tz=tz) + # dti.tz_localize(None) + tz_convert(self.i8data, UTC, tz) + + def time_tz_localize_to_utc(self, size, tz): + # effectively: + # dti = DatetimeIndex(self.i8data) + # dti.tz_localize(tz, ambiguous="NaT", nonexistent="NaT") + tz_localize_to_utc(self.i8data, tz, ambiguous="NaT", nonexistent="NaT") diff --git a/pandas/_libs/tslibs/tzconversion.pyx b/pandas/_libs/tslibs/tzconversion.pyx index dc01210f2789f..273781ee34f0a 100644 --- a/pandas/_libs/tslibs/tzconversion.pyx +++ b/pandas/_libs/tslibs/tzconversion.pyx @@ -552,29 +552,48 @@ cdef int64_t[:] _tz_convert_dst( int64_t[:] result = np.empty(n, dtype=np.int64) ndarray[int64_t] trans int64_t[:] deltas - int64_t v + int64_t v, delta + str typ # tz is assumed _not_ to be tzlocal; that should go # through _tz_convert_tzlocal_utc - trans, deltas, _ = get_dst_info(tz) - if not to_utc: - # We add `offset` below instead of subtracting it - deltas = -1 * np.array(deltas, dtype='i8') + trans, deltas, typ = get_dst_info(tz) - # Previously, this search was done pointwise to try and benefit - # from getting to skip searches for iNaTs. However, it seems call - # overhead dominates the search time so doing it once in bulk - # is substantially faster (GH#24603) - pos = trans.searchsorted(values, side='right') - 1 + if typ not in ["pytz", "dateutil"]: + # FixedOffset, we know len(deltas) == 1 + delta = deltas[0] - for i in range(n): - v = values[i] - if v == NPY_NAT: - result[i] = v - else: - if pos[i] < 0: - raise ValueError('First time before start of DST info') - result[i] = v - deltas[pos[i]] + for i in range(n): + v = values[i] + if v == NPY_NAT: + result[i] = v + else: + if to_utc: + result[i] = v - delta + else: + result[i] = v + delta + + else: + # Previously, this search was done pointwise to try and benefit + # from getting to skip searches for iNaTs. However, it seems call + # overhead dominates the search time so doing it once in bulk + # is substantially faster (GH#24603) + pos = trans.searchsorted(values, side="right") - 1 + + for i in range(n): + v = values[i] + if v == NPY_NAT: + result[i] = v + else: + if pos[i] < 0: + # TODO: How is this reached? Should we be checking for + # it elsewhere? 
+ raise ValueError("First time before start of DST info") + + if to_utc: + result[i] = v - deltas[pos[i]] + else: + result[i] = v + deltas[pos[i]] return result From 74e0cde731251b26ca1a1195765d6b42f4899acf Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Mon, 6 Jul 2020 17:14:26 -0700 Subject: [PATCH 056/632] REF: implement tz_localize_to_utc_single (#35108) * REF: implement tz_localize_to_utc_single * docstring --- pandas/_libs/tslib.pyx | 7 ++--- pandas/_libs/tslibs/conversion.pyx | 9 +++--- pandas/_libs/tslibs/timestamps.pyx | 12 ++++---- pandas/_libs/tslibs/tzconversion.pxd | 3 ++ pandas/_libs/tslibs/tzconversion.pyx | 42 ++++++++++++++++++++++++++-- 5 files changed, 57 insertions(+), 16 deletions(-) diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx index 4fda44e766109..3472dbf161b8e 100644 --- a/pandas/_libs/tslib.pyx +++ b/pandas/_libs/tslib.pyx @@ -46,7 +46,6 @@ from pandas._libs.tslibs.timezones cimport ( get_dst_info, is_utc, is_tzlocal, - utc_pytz as UTC, ) from pandas._libs.tslibs.conversion cimport ( _TSObject, @@ -67,8 +66,8 @@ from pandas._libs.tslibs.timestamps cimport create_timestamp_from_ts, _Timestamp from pandas._libs.tslibs.timestamps import Timestamp from pandas._libs.tslibs.tzconversion cimport ( - tz_convert_single, tz_convert_utc_to_tzlocal, + tz_localize_to_utc_single, ) # Note: this is the only non-tslibs intra-pandas dependency here @@ -250,7 +249,7 @@ def _test_parse_iso8601(ts: str): check_dts_bounds(&obj.dts) if out_local == 1: obj.tzinfo = pytz.FixedOffset(out_tzoffset) - obj.value = tz_convert_single(obj.value, obj.tzinfo, UTC) + obj.value = tz_localize_to_utc_single(obj.value, obj.tzinfo) return Timestamp(obj.value, tz=obj.tzinfo) else: return Timestamp(obj.value) @@ -708,7 +707,7 @@ cpdef array_to_datetime( # dateutil.tz.tzoffset objects out_tzoffset_vals.add(out_tzoffset * 60.) 
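
At the user level, the scalar ``tz_localize_to_utc_single`` helper added in this
patch ultimately backs ``Timestamp.tz_localize``; the DST behaviour it must
preserve is easy to demonstrate (the time and zone below are chosen purely for
illustration)::

    import pandas as pd

    # 02:30 occurs twice in Europe/Berlin on 2018-10-28 (DST ends at 03:00).
    ts = pd.Timestamp("2018-10-28 02:30:00")
    print(ts.tz_localize("Europe/Berlin", ambiguous=True))   # ...+02:00 (DST)
    print(ts.tz_localize("Europe/Berlin", ambiguous=False))  # ...+01:00
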
tz = pytz.FixedOffset(out_tzoffset) - value = tz_convert_single(value, tz, UTC) + value = tz_localize_to_utc_single(value, tz) out_local = 0 out_tzoffset = 0 else: diff --git a/pandas/_libs/tslibs/conversion.pyx b/pandas/_libs/tslibs/conversion.pyx index a1f074b1b29a8..36a4a1f60d8b9 100644 --- a/pandas/_libs/tslibs/conversion.pyx +++ b/pandas/_libs/tslibs/conversion.pyx @@ -39,10 +39,9 @@ from pandas._libs.tslibs.nattype cimport ( c_nat_strings as nat_strings, ) -from pandas._libs.tslibs.tzconversion import tz_localize_to_utc from pandas._libs.tslibs.tzconversion cimport ( tz_convert_utc_to_tzlocal, - tz_convert_single, + tz_localize_to_utc_single, ) # ---------------------------------------------------------------------- @@ -481,7 +480,7 @@ cdef _TSObject _create_tsobject_tz_using_offset(npy_datetimestruct dts, value = dtstruct_to_dt64(&dts) obj.dts = dts obj.tzinfo = pytz.FixedOffset(tzoffset) - obj.value = tz_convert_single(value, obj.tzinfo, UTC) + obj.value = tz_localize_to_utc_single(value, obj.tzinfo) if tz is None: check_overflows(obj) return obj @@ -567,8 +566,8 @@ cdef _TSObject _convert_str_to_tsobject(object ts, tzinfo tz, str unit, ts = dtstruct_to_dt64(&dts) if tz is not None: # shift for _localize_tso - ts = tz_localize_to_utc(np.array([ts], dtype='i8'), tz, - ambiguous='raise')[0] + ts = tz_localize_to_utc_single(ts, tz, + ambiguous="raise") except OutOfBoundsDatetime: # GH#19382 for just-barely-OutOfBounds falling back to dateutil diff --git a/pandas/_libs/tslibs/timestamps.pyx b/pandas/_libs/tslibs/timestamps.pyx index b670491063616..a2dacd9d36b14 100644 --- a/pandas/_libs/tslibs/timestamps.pyx +++ b/pandas/_libs/tslibs/timestamps.pyx @@ -58,8 +58,10 @@ from pandas._libs.tslibs.timezones cimport ( is_utc, maybe_get_tz, treat_tz_as_pytz, utc_pytz as UTC, get_timezone, tz_compare, ) -from pandas._libs.tslibs.tzconversion cimport tz_convert_single -from pandas._libs.tslibs.tzconversion import tz_localize_to_utc +from pandas._libs.tslibs.tzconversion cimport ( + tz_convert_single, + tz_localize_to_utc_single, +) # ---------------------------------------------------------------------- # Constants @@ -1300,9 +1302,9 @@ default 'raise' tz = maybe_get_tz(tz) if not isinstance(ambiguous, str): ambiguous = [ambiguous] - value = tz_localize_to_utc(np.array([self.value], dtype='i8'), tz, - ambiguous=ambiguous, - nonexistent=nonexistent)[0] + value = tz_localize_to_utc_single(self.value, tz, + ambiguous=ambiguous, + nonexistent=nonexistent) return Timestamp(value, tz=tz, freq=self.freq) else: if tz is None: diff --git a/pandas/_libs/tslibs/tzconversion.pxd b/pandas/_libs/tslibs/tzconversion.pxd index 7f445d7549f45..7d102868256de 100644 --- a/pandas/_libs/tslibs/tzconversion.pxd +++ b/pandas/_libs/tslibs/tzconversion.pxd @@ -4,3 +4,6 @@ from numpy cimport int64_t cdef int64_t tz_convert_utc_to_tzlocal(int64_t utc_val, tzinfo tz, bint* fold=*) cpdef int64_t tz_convert_single(int64_t val, tzinfo tz1, tzinfo tz2) +cdef int64_t tz_localize_to_utc_single( + int64_t val, tzinfo tz, object ambiguous=*, object nonexistent=* +) except? 
-1 diff --git a/pandas/_libs/tslibs/tzconversion.pyx b/pandas/_libs/tslibs/tzconversion.pyx index 273781ee34f0a..98c40e109dbab 100644 --- a/pandas/_libs/tslibs/tzconversion.pyx +++ b/pandas/_libs/tslibs/tzconversion.pyx @@ -20,10 +20,48 @@ from pandas._libs.tslibs.ccalendar cimport DAY_NANOS, HOUR_NANOS from pandas._libs.tslibs.nattype cimport NPY_NAT from pandas._libs.tslibs.np_datetime cimport ( npy_datetimestruct, dt64_to_dtstruct) -from pandas._libs.tslibs.timezones cimport get_dst_info, is_tzlocal, is_utc +from pandas._libs.tslibs.timezones cimport ( + get_dst_info, + get_utcoffset, + is_fixed_offset, + is_tzlocal, + is_utc, +) + + +cdef int64_t tz_localize_to_utc_single( + int64_t val, tzinfo tz, object ambiguous=None, object nonexistent=None, +) except? -1: + """See tz_localize_to_utc.__doc__""" + cdef: + int64_t delta + int64_t[:] deltas + + if val == NPY_NAT: + return val + + elif is_utc(tz) or tz is None: + return val + + elif is_tzlocal(tz): + return _tz_convert_tzlocal_utc(val, tz, to_utc=True) + + elif is_fixed_offset(tz): + # TODO: in this case we should be able to use get_utcoffset, + # that returns None for e.g. 'dateutil//usr/share/zoneinfo/Etc/GMT-9' + _, deltas, _ = get_dst_info(tz) + delta = deltas[0] + return val - delta + + else: + return tz_localize_to_utc( + np.array([val], dtype="i8"), + tz, + ambiguous=ambiguous, + nonexistent=nonexistent, + )[0] -# TODO: cdef scalar version to call from convert_str_to_tsobject @cython.boundscheck(False) @cython.wraparound(False) def tz_localize_to_utc(ndarray[int64_t] vals, tzinfo tz, object ambiguous=None, From a207fb1c542eed90b0970457d90ccd221768366d Mon Sep 17 00:00:00 2001 From: Marco Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Tue, 7 Jul 2020 01:39:25 +0100 Subject: [PATCH 057/632] no need to do a circular import (#35128) --- pandas/core/generic.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 0b76960558721..571fcc67f3bb5 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -5366,9 +5366,8 @@ def dtypes(self): string object dtype: object """ - from pandas import Series # noqa: F811 - - return Series(self._mgr.get_dtypes(), index=self._info_axis, dtype=np.object_) + data = self._mgr.get_dtypes() + return self._constructor_sliced(data, index=self._info_axis, dtype=np.object_) def _to_dict_of_blocks(self, copy: bool_t = True): """ From c2dfde4c13b1db0cc99fd6b324512106a6157cc0 Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Mon, 6 Jul 2020 17:44:20 -0700 Subject: [PATCH 058/632] REF: make ccalendar self-contained (#35119) --- pandas/_libs/tslibs/ccalendar.pyx | 23 ----------------------- pandas/_libs/tslibs/fields.pyx | 26 +++++++++++++++++++++++--- 2 files changed, 23 insertions(+), 26 deletions(-) diff --git a/pandas/_libs/tslibs/ccalendar.pyx b/pandas/_libs/tslibs/ccalendar.pyx index 9f8cf6c28adab..de8fd3911e946 100644 --- a/pandas/_libs/tslibs/ccalendar.pyx +++ b/pandas/_libs/tslibs/ccalendar.pyx @@ -7,11 +7,6 @@ import cython from numpy cimport int64_t, int32_t -from locale import LC_TIME - -from pandas._config.localization import set_locale -from pandas._libs.tslibs.strptime import LocaleTime - # ---------------------------------------------------------------------- # Constants @@ -246,21 +241,3 @@ cpdef int32_t get_day_of_year(int year, int month, int day) nogil: day_of_year = mo_off + day return day_of_year - - -def get_locale_names(name_type: str, locale: object = None): - """ - Returns an array of 
localized day or month names. - - Parameters - ---------- - name_type : string, attribute of LocaleTime() in which to return localized - names - locale : string - - Returns - ------- - list of locale names - """ - with set_locale(locale, LC_TIME): - return getattr(LocaleTime(), name_type) diff --git a/pandas/_libs/tslibs/fields.pyx b/pandas/_libs/tslibs/fields.pyx index 126deb67e4189..2351aca749dcc 100644 --- a/pandas/_libs/tslibs/fields.pyx +++ b/pandas/_libs/tslibs/fields.pyx @@ -2,6 +2,7 @@ Functions for accessing attributes of Timestamp/datetime64/datetime-like objects and arrays """ +from locale import LC_TIME import cython from cython import Py_ssize_t @@ -11,9 +12,9 @@ cimport numpy as cnp from numpy cimport ndarray, int64_t, int32_t, int8_t, uint32_t cnp.import_array() -from pandas._libs.tslibs.ccalendar import ( - get_locale_names, MONTHS_FULL, DAYS_FULL, -) +from pandas._config.localization import set_locale + +from pandas._libs.tslibs.ccalendar import MONTHS_FULL, DAYS_FULL from pandas._libs.tslibs.ccalendar cimport ( DAY_NANOS, get_days_in_month, is_leapyear, dayofweek, get_week_of_year, @@ -24,6 +25,7 @@ from pandas._libs.tslibs.np_datetime cimport ( npy_datetimestruct, pandas_timedeltastruct, dt64_to_dtstruct, td64_to_tdstruct) from pandas._libs.tslibs.nattype cimport NPY_NAT +from pandas._libs.tslibs.strptime import LocaleTime def get_time_micros(const int64_t[:] dtindex): @@ -704,3 +706,21 @@ def build_isocalendar_sarray(const int64_t[:] dtindex): iso_weeks[i] = ret_val[1] days[i] = ret_val[2] return out + + +def get_locale_names(name_type: str, locale: object = None): + """ + Returns an array of localized day or month names. + + Parameters + ---------- + name_type : string, attribute of LocaleTime() in which to return localized + names + locale : string + + Returns + ------- + list of locale names + """ + with set_locale(locale, LC_TIME): + return getattr(LocaleTime(), name_type) From 148f9fd74fc71cb7509c0898883036316efc6f89 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Guillem=20S=C3=A1nchez?= Date: Tue, 7 Jul 2020 13:09:09 +0200 Subject: [PATCH 059/632] Added missing import in boxplot_frame_groupby example (#34343) --- pandas/plotting/_core.py | 30 +++++++++++++++++++----------- 1 file changed, 19 insertions(+), 11 deletions(-) diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py index 3a8cc5c299640..45a3818492b44 100644 --- a/pandas/plotting/_core.py +++ b/pandas/plotting/_core.py @@ -567,17 +567,25 @@ def boxplot_frame_groupby( Examples -------- - >>> import itertools - >>> tuples = [t for t in itertools.product(range(1000), range(4))] - >>> index = pd.MultiIndex.from_tuples(tuples, names=['lvl0', 'lvl1']) - >>> data = np.random.randn(len(index),4) - >>> df = pd.DataFrame(data, columns=list('ABCD'), index=index) - >>> - >>> grouped = df.groupby(level='lvl1') - >>> boxplot_frame_groupby(grouped) - >>> - >>> grouped = df.unstack(level='lvl1').groupby(level=0, axis=1) - >>> boxplot_frame_groupby(grouped, subplots=False) + You can create boxplots for grouped data and show them as separate subplots: + + .. 
plot:: + :context: close-figs + + >>> import itertools + >>> tuples = [t for t in itertools.product(range(1000), range(4))] + >>> index = pd.MultiIndex.from_tuples(tuples, names=['lvl0', 'lvl1']) + >>> data = np.random.randn(len(index),4) + >>> df = pd.DataFrame(data, columns=list('ABCD'), index=index) + >>> grouped = df.groupby(level='lvl1') + >>> grouped.boxplot(rot=45, fontsize=12, figsize=(8,10)) + + The ``subplots=False`` option shows the boxplots in a single figure. + + .. plot:: + :context: close-figs + + >>> grouped.boxplot(subplots=False, rot=45, fontsize=12) """ plot_backend = _get_plot_backend(backend) return plot_backend.boxplot_frame_groupby( From acedb80fd5e974628b619ba4c75db24845b89fed Mon Sep 17 00:00:00 2001 From: Fangchen Li Date: Tue, 7 Jul 2020 08:10:06 -0500 Subject: [PATCH 060/632] CI: move py36 slow to azure (#34776) (#35151) --- .travis.yml | 6 ------ ci/azure/posix.yml | 5 +++++ ci/deps/{travis-36-slow.yaml => azure-36-slow.yaml} | 0 ci/setup_env.sh | 1 - 4 files changed, 5 insertions(+), 7 deletions(-) rename ci/deps/{travis-36-slow.yaml => azure-36-slow.yaml} (100%) diff --git a/.travis.yml b/.travis.yml index fdea9876d5d89..b016cf386098e 100644 --- a/.travis.yml +++ b/.travis.yml @@ -58,12 +58,6 @@ matrix: services: - mysql - postgresql - - - env: - - JOB="3.6, slow" ENV_FILE="ci/deps/travis-36-slow.yaml" PATTERN="slow" SQL="1" - services: - - mysql - - postgresql allow_failures: - arch: arm64 env: diff --git a/ci/azure/posix.yml b/ci/azure/posix.yml index 880fdc46f43f5..f716974f6add1 100644 --- a/ci/azure/posix.yml +++ b/ci/azure/posix.yml @@ -30,6 +30,11 @@ jobs: LC_ALL: "zh_CN.utf8" EXTRA_APT: "language-pack-zh-hans" + py36_slow: + ENV_FILE: ci/deps/azure-36-slow.yaml + CONDA_PY: "36" + PATTERN: "slow" + py36_locale: ENV_FILE: ci/deps/azure-36-locale.yaml CONDA_PY: "36" diff --git a/ci/deps/travis-36-slow.yaml b/ci/deps/azure-36-slow.yaml similarity index 100% rename from ci/deps/travis-36-slow.yaml rename to ci/deps/azure-36-slow.yaml diff --git a/ci/setup_env.sh b/ci/setup_env.sh index 4d551294dbb21..aa43d8b7dd00a 100755 --- a/ci/setup_env.sh +++ b/ci/setup_env.sh @@ -166,5 +166,4 @@ if [[ -n ${SQL:0} ]]; then else echo "not using dbs on non-linux Travis builds or Azure Pipelines" fi - echo "done" From 0dffbdf7a308e38f2a4bbab82f657b7eab57302a Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Tue, 7 Jul 2020 06:14:28 -0700 Subject: [PATCH 061/632] BUG: get_loc with time object matching NaT micros (#35114) --- doc/source/whatsnew/v1.1.0.rst | 1 + pandas/_libs/tslibs/fields.pyx | 22 ------------------ pandas/core/indexes/datetimes.py | 23 ++++++++++++++----- .../tests/indexes/datetimes/test_indexing.py | 10 ++++++++ 4 files changed, 28 insertions(+), 28 deletions(-) diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst index 2bb933f6bdb60..cee41f248fc60 100644 --- a/doc/source/whatsnew/v1.1.0.rst +++ b/doc/source/whatsnew/v1.1.0.rst @@ -978,6 +978,7 @@ Indexing - Bug in :meth:`DataFrame.loc` with dictionary of values changes columns with dtype of ``int`` to ``float`` (:issue:`34573`) - Bug in :meth:`Series.loc` when used with a :class:`MultiIndex` would raise an IndexingError when accessing a None value (:issue:`34318`) - Bug in :meth:`DataFrame.reset_index` and :meth:`Series.reset_index` would not preserve data types on an empty :class:`DataFrame` or :class:`Series` with a :class:`MultiIndex` (:issue:`19602`) +- Bug in :class:`Series` and :class:`DataFrame` indexing with a ``time`` key on a :class:`DatetimeIndex` with ``NaT`` entries 
(:issue:`35114`) Missing ^^^^^^^ diff --git a/pandas/_libs/tslibs/fields.pyx b/pandas/_libs/tslibs/fields.pyx index 2351aca749dcc..5ea7c0b6c5d02 100644 --- a/pandas/_libs/tslibs/fields.pyx +++ b/pandas/_libs/tslibs/fields.pyx @@ -16,7 +16,6 @@ from pandas._config.localization import set_locale from pandas._libs.tslibs.ccalendar import MONTHS_FULL, DAYS_FULL from pandas._libs.tslibs.ccalendar cimport ( - DAY_NANOS, get_days_in_month, is_leapyear, dayofweek, get_week_of_year, get_day_of_year, get_iso_calendar, iso_calendar_t, month_offset, @@ -28,27 +27,6 @@ from pandas._libs.tslibs.nattype cimport NPY_NAT from pandas._libs.tslibs.strptime import LocaleTime -def get_time_micros(const int64_t[:] dtindex): - """ - Return the number of microseconds in the time component of a - nanosecond timestamp. - - Parameters - ---------- - dtindex : ndarray[int64_t] - - Returns - ------- - micros : ndarray[int64_t] - """ - cdef: - ndarray[int64_t] micros - - micros = np.mod(dtindex, DAY_NANOS, dtype=np.int64) - micros //= 1000 - return micros - - @cython.wraparound(False) @cython.boundscheck(False) def build_field_sarray(const int64_t[:] dtindex): diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index 86c6cdf5b15c7..0317d0b93859b 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -6,7 +6,7 @@ import numpy as np from pandas._libs import NaT, Period, Timestamp, index as libindex, lib, tslib -from pandas._libs.tslibs import Resolution, fields, parsing, timezones, to_offset +from pandas._libs.tslibs import Resolution, parsing, timezones, to_offset from pandas._libs.tslibs.offsets import prefix_mapping from pandas._typing import DtypeObj, Label from pandas.errors import InvalidIndexError @@ -86,7 +86,6 @@ def _new_DatetimeIndex(cls, d): "tzinfo", "dtype", "to_pydatetime", - "_local_timestamps", "_has_same_tz", "_format_native_types", "date", @@ -380,10 +379,22 @@ def union_many(self, others): # -------------------------------------------------------------------- def _get_time_micros(self): + """ + Return the number of microseconds since midnight. 
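+
+        For illustration only, the conversion applied to each wall-clock
+        nanosecond value is equivalent to this sketch (``value`` is a
+        placeholder name)::
+
+            nanos = value % (24 * 3600 * 1_000_000_000)  # ns since midnight
+            micros = nanos // 1000                       # truncate to microseconds
+
+        ``NaT`` entries are mapped to ``-1``.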
+ + Returns + ------- + ndarray[int64_t] + """ values = self.asi8 if self.tz is not None and not timezones.is_utc(self.tz): values = self._data._local_timestamps() - return fields.get_time_micros(values) + + nanos = values % (24 * 3600 * 1_000_000_000) + micros = nanos // 1000 + + micros[self._isnan] = -1 + return micros def to_series(self, keep_tz=lib.no_default, index=None, name=None): """ @@ -1094,6 +1105,6 @@ def bdate_range( ) -def _time_to_micros(time): - seconds = time.hour * 60 * 60 + 60 * time.minute + time.second - return 1000000 * seconds + time.microsecond +def _time_to_micros(time_obj: time) -> int: + seconds = time_obj.hour * 60 * 60 + 60 * time_obj.minute + time_obj.second + return 1_000_000 * seconds + time_obj.microsecond diff --git a/pandas/tests/indexes/datetimes/test_indexing.py b/pandas/tests/indexes/datetimes/test_indexing.py index 6d6193ceaf27d..5d2c6daba3f57 100644 --- a/pandas/tests/indexes/datetimes/test_indexing.py +++ b/pandas/tests/indexes/datetimes/test_indexing.py @@ -471,6 +471,16 @@ def test_get_loc(self): with pytest.raises(NotImplementedError, match=msg): idx.get_loc(time(12, 30), method="pad") + def test_get_loc_time_nat(self): + # GH#35114 + # Case where key's total microseconds happens to match iNaT % 1e6 // 1000 + tic = time(minute=12, second=43, microsecond=145224) + dti = pd.DatetimeIndex([pd.NaT]) + + loc = dti.get_loc(tic) + expected = np.array([], dtype=np.intp) + tm.assert_numpy_array_equal(loc, expected) + def test_get_loc_tz_aware(self): # https://github.com/pandas-dev/pandas/issues/32140 dti = pd.date_range( From 031fb16835af1f92522649d0b1f53808c5d9338c Mon Sep 17 00:00:00 2001 From: Terji Petersen Date: Tue, 7 Jul 2020 14:19:59 +0100 Subject: [PATCH 062/632] CLN: Index._format_with_header (remove kwargs etc.) 
(#35118) --- pandas/core/indexes/base.py | 27 ++++++++++++++++----------- pandas/core/indexes/category.py | 9 +++++++++ pandas/core/indexes/datetimelike.py | 6 ++++-- pandas/core/indexes/interval.py | 6 +++--- pandas/core/indexes/range.py | 4 ++-- pandas/io/formats/style.py | 5 ++++- 6 files changed, 38 insertions(+), 19 deletions(-) diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index b12a556a8291d..2f12a2e4c27ea 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -2,7 +2,16 @@ from datetime import datetime import operator from textwrap import dedent -from typing import TYPE_CHECKING, Any, Callable, FrozenSet, Hashable, Optional, Union +from typing import ( + TYPE_CHECKING, + Any, + Callable, + FrozenSet, + Hashable, + List, + Optional, + Union, +) import warnings import numpy as np @@ -910,15 +919,12 @@ def format(self, name: bool = False, formatter=None, **kwargs): return self._format_with_header(header, **kwargs) - def _format_with_header(self, header, na_rep="NaN", **kwargs): - values = self._values - + def _format_with_header(self, header, na_rep="NaN") -> List[str_t]: from pandas.io.formats.format import format_array - if is_categorical_dtype(values.dtype): - values = np.array(values) + values = self._values - elif is_object_dtype(values.dtype): + if is_object_dtype(values.dtype): values = lib.maybe_convert_objects(values, safe=1) if is_object_dtype(values.dtype): @@ -929,10 +935,9 @@ def _format_with_header(self, header, na_rep="NaN", **kwargs): if mask.any(): result = np.array(result) result[mask] = na_rep - result = result.tolist() - + result = result.tolist() # type: ignore else: - result = _trim_front(format_array(values, None, justify="left")) + result = trim_front(format_array(values, None, justify="left")) return header + result def to_native_types(self, slicer=None, **kwargs): @@ -5611,7 +5616,7 @@ def ensure_has_len(seq): return seq -def _trim_front(strings): +def trim_front(strings: List[str]) -> List[str]: """ Trims zeros and decimal points. 
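+
+    In practice this strips the leading padding added by ``format_array``;
+    for example (illustrative values), ``[" 1.0", " 2.0"]`` becomes
+    ``["1.0", "2.0"]``.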
""" diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py index 2a79c83de7ef2..b0b008de69a94 100644 --- a/pandas/core/indexes/category.py +++ b/pandas/core/indexes/category.py @@ -347,6 +347,15 @@ def _format_attrs(self): attrs.append(("length", len(self))) return attrs + def _format_with_header(self, header, na_rep="NaN") -> List[str]: + from pandas.io.formats.format import format_array + + formatted_values = format_array( + self._values, formatter=None, na_rep=na_rep, justify="left" + ) + result = ibase.trim_front(formatted_values) + return header + result + # -------------------------------------------------------------------- @property diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index 49b8ec3276e37..7be6aa50fa16b 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -338,8 +338,10 @@ def argmax(self, axis=None, skipna=True, *args, **kwargs): # -------------------------------------------------------------------- # Rendering Methods - def _format_with_header(self, header, na_rep="NaT", **kwargs): - return header + list(self._format_native_types(na_rep, **kwargs)) + def _format_with_header(self, header, na_rep="NaT", date_format=None) -> List[str]: + return header + list( + self._format_native_types(na_rep=na_rep, date_format=date_format) + ) @property def _formatter_func(self): diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index f7a7b382b853f..9548ebbd9c3b2 100644 --- a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -1,7 +1,7 @@ """ define the IntervalIndex """ from operator import le, lt import textwrap -from typing import Any, Optional, Tuple, Union +from typing import Any, List, Optional, Tuple, Union import numpy as np @@ -948,8 +948,8 @@ def take(self, indices, axis=0, allow_fill=True, fill_value=None, **kwargs): # Rendering Methods # __repr__ associated methods are based on MultiIndex - def _format_with_header(self, header, **kwargs): - return header + list(self._format_native_types(**kwargs)) + def _format_with_header(self, header, na_rep="NaN") -> List[str]: + return header + list(self._format_native_types(na_rep=na_rep)) def _format_native_types(self, na_rep="NaN", quoting=None, **kwargs): # GH 28210: use base method but with different default na_rep diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py index 49a0f0fb7ae92..6d9fd6efe54a3 100644 --- a/pandas/core/indexes/range.py +++ b/pandas/core/indexes/range.py @@ -1,7 +1,7 @@ from datetime import timedelta import operator from sys import getsizeof -from typing import Any, Optional +from typing import Any, List, Optional import warnings import numpy as np @@ -197,7 +197,7 @@ def _format_data(self, name=None): # we are formatting thru the attributes return None - def _format_with_header(self, header, na_rep="NaN", **kwargs): + def _format_with_header(self, header, na_rep="NaN") -> List[str]: return header + list(map(pprint_thing, self._range)) # -------------------------------------------------------------------- diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py index f7ba4750bc2ad..6250e99252928 100644 --- a/pandas/io/formats/style.py +++ b/pandas/io/formats/style.py @@ -1524,7 +1524,10 @@ def _get_level_lengths(index, hidden_elements=None): Result is a dictionary of (level, initial_position): span """ - levels = index.format(sparsify=lib.no_default, adjoin=False, names=False) + if isinstance(index, pd.MultiIndex): + levels = 
index.format(sparsify=lib.no_default, adjoin=False) + else: + levels = index.format() if hidden_elements is None: hidden_elements = [] From 08eca211e7e606022ecddecbf0dd8f6114d0ffe4 Mon Sep 17 00:00:00 2001 From: Marco Gorelli Date: Tue, 7 Jul 2020 20:19:26 +0100 Subject: [PATCH 063/632] TYP, DOC, CLN:SeriesGroupBy._wrap_applied_output (#35120) --- pandas/core/groupby/generic.py | 32 ++++++++++++++++++++++++-------- 1 file changed, 24 insertions(+), 8 deletions(-) diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index 6f956a3dcc9b6..ebb9d82766c1b 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -19,6 +19,7 @@ Iterable, List, Mapping, + Optional, Sequence, Tuple, Type, @@ -30,7 +31,7 @@ import numpy as np from pandas._libs import lib -from pandas._typing import FrameOrSeries +from pandas._typing import FrameOrSeries, FrameOrSeriesUnion from pandas.util._decorators import Appender, Substitution, doc from pandas.core.dtypes.cast import ( @@ -413,12 +414,31 @@ def _wrap_transformed_output( assert isinstance(result, Series) return result - def _wrap_applied_output(self, keys, values, not_indexed_same=False): + def _wrap_applied_output( + self, keys: Index, values: Optional[List[Any]], not_indexed_same: bool = False + ) -> FrameOrSeriesUnion: + """ + Wrap the output of SeriesGroupBy.apply into the expected result. + + Parameters + ---------- + keys : Index + Keys of groups that Series was grouped by. + values : Optional[List[Any]] + Applied output for each group. + not_indexed_same : bool, default False + Whether the applied outputs are not indexed the same as the group axes. + + Returns + ------- + DataFrame or Series + """ if len(keys) == 0: # GH #6265 return self.obj._constructor( [], name=self._selection_name, index=keys, dtype=np.float64 ) + assert values is not None def _get_index() -> Index: if self.grouper.nkeys > 1: @@ -430,7 +450,7 @@ def _get_index() -> Index: if isinstance(values[0], dict): # GH #823 #24880 index = _get_index() - result = self._reindex_output( + result: FrameOrSeriesUnion = self._reindex_output( self.obj._constructor_expanddim(values, index=index) ) # if self.observed is False, @@ -438,11 +458,7 @@ def _get_index() -> Index: result = result.stack(dropna=self.observed) result.name = self._selection_name return result - - if isinstance(values[0], Series): - return self._concat_objects(keys, values, not_indexed_same=not_indexed_same) - elif isinstance(values[0], DataFrame): - # possible that Series -> DataFrame by applied function + elif isinstance(values[0], (Series, DataFrame)): return self._concat_objects(keys, values, not_indexed_same=not_indexed_same) else: # GH #6265 #24880 From 42a6d4457a1ff096e645d62ab90aca9210af86c3 Mon Sep 17 00:00:00 2001 From: Marco Gorelli Date: Tue, 7 Jul 2020 20:24:24 +0100 Subject: [PATCH 064/632] CI: add validate_unwanted_patterns to known_third_parties (#35021) --- scripts/tests/test_validate_unwanted_patterns.py | 1 - setup.cfg | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/scripts/tests/test_validate_unwanted_patterns.py b/scripts/tests/test_validate_unwanted_patterns.py index b6cfa20cd7ca0..947666a730ee9 100644 --- a/scripts/tests/test_validate_unwanted_patterns.py +++ b/scripts/tests/test_validate_unwanted_patterns.py @@ -1,7 +1,6 @@ import io import pytest - import validate_unwanted_patterns diff --git a/setup.cfg b/setup.cfg index 074b0b6bdff71..00af7f6f1b79a 100644 --- a/setup.cfg +++ b/setup.cfg @@ -105,7 +105,7 @@ known_dtypes = 
pandas.core.dtypes known_post_core = pandas.tseries,pandas.io,pandas.plotting sections = FUTURE,STDLIB,THIRDPARTY,PRE_LIBS,PRE_CORE,DTYPES,FIRSTPARTY,POST_CORE,LOCALFOLDER known_first_party = pandas -known_third_party = _pytest,announce,dateutil,docutils,flake8,git,hypothesis,jinja2,lxml,matplotlib,numpy,numpydoc,pkg_resources,pyarrow,pytest,pytz,requests,scipy,setuptools,sphinx,sqlalchemy,validate_docstrings,yaml,odf +known_third_party = _pytest,announce,dateutil,docutils,flake8,git,hypothesis,jinja2,lxml,matplotlib,numpy,numpydoc,pkg_resources,pyarrow,pytest,pytz,requests,scipy,setuptools,sphinx,sqlalchemy,validate_docstrings,validate_unwanted_patterns,yaml,odf multi_line_output = 3 include_trailing_comma = True force_grid_wrap = 0 From 02a6acfd06748628e8a73c519a619522078d67b1 Mon Sep 17 00:00:00 2001 From: Suvayu Ali Date: Wed, 8 Jul 2020 11:47:08 +0000 Subject: [PATCH 065/632] BUG: incorrect type when indexing sparse dataframe with iterable (#34908) * TST: regression tests for indexing sparse dataframe with iterable closes #34526 --- doc/source/whatsnew/v1.1.0.rst | 1 + pandas/core/arrays/sparse/array.py | 7 +-- pandas/tests/frame/indexing/test_indexing.py | 15 ------ pandas/tests/frame/indexing/test_sparse.py | 51 ++++++++++++++++++++ 4 files changed, 54 insertions(+), 20 deletions(-) create mode 100644 pandas/tests/frame/indexing/test_sparse.py diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst index cee41f248fc60..386fe3ce2160f 100644 --- a/doc/source/whatsnew/v1.1.0.rst +++ b/doc/source/whatsnew/v1.1.0.rst @@ -1124,6 +1124,7 @@ Sparse - Bug where :class:`DataFrame` containing :class:`SparseArray` filled with ``NaN`` when indexed by a list-like (:issue:`27781`, :issue:`29563`) - The repr of :class:`SparseDtype` now includes the repr of its ``fill_value`` attribute. 
Previously it used ``fill_value``'s string representation (:issue:`34352`) - Bug where empty :class:`DataFrame` could not be cast to :class:`SparseDtype` (:issue:`33113`) +- Bug in :meth:`arrays.SparseArray` was returning the incorrect type when indexing a sparse dataframe with an iterable (:issue:`34526`, :issue:`34540`) ExtensionArray ^^^^^^^^^^^^^^ diff --git a/pandas/core/arrays/sparse/array.py b/pandas/core/arrays/sparse/array.py index 4996a10002c63..b18a58da3950f 100644 --- a/pandas/core/arrays/sparse/array.py +++ b/pandas/core/arrays/sparse/array.py @@ -866,11 +866,8 @@ def _take_with_fill(self, indices, fill_value=None) -> np.ndarray: if self.sp_index.npoints == 0: # Avoid taking from the empty self.sp_values - taken = np.full( - sp_indexer.shape, - fill_value=fill_value, - dtype=np.result_type(type(fill_value)), - ) + _dtype = np.result_type(self.dtype.subtype, type(fill_value)) + taken = np.full(sp_indexer.shape, fill_value=fill_value, dtype=_dtype) else: taken = self.sp_values.take(sp_indexer) diff --git a/pandas/tests/frame/indexing/test_indexing.py b/pandas/tests/frame/indexing/test_indexing.py index 3865ea64ee479..3fa3c9303806f 100644 --- a/pandas/tests/frame/indexing/test_indexing.py +++ b/pandas/tests/frame/indexing/test_indexing.py @@ -21,7 +21,6 @@ notna, ) import pandas._testing as tm -from pandas.arrays import SparseArray import pandas.core.common as com from pandas.core.indexing import IndexingError @@ -1907,20 +1906,6 @@ def test_getitem_ix_float_duplicates(self): expect = df.iloc[[1, -1], 0] tm.assert_series_equal(df.loc[0.2, "a"], expect) - def test_getitem_sparse_column(self): - # https://github.com/pandas-dev/pandas/issues/23559 - data = SparseArray([0, 1]) - df = pd.DataFrame({"A": data}) - expected = pd.Series(data, name="A") - result = df["A"] - tm.assert_series_equal(result, expected) - - result = df.iloc[:, 0] - tm.assert_series_equal(result, expected) - - result = df.loc[:, "A"] - tm.assert_series_equal(result, expected) - def test_setitem_with_unaligned_tz_aware_datetime_column(self): # GH 12981 # Assignment of unaligned offset-aware datetime series. diff --git a/pandas/tests/frame/indexing/test_sparse.py b/pandas/tests/frame/indexing/test_sparse.py new file mode 100644 index 0000000000000..876fbe212c466 --- /dev/null +++ b/pandas/tests/frame/indexing/test_sparse.py @@ -0,0 +1,51 @@ +import numpy as np +import pytest + +import pandas.util._test_decorators as td + +import pandas as pd +import pandas._testing as tm +from pandas.arrays import SparseArray +from pandas.core.arrays.sparse import SparseDtype + + +class TestSparseDataFrameIndexing: + def test_getitem_sparse_column(self): + # https://github.com/pandas-dev/pandas/issues/23559 + data = SparseArray([0, 1]) + df = pd.DataFrame({"A": data}) + expected = pd.Series(data, name="A") + result = df["A"] + tm.assert_series_equal(result, expected) + + result = df.iloc[:, 0] + tm.assert_series_equal(result, expected) + + result = df.loc[:, "A"] + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize("spmatrix_t", ["coo_matrix", "csc_matrix", "csr_matrix"]) + @pytest.mark.parametrize("dtype", [np.int64, np.float64, complex]) + @td.skip_if_no_scipy + def test_locindexer_from_spmatrix(self, spmatrix_t, dtype): + import scipy.sparse + + spmatrix_t = getattr(scipy.sparse, spmatrix_t) + + # The bug is triggered by a sparse matrix with purely sparse columns. 
So the + # recipe below generates a rectangular matrix of dimension (5, 7) where all the + # diagonal cells are ones, meaning the last two columns are purely sparse. + rows, cols = 5, 7 + spmatrix = spmatrix_t(np.eye(rows, cols, dtype=dtype), dtype=dtype) + df = pd.DataFrame.sparse.from_spmatrix(spmatrix) + + # regression test for #34526 + itr_idx = range(2, rows) + result = df.loc[itr_idx].values + expected = spmatrix.toarray()[itr_idx] + tm.assert_numpy_array_equal(result, expected) + + # regression test for #34540 + result = df.loc[itr_idx].dtypes.values + expected = np.full(cols, SparseDtype(dtype, fill_value=0)) + tm.assert_numpy_array_equal(result, expected) From d9b56a130e0c90f49b05576a747ca03ddad086ef Mon Sep 17 00:00:00 2001 From: Tom Augspurger Date: Wed, 8 Jul 2020 07:40:45 -0500 Subject: [PATCH 066/632] Fixed Series.apply performance regression (#35166) --- pandas/core/apply.py | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/pandas/core/apply.py b/pandas/core/apply.py index 9c223d66b727b..d4be660939773 100644 --- a/pandas/core/apply.py +++ b/pandas/core/apply.py @@ -291,16 +291,14 @@ def apply_series_generator(self, partial_result=None) -> Tuple[ResType, "Index"] res_index = res_index.take(successes) else: - for i, v in series_gen_enumeration: - - with option_context("mode.chained_assignment", None): + with option_context("mode.chained_assignment", None): + for i, v in series_gen_enumeration: # ignore SettingWithCopy here in case the user mutates results[i] = self.f(v) - - if isinstance(results[i], ABCSeries): - # If we have a view on v, we need to make a copy because - # series_generator will swap out the underlying data - results[i] = results[i].copy(deep=False) + if isinstance(results[i], ABCSeries): + # If we have a view on v, we need to make a copy because + # series_generator will swap out the underlying data + results[i] = results[i].copy(deep=False) return results, res_index From e9d90b4bad413592615159be16a96959a0fb0a88 Mon Sep 17 00:00:00 2001 From: Irv Lustig Date: Wed, 8 Jul 2020 08:43:18 -0400 Subject: [PATCH 067/632] Fix regression on datetime in MultiIndex (#35140) --- pandas/core/indexing.py | 4 +++ .../indexing/multiindex/test_datetime.py | 30 ++++++++++++++++++- 2 files changed, 33 insertions(+), 1 deletion(-) diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index 708b687434327..04d1dbceb3342 100644 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -1165,6 +1165,10 @@ def _convert_to_indexer(self, key, axis: int, is_setter: bool = False): if len(key) == labels.nlevels: return {"key": key} raise + except InvalidIndexError: + # GH35015, using datetime as column indices raises exception + if not isinstance(labels, ABCMultiIndex): + raise except TypeError: pass except ValueError: diff --git a/pandas/tests/indexing/multiindex/test_datetime.py b/pandas/tests/indexing/multiindex/test_datetime.py index 907d20cd5bd53..a49cb0bc2c43e 100644 --- a/pandas/tests/indexing/multiindex/test_datetime.py +++ b/pandas/tests/indexing/multiindex/test_datetime.py @@ -2,7 +2,16 @@ import numpy as np -from pandas import Index, Period, Series, period_range +from pandas import ( + DataFrame, + Index, + MultiIndex, + Period, + Series, + period_range, + to_datetime, +) +import pandas._testing as tm def test_multiindex_period_datetime(): @@ -20,3 +29,22 @@ def test_multiindex_period_datetime(): # try datetime as index result = s.loc["a", datetime(2012, 1, 1)] assert result == expected + + +def test_multiindex_datetime_columns(): + # 
GH35015, using datetime as column indices raises exception + + mi = MultiIndex.from_tuples( + [(to_datetime("02/29/2020"), to_datetime("03/01/2020"))], names=["a", "b"] + ) + + df = DataFrame([], columns=mi) + + expected_df = DataFrame( + [], + columns=MultiIndex.from_arrays( + [[to_datetime("02/29/2020")], [to_datetime("03/01/2020")]], names=["a", "b"] + ), + ) + + tm.assert_frame_equal(df, expected_df) From 27de04416d1b604e7bd96373cea89fc2e0c2a1ee Mon Sep 17 00:00:00 2001 From: Robin to Roxel <35864265+r-toroxel@users.noreply.github.com> Date: Wed, 8 Jul 2020 14:52:25 +0200 Subject: [PATCH 068/632] TST add test case for drop_duplicates (#35121) --- .../frame/methods/test_drop_duplicates.py | 27 ++++++++++++------- 1 file changed, 18 insertions(+), 9 deletions(-) diff --git a/pandas/tests/frame/methods/test_drop_duplicates.py b/pandas/tests/frame/methods/test_drop_duplicates.py index 7c6391140e2bb..cebec215a0d9d 100644 --- a/pandas/tests/frame/methods/test_drop_duplicates.py +++ b/pandas/tests/frame/methods/test_drop_duplicates.py @@ -333,64 +333,73 @@ def test_drop_duplicates_inplace(): ) # single column df = orig.copy() - df.drop_duplicates("A", inplace=True) + return_value = df.drop_duplicates("A", inplace=True) expected = orig[:2] result = df tm.assert_frame_equal(result, expected) + assert return_value is None df = orig.copy() - df.drop_duplicates("A", keep="last", inplace=True) + return_value = df.drop_duplicates("A", keep="last", inplace=True) expected = orig.loc[[6, 7]] result = df tm.assert_frame_equal(result, expected) + assert return_value is None df = orig.copy() - df.drop_duplicates("A", keep=False, inplace=True) + return_value = df.drop_duplicates("A", keep=False, inplace=True) expected = orig.loc[[]] result = df tm.assert_frame_equal(result, expected) assert len(df) == 0 + assert return_value is None # multi column df = orig.copy() - df.drop_duplicates(["A", "B"], inplace=True) + return_value = df.drop_duplicates(["A", "B"], inplace=True) expected = orig.loc[[0, 1, 2, 3]] result = df tm.assert_frame_equal(result, expected) + assert return_value is None df = orig.copy() - df.drop_duplicates(["A", "B"], keep="last", inplace=True) + return_value = df.drop_duplicates(["A", "B"], keep="last", inplace=True) expected = orig.loc[[0, 5, 6, 7]] result = df tm.assert_frame_equal(result, expected) + assert return_value is None df = orig.copy() - df.drop_duplicates(["A", "B"], keep=False, inplace=True) + return_value = df.drop_duplicates(["A", "B"], keep=False, inplace=True) expected = orig.loc[[0]] result = df tm.assert_frame_equal(result, expected) + assert return_value is None # consider everything orig2 = orig.loc[:, ["A", "B", "C"]].copy() df2 = orig2.copy() - df2.drop_duplicates(inplace=True) + return_value = df2.drop_duplicates(inplace=True) # in this case only expected = orig2.drop_duplicates(["A", "B"]) result = df2 tm.assert_frame_equal(result, expected) + assert return_value is None df2 = orig2.copy() - df2.drop_duplicates(keep="last", inplace=True) + return_value = df2.drop_duplicates(keep="last", inplace=True) expected = orig2.drop_duplicates(["A", "B"], keep="last") result = df2 tm.assert_frame_equal(result, expected) + assert return_value is None df2 = orig2.copy() - df2.drop_duplicates(keep=False, inplace=True) + return_value = df2.drop_duplicates(keep=False, inplace=True) expected = orig2.drop_duplicates(["A", "B"], keep=False) result = df2 tm.assert_frame_equal(result, expected) + assert return_value is None @pytest.mark.parametrize("inplace", [True, False]) From 
16bbae03b29e7c34768e4f48986f4749f72c1a8e Mon Sep 17 00:00:00 2001 From: Mak Sze Chun Date: Wed, 8 Jul 2020 22:38:15 +0800 Subject: [PATCH 069/632] ENH: Add argmax and argmin to ExtensionArray (#27801) --- doc/source/whatsnew/v1.1.0.rst | 1 + pandas/core/arrays/base.py | 36 +++++++++++++++++++++++++- pandas/core/sorting.py | 27 +++++++++++++++++++ pandas/tests/extension/base/methods.py | 36 ++++++++++++++++++++++++++ pandas/tests/extension/test_boolean.py | 17 ++++++++++++ pandas/tests/extension/test_sparse.py | 8 ++++++ 6 files changed, 124 insertions(+), 1 deletion(-) diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst index 386fe3ce2160f..46e0d2a1164e1 100644 --- a/doc/source/whatsnew/v1.1.0.rst +++ b/doc/source/whatsnew/v1.1.0.rst @@ -280,6 +280,7 @@ Other enhancements - :meth:`Styler.highlight_null` now accepts ``subset`` argument (:issue:`31345`) - When writing directly to a sqlite connection :func:`to_sql` now supports the ``multi`` method (:issue:`29921`) - `OptionError` is now exposed in `pandas.errors` (:issue:`27553`) +- Add :meth:`ExtensionArray.argmax` and :meth:`ExtensionArray.argmin` (:issue:`24382`) - :func:`timedelta_range` will now infer a frequency when passed ``start``, ``stop``, and ``periods`` (:issue:`32377`) - Positional slicing on a :class:`IntervalIndex` now supports slices with ``step > 1`` (:issue:`31658`) - :class:`Series.str` now has a `fullmatch` method that matches a regular expression against the entire string in each row of the series, similar to `re.fullmatch` (:issue:`32806`). diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py index 5565b85f8d59a..32a2a30fcfd43 100644 --- a/pandas/core/arrays/base.py +++ b/pandas/core/arrays/base.py @@ -28,7 +28,7 @@ from pandas.core import ops from pandas.core.algorithms import _factorize_array, unique from pandas.core.missing import backfill_1d, pad_1d -from pandas.core.sorting import nargsort +from pandas.core.sorting import nargminmax, nargsort _extension_array_shared_docs: Dict[str, str] = dict() @@ -533,6 +533,40 @@ def argsort( result = nargsort(self, kind=kind, ascending=ascending, na_position="last") return result + def argmin(self): + """ + Return the index of minimum value. + + In case of multiple occurrences of the minimum value, the index + corresponding to the first occurrence is returned. + + Returns + ------- + int + + See Also + -------- + ExtensionArray.argmax + """ + return nargminmax(self, "argmin") + + def argmax(self): + """ + Return the index of maximum value. + + In case of multiple occurrences of the maximum value, the index + corresponding to the first occurrence is returned. + + Returns + ------- + int + + See Also + -------- + ExtensionArray.argmin + """ + return nargminmax(self, "argmax") + def fillna(self, value=None, method=None, limit=None): """ Fill NA/NaN values using the specified method. diff --git a/pandas/core/sorting.py b/pandas/core/sorting.py index da9cbe1023599..ee73aa42701b0 100644 --- a/pandas/core/sorting.py +++ b/pandas/core/sorting.py @@ -319,6 +319,33 @@ def nargsort( return indexer +def nargminmax(values, method: str): + """ + Implementation of np.argmin/argmax but for ExtensionArray and which + handles missing values. 
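+
+    For illustration, a hypothetical call (the values are made up; any
+    ExtensionArray with missing entries behaves the same way)::
+
+        >>> arr = pd.array([pd.NA, 3, 1, pd.NA], dtype="Int64")
+        >>> nargminmax(arr, "argmin")  # NA positions are masked out
+        2
+        >>> nargminmax(arr, "argmax")
+        1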
+
+    Parameters
+    ----------
+    values : ExtensionArray
+    method : {"argmax", "argmin"}
+
+    Returns
+    -------
+    int
+    """
+    assert method in {"argmax", "argmin"}
+    func = np.argmax if method == "argmax" else np.argmin
+
+    mask = np.asarray(isna(values))
+    values = values._values_for_argsort()
+
+    idx = np.arange(len(values))
+    non_nans = values[~mask]
+    non_nan_idx = idx[~mask]
+
+    return non_nan_idx[func(non_nans)]
+
+
 def ensure_key_mapped_multiindex(index, key: Callable, level=None):
     """
     Returns a new MultiIndex in which key has been applied
diff --git a/pandas/tests/extension/base/methods.py b/pandas/tests/extension/base/methods.py
index 874a8dfd4253f..5e1cf30efd534 100644
--- a/pandas/tests/extension/base/methods.py
+++ b/pandas/tests/extension/base/methods.py
@@ -75,6 +75,42 @@ def test_argsort_missing(self, data_missing_for_sorting):
         expected = pd.Series(np.array([1, -1, 0], dtype=np.int64))
         self.assert_series_equal(result, expected)
 
+    def test_argmin_argmax(self, data_for_sorting, data_missing_for_sorting, na_value):
+        # GH 24382
+
+        # data_for_sorting -> [B, C, A] with A < B < C
+        assert data_for_sorting.argmax() == 1
+        assert data_for_sorting.argmin() == 2
+
+        # with repeated values -> first occurrence
+        data = data_for_sorting.take([2, 0, 0, 1, 1, 2])
+        assert data.argmax() == 3
+        assert data.argmin() == 0
+
+        # with missing values
+        # data_missing_for_sorting -> [B, NA, A] with A < B and NA missing.
+        assert data_missing_for_sorting.argmax() == 0
+        assert data_missing_for_sorting.argmin() == 2
+
+    @pytest.mark.parametrize(
+        "method", ["argmax", "argmin"],
+    )
+    def test_argmin_argmax_empty_array(self, method, data):
+        # GH 24382
+        err_msg = "attempt to get"
+        with pytest.raises(ValueError, match=err_msg):
+            getattr(data[:0], method)()
+
+    @pytest.mark.parametrize(
+        "method", ["argmax", "argmin"],
+    )
+    def test_argmin_argmax_all_na(self, method, data, na_value):
+        # all missing with skipna=True is the same as empty
+        err_msg = "attempt to get"
+        data_na = type(data)._from_sequence([na_value, na_value], dtype=data.dtype)
+        with pytest.raises(ValueError, match=err_msg):
+            getattr(data_na, method)()
+
     @pytest.mark.parametrize(
         "na_position, expected",
         [
diff --git a/pandas/tests/extension/test_boolean.py b/pandas/tests/extension/test_boolean.py
index 725067951eeef..8acbeaf0b8170 100644
--- a/pandas/tests/extension/test_boolean.py
+++ b/pandas/tests/extension/test_boolean.py
@@ -235,6 +235,23 @@ def test_searchsorted(self, data_for_sorting, as_series):
     def test_value_counts(self, all_data, dropna):
         return super().test_value_counts(all_data, dropna)
 
+    def test_argmin_argmax(self, data_for_sorting, data_missing_for_sorting):
+        # override because there are only 2 unique values
+
+        # data_for_sorting -> [B, C, A] with A < B < C -> here True, True, False
+        assert data_for_sorting.argmax() == 0
+        assert data_for_sorting.argmin() == 2
+
+        # with repeated values -> first occurrence
+        data = data_for_sorting.take([2, 0, 0, 1, 1, 2])
+        assert data.argmax() == 1
+        assert data.argmin() == 0
+
+        # with missing values
+        # data_missing_for_sorting -> [B, NA, A] with A < B and NA missing.
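+        # for the boolean fixture this is [True, NA, False]: the max (True)
+        # sits at position 0 and the min (False) at position 2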
+ assert data_missing_for_sorting.argmax() == 0 + assert data_missing_for_sorting.argmin() == 2 + class TestCasting(base.BaseCastingTests): pass diff --git a/pandas/tests/extension/test_sparse.py b/pandas/tests/extension/test_sparse.py index f318934ef5e52..68e521b005c02 100644 --- a/pandas/tests/extension/test_sparse.py +++ b/pandas/tests/extension/test_sparse.py @@ -321,6 +321,14 @@ def test_shift_0_periods(self, data): data._sparse_values[0] = data._sparse_values[1] assert result._sparse_values[0] != result._sparse_values[1] + @pytest.mark.parametrize( + "method", ["argmax", "argmin"], + ) + def test_argmin_argmax_all_na(self, method, data, na_value): + # overriding because Sparse[int64, 0] cannot handle na_value + self._check_unsupported(data) + super().test_argmin_argmax_all_na(method, data, na_value) + @pytest.mark.parametrize("box", [pd.array, pd.Series, pd.DataFrame]) def test_equals(self, data, na_value, as_series, box): self._check_unsupported(data) From 2ed252af4f18c53a7121a07a19c4af94f2ddfd4f Mon Sep 17 00:00:00 2001 From: Thomas Smith Date: Wed, 8 Jul 2020 16:35:25 +0100 Subject: [PATCH 070/632] TST: add test to ensure that df.groupby() returns the missing categories when grouping on 2 pd.Categoricals (#35022) --- pandas/tests/groupby/test_categorical.py | 174 ++++++++++++++++++----- 1 file changed, 142 insertions(+), 32 deletions(-) diff --git a/pandas/tests/groupby/test_categorical.py b/pandas/tests/groupby/test_categorical.py index 4de61f719dfbb..118d928ac02f4 100644 --- a/pandas/tests/groupby/test_categorical.py +++ b/pandas/tests/groupby/test_categorical.py @@ -36,6 +36,41 @@ def f(a): return result.reindex(index).sort_index() +_results_for_groupbys_with_missing_categories = dict( + # This maps the builtin groupby functions to their expected outputs for + # missing categories when they are called on a categorical grouper with + # observed=False. Some functions are expected to return NaN, some zero. + # These expected values can be used across several tests (i.e. they are + # the same for SeriesGroupBy and DataFrameGroupBy) but they should only be + # hardcoded in one place. + [ + ("all", np.NaN), + ("any", np.NaN), + ("count", 0), + ("corrwith", np.NaN), + ("first", np.NaN), + ("idxmax", np.NaN), + ("idxmin", np.NaN), + ("last", np.NaN), + ("mad", np.NaN), + ("max", np.NaN), + ("mean", np.NaN), + ("median", np.NaN), + ("min", np.NaN), + ("nth", np.NaN), + ("nunique", 0), + ("prod", np.NaN), + ("quantile", np.NaN), + ("sem", np.NaN), + ("size", 0), + ("skew", np.NaN), + ("std", np.NaN), + ("sum", 0), + ("var", np.NaN), + ] +) + + def test_apply_use_categorical_name(df): cats = qcut(df.C, 4) @@ -1263,12 +1298,13 @@ def test_series_groupby_on_2_categoricals_unobserved( reduction_func: str, observed: bool, request ): # GH 17605 - if reduction_func == "ngroup": pytest.skip("ngroup is not truly a reduction") if reduction_func == "corrwith": # GH 32293 - mark = pytest.mark.xfail(reason="TODO: implemented SeriesGroupBy.corrwith") + mark = pytest.mark.xfail( + reason="TODO: implemented SeriesGroupBy.corrwith. 
See GH 32293" + ) request.node.add_marker(mark) df = pd.DataFrame( @@ -1289,36 +1325,30 @@ def test_series_groupby_on_2_categoricals_unobserved( assert len(result) == expected_length -@pytest.mark.parametrize( - "func, zero_or_nan", - [ - ("all", np.NaN), - ("any", np.NaN), - ("count", 0), - ("first", np.NaN), - ("idxmax", np.NaN), - ("idxmin", np.NaN), - ("last", np.NaN), - ("mad", np.NaN), - ("max", np.NaN), - ("mean", np.NaN), - ("median", np.NaN), - ("min", np.NaN), - ("nth", np.NaN), - ("nunique", 0), - ("prod", np.NaN), - ("quantile", np.NaN), - ("sem", np.NaN), - ("size", 0), - ("skew", np.NaN), - ("std", np.NaN), - ("sum", np.NaN), - ("var", np.NaN), - ], -) -def test_series_groupby_on_2_categoricals_unobserved_zeroes_or_nans(func, zero_or_nan): +def test_series_groupby_on_2_categoricals_unobserved_zeroes_or_nans( + reduction_func: str, request +): # GH 17605 # Tests whether the unobserved categories in the result contain 0 or NaN + + if reduction_func == "ngroup": + pytest.skip("ngroup is not truly a reduction") + + if reduction_func == "corrwith": # GH 32293 + mark = pytest.mark.xfail( + reason="TODO: implemented SeriesGroupBy.corrwith. See GH 32293" + ) + request.node.add_marker(mark) + + if reduction_func == "sum": # GH 31422 + mark = pytest.mark.xfail( + reason=( + "sum should return 0 but currently returns NaN. " + "This is a known bug. See GH 31422." + ) + ) + request.node.add_marker(mark) + df = pd.DataFrame( { "cat_1": pd.Categorical(list("AABB"), categories=list("ABC")), @@ -1327,12 +1357,14 @@ def test_series_groupby_on_2_categoricals_unobserved_zeroes_or_nans(func, zero_o } ) unobserved = [tuple("AC"), tuple("BC"), tuple("CA"), tuple("CB"), tuple("CC")] - args = {"nth": [0]}.get(func, []) + args = {"nth": [0]}.get(reduction_func, []) series_groupby = df.groupby(["cat_1", "cat_2"], observed=False)["value"] - agg = getattr(series_groupby, func) + agg = getattr(series_groupby, reduction_func) result = agg(*args) + zero_or_nan = _results_for_groupbys_with_missing_categories[reduction_func] + for idx in unobserved: val = result.loc[idx] assert (pd.isna(zero_or_nan) and pd.isna(val)) or (val == zero_or_nan) @@ -1342,6 +1374,84 @@ def test_series_groupby_on_2_categoricals_unobserved_zeroes_or_nans(func, zero_o assert np.issubdtype(result.dtype, np.integer) +def test_dataframe_groupby_on_2_categoricals_when_observed_is_true(reduction_func: str): + # GH 23865 + # GH 27075 + # Ensure that df.groupby, when 'by' is two pd.Categorical variables, + # does not return the categories that are not in df when observed=True + if reduction_func == "ngroup": + pytest.skip("ngroup does not return the Categories on the index") + + df = pd.DataFrame( + { + "cat_1": pd.Categorical(list("AABB"), categories=list("ABC")), + "cat_2": pd.Categorical(list("1111"), categories=list("12")), + "value": [0.1, 0.1, 0.1, 0.1], + } + ) + unobserved_cats = [("A", "2"), ("B", "2"), ("C", "1"), ("C", "2")] + + df_grp = df.groupby(["cat_1", "cat_2"], observed=True) + + args = {"nth": [0], "corrwith": [df]}.get(reduction_func, []) + res = getattr(df_grp, reduction_func)(*args) + + for cat in unobserved_cats: + assert cat not in res.index + + +@pytest.mark.parametrize("observed", [False, None]) +def test_dataframe_groupby_on_2_categoricals_when_observed_is_false( + reduction_func: str, observed: bool, request +): + # GH 23865 + # GH 27075 + # Ensure that df.groupby, when 'by' is two pd.Categorical variables, + # returns the categories that are not in df when observed=False/None + + if reduction_func == "ngroup": + 
pytest.skip("ngroup does not return the Categories on the index") + + if reduction_func == "count": # GH 35028 + mark = pytest.mark.xfail( + reason=( + "DataFrameGroupBy.count returns np.NaN for missing " + "categories, when it should return 0. See GH 35028" + ) + ) + request.node.add_marker(mark) + + if reduction_func == "sum": # GH 31422 + mark = pytest.mark.xfail( + reason=( + "sum should return 0 but currently returns NaN. " + "This is a known bug. See GH 31422." + ) + ) + request.node.add_marker(mark) + + df = pd.DataFrame( + { + "cat_1": pd.Categorical(list("AABB"), categories=list("ABC")), + "cat_2": pd.Categorical(list("1111"), categories=list("12")), + "value": [0.1, 0.1, 0.1, 0.1], + } + ) + unobserved_cats = [("A", "2"), ("B", "2"), ("C", "1"), ("C", "2")] + + df_grp = df.groupby(["cat_1", "cat_2"], observed=observed) + + args = {"nth": [0], "corrwith": [df]}.get(reduction_func, []) + res = getattr(df_grp, reduction_func)(*args) + + expected = _results_for_groupbys_with_missing_categories[reduction_func] + + if expected is np.nan: + assert res.loc[unobserved_cats].isnull().all().all() + else: + assert (res.loc[unobserved_cats] == expected).all().all() + + def test_series_groupby_categorical_aggregation_getitem(): # GH 8870 d = {"foo": [10, 8, 4, 1], "bar": [10, 20, 30, 40], "baz": ["d", "c", "d", "c"]} From 74f77a1f010e99379e58df5f7612743b4b8616d5 Mon Sep 17 00:00:00 2001 From: Vijay Sai Mutyala Date: Wed, 8 Jul 2020 15:36:28 +0000 Subject: [PATCH 071/632] =?UTF-8?q?DOC=20-=20Moving=20Tips=20and=20Trick?= =?UTF-8?q?=20from=20wiki=20to=20Style=20Guide=20-=20added=20Reading=20?= =?UTF-8?q?=E2=80=A6=20(#34366)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- doc/source/development/code_style.rst | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/doc/source/development/code_style.rst b/doc/source/development/code_style.rst index 6d33537a40175..11d0c35f92ff5 100644 --- a/doc/source/development/code_style.rst +++ b/doc/source/development/code_style.rst @@ -159,3 +159,18 @@ For example: # wrong from common import test_base + + +Miscellaneous +============= + +Reading from a url +------------------ + +**Good:** + +.. code-block:: python + + from pandas.io.common import urlopen + with urlopen('http://www.google.com') as url: + raw_text = url.read() From 42fd7e7d9a2c115af9a52f7f896d48b75a271efe Mon Sep 17 00:00:00 2001 From: Joris Van den Bossche Date: Wed, 8 Jul 2020 18:15:55 +0200 Subject: [PATCH 072/632] ENH: concat of nullable int + bool preserves int dtype (#34985) --- doc/source/whatsnew/v1.1.0.rst | 2 +- pandas/core/arrays/integer.py | 9 +++-- pandas/tests/arrays/integer/test_concat.py | 45 +++++++++++++++++++++- 3 files changed, 50 insertions(+), 6 deletions(-) diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst index 46e0d2a1164e1..24283d2c2e48d 100644 --- a/doc/source/whatsnew/v1.1.0.rst +++ b/doc/source/whatsnew/v1.1.0.rst @@ -322,7 +322,7 @@ Other enhancements - :meth:`DataFrame.hist`, :meth:`Series.hist`, :meth:`core.groupby.DataFrameGroupBy.hist`, and :meth:`core.groupby.SeriesGroupBy.hist` have gained the ``legend`` argument. Set to True to show a legend in the histogram. (:issue:`6279`) - :func:`concat` and :meth:`~DataFrame.append` now preserve extension dtypes, for example combining a nullable integer column with a numpy integer column will no longer - result in object dtype but preserve the integer dtype (:issue:`33607`, :issue:`34339`). 
+ result in object dtype but preserve the integer dtype (:issue:`33607`, :issue:`34339`, :issue:`34095`). - :meth:`~pandas.io.gbq.read_gbq` now allows to disable progress bar (:issue:`33360`). - :meth:`~pandas.io.gbq.read_gbq` now supports the ``max_results`` kwarg from ``pandas-gbq`` (:issue:`34639`). - :meth:`DataFrame.cov` and :meth:`Series.cov` now support a new parameter ddof to support delta degrees of freedom as in the corresponding numpy methods (:issue:`34611`). diff --git a/pandas/core/arrays/integer.py b/pandas/core/arrays/integer.py index df43b5d6115ba..7be7ef3637ee5 100644 --- a/pandas/core/arrays/integer.py +++ b/pandas/core/arrays/integer.py @@ -92,10 +92,13 @@ def construct_array_type(cls) -> Type["IntegerArray"]: return IntegerArray def _get_common_dtype(self, dtypes: List[DtypeObj]) -> Optional[DtypeObj]: - # for now only handle other integer types + # we only handle nullable EA dtypes and numeric numpy dtypes if not all( - isinstance(t, _IntegerDtype) - or (isinstance(t, np.dtype) and np.issubdtype(t, np.integer)) + isinstance(t, BaseMaskedDtype) + or ( + isinstance(t, np.dtype) + and (np.issubdtype(t, np.number) or np.issubdtype(t, np.bool_)) + ) for t in dtypes ): return None diff --git a/pandas/tests/arrays/integer/test_concat.py b/pandas/tests/arrays/integer/test_concat.py index 3ace35700bd3e..fc24709deb82c 100644 --- a/pandas/tests/arrays/integer/test_concat.py +++ b/pandas/tests/arrays/integer/test_concat.py @@ -1,3 +1,4 @@ +import numpy as np import pytest import pandas as pd @@ -15,12 +16,52 @@ (["Int32", "UInt32"], "Int64"), # this still gives object (awaiting float extension dtype) (["Int64", "UInt64"], "object"), + (["Int64", "boolean"], "Int64"), + (["UInt8", "boolean"], "UInt8"), ], ) def test_concat_series(to_concat_dtypes, result_dtype): - result = pd.concat([pd.Series([1, 2, pd.NA], dtype=t) for t in to_concat_dtypes]) - expected = pd.concat([pd.Series([1, 2, pd.NA], dtype=object)] * 2).astype( + result = pd.concat([pd.Series([0, 1, pd.NA], dtype=t) for t in to_concat_dtypes]) + expected = pd.concat([pd.Series([0, 1, pd.NA], dtype=object)] * 2).astype( result_dtype ) tm.assert_series_equal(result, expected) + + # order doesn't matter for result + result = pd.concat( + [pd.Series([0, 1, pd.NA], dtype=t) for t in to_concat_dtypes[::-1]] + ) + expected = pd.concat([pd.Series([0, 1, pd.NA], dtype=object)] * 2).astype( + result_dtype + ) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "to_concat_dtypes, result_dtype", + [ + (["Int64", "int64"], "Int64"), + (["UInt64", "uint64"], "UInt64"), + (["Int8", "int8"], "Int8"), + (["Int8", "int16"], "Int16"), + (["UInt8", "int8"], "Int16"), + (["Int32", "uint32"], "Int64"), + # this still gives object (awaiting float extension dtype) + (["Int64", "uint64"], "object"), + (["Int64", "bool"], "Int64"), + (["UInt8", "bool"], "UInt8"), + ], +) +def test_concat_series_with_numpy(to_concat_dtypes, result_dtype): + + s1 = pd.Series([0, 1, pd.NA], dtype=to_concat_dtypes[0]) + s2 = pd.Series(np.array([0, 1], dtype=to_concat_dtypes[1])) + result = pd.concat([s1, s2], ignore_index=True) + expected = pd.Series([0, 1, pd.NA, 0, 1], dtype=object).astype(result_dtype) + tm.assert_series_equal(result, expected) + + # order doesn't matter for result + result = pd.concat([s2, s1], ignore_index=True) + expected = pd.Series([0, 1, 0, 1, pd.NA], dtype=object).astype(result_dtype) + tm.assert_series_equal(result, expected) From 0cef6057f50705d3a4bc26c9bc93c0dadb56c4ed Mon Sep 17 00:00:00 2001 From: jbrockmendel 
Date: Wed, 8 Jul 2020 14:50:16 -0700 Subject: [PATCH 073/632] REF: collect get_dst_info-using functions in tslibs.vectorized (#35168) --- asv_bench/benchmarks/tslibs/resolution.py | 5 +- asv_bench/benchmarks/tslibs/tslib.py | 5 +- pandas/_libs/tslib.pyx | 170 +-------- pandas/_libs/tslibs/__init__.py | 12 + pandas/_libs/tslibs/conversion.pxd | 1 - pandas/_libs/tslibs/conversion.pyx | 127 ------- pandas/_libs/tslibs/offsets.pyx | 3 +- pandas/_libs/tslibs/period.pxd | 5 + pandas/_libs/tslibs/period.pyx | 57 --- pandas/_libs/tslibs/resolution.pyx | 98 +---- pandas/_libs/tslibs/vectorized.pyx | 440 ++++++++++++++++++++++ pandas/core/arrays/datetimes.py | 20 +- pandas/core/arrays/period.py | 3 +- pandas/core/dtypes/cast.py | 5 +- pandas/core/indexes/datetimes.py | 12 +- pandas/tests/tslibs/test_api.py | 6 + setup.py | 2 + 17 files changed, 504 insertions(+), 467 deletions(-) create mode 100644 pandas/_libs/tslibs/vectorized.pyx diff --git a/asv_bench/benchmarks/tslibs/resolution.py b/asv_bench/benchmarks/tslibs/resolution.py index 274aa1ad6d4a9..280be7932d4db 100644 --- a/asv_bench/benchmarks/tslibs/resolution.py +++ b/asv_bench/benchmarks/tslibs/resolution.py @@ -23,7 +23,10 @@ import numpy as np import pytz -from pandas._libs.tslibs.resolution import get_resolution +try: + from pandas._libs.tslibs import get_resolution +except ImportError: + from pandas._libs.tslibs.resolution import get_resolution class TimeResolution: diff --git a/asv_bench/benchmarks/tslibs/tslib.py b/asv_bench/benchmarks/tslibs/tslib.py index eacf5a5731dc2..5952a402bf89a 100644 --- a/asv_bench/benchmarks/tslibs/tslib.py +++ b/asv_bench/benchmarks/tslibs/tslib.py @@ -21,7 +21,10 @@ import numpy as np import pytz -from pandas._libs.tslib import ints_to_pydatetime +try: + from pandas._libs.tslibs import ints_to_pydatetime +except ImportError: + from pandas._libs.tslib import ints_to_pydatetime _tzs = [ None, diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx index 3472dbf161b8e..d70d0378a2621 100644 --- a/pandas/_libs/tslib.pyx +++ b/pandas/_libs/tslib.pyx @@ -4,18 +4,14 @@ from cpython.datetime cimport ( PyDate_Check, PyDateTime_Check, PyDateTime_IMPORT, - date, datetime, - time, - timedelta, - tzinfo, ) # import datetime C API PyDateTime_IMPORT cimport numpy as cnp -from numpy cimport float64_t, int64_t, ndarray, uint8_t, intp_t +from numpy cimport float64_t, int64_t, ndarray import numpy as np cnp.import_array() @@ -42,11 +38,6 @@ from pandas._libs.tslibs.np_datetime import OutOfBoundsDatetime from pandas._libs.tslibs.parsing import parse_datetime_string -from pandas._libs.tslibs.timezones cimport ( - get_dst_info, - is_utc, - is_tzlocal, -) from pandas._libs.tslibs.conversion cimport ( _TSObject, cast_from_unit, @@ -60,13 +51,10 @@ from pandas._libs.tslibs.nattype cimport ( c_nat_strings as nat_strings, ) -from pandas._libs.tslibs.offsets cimport to_offset - -from pandas._libs.tslibs.timestamps cimport create_timestamp_from_ts, _Timestamp +from pandas._libs.tslibs.timestamps cimport _Timestamp from pandas._libs.tslibs.timestamps import Timestamp from pandas._libs.tslibs.tzconversion cimport ( - tz_convert_utc_to_tzlocal, tz_localize_to_utc_single, ) @@ -74,160 +62,6 @@ from pandas._libs.tslibs.tzconversion cimport ( from pandas._libs.missing cimport checknull_with_nat_and_na -cdef inline object create_datetime_from_ts( - int64_t value, - npy_datetimestruct dts, - tzinfo tz, - object freq, - bint fold, -): - """ - Convenience routine to construct a datetime.datetime from its parts. 
- """ - return datetime( - dts.year, dts.month, dts.day, dts.hour, dts.min, dts.sec, dts.us, tz, fold=fold - ) - - -cdef inline object create_date_from_ts( - int64_t value, - npy_datetimestruct dts, - tzinfo tz, - object freq, - bint fold -): - """ - Convenience routine to construct a datetime.date from its parts. - """ - # GH 25057 add fold argument to match other func_create signatures - return date(dts.year, dts.month, dts.day) - - -cdef inline object create_time_from_ts( - int64_t value, - npy_datetimestruct dts, - tzinfo tz, - object freq, - bint fold -): - """ - Convenience routine to construct a datetime.time from its parts. - """ - return time(dts.hour, dts.min, dts.sec, dts.us, tz, fold=fold) - - -@cython.wraparound(False) -@cython.boundscheck(False) -def ints_to_pydatetime( - const int64_t[:] arr, - tzinfo tz=None, - object freq=None, - bint fold=False, - str box="datetime" -): - """ - Convert an i8 repr to an ndarray of datetimes, date, time or Timestamp. - - Parameters - ---------- - arr : array of i8 - tz : str, optional - convert to this timezone - freq : str/Offset, optional - freq to convert - fold : bint, default is 0 - Due to daylight saving time, one wall clock time can occur twice - when shifting from summer to winter time; fold describes whether the - datetime-like corresponds to the first (0) or the second time (1) - the wall clock hits the ambiguous time - - .. versionadded:: 1.1.0 - box : {'datetime', 'timestamp', 'date', 'time'}, default 'datetime' - * If datetime, convert to datetime.datetime - * If date, convert to datetime.date - * If time, convert to datetime.time - * If Timestamp, convert to pandas.Timestamp - - Returns - ------- - ndarray of dtype specified by box - """ - cdef: - Py_ssize_t i, n = len(arr) - ndarray[int64_t] trans - int64_t[:] deltas - intp_t[:] pos - npy_datetimestruct dts - object dt, new_tz - str typ - int64_t value, local_value, delta = NPY_NAT # dummy for delta - ndarray[object] result = np.empty(n, dtype=object) - object (*func_create)(int64_t, npy_datetimestruct, tzinfo, object, bint) - bint use_utc = False, use_tzlocal = False, use_fixed = False - bint use_pytz = False - - if box == "date": - assert (tz is None), "tz should be None when converting to date" - - func_create = create_date_from_ts - elif box == "timestamp": - func_create = create_timestamp_from_ts - - if isinstance(freq, str): - freq = to_offset(freq) - elif box == "time": - func_create = create_time_from_ts - elif box == "datetime": - func_create = create_datetime_from_ts - else: - raise ValueError( - "box must be one of 'datetime', 'date', 'time' or 'timestamp'" - ) - - if is_utc(tz) or tz is None: - use_utc = True - elif is_tzlocal(tz): - use_tzlocal = True - else: - trans, deltas, typ = get_dst_info(tz) - if typ not in ["pytz", "dateutil"]: - # static/fixed; in this case we know that len(delta) == 1 - use_fixed = True - delta = deltas[0] - else: - pos = trans.searchsorted(arr, side="right") - 1 - use_pytz = typ == "pytz" - - for i in range(n): - new_tz = tz - value = arr[i] - - if value == NPY_NAT: - result[i] = NaT - else: - if use_utc: - local_value = value - elif use_tzlocal: - local_value = tz_convert_utc_to_tzlocal(value, tz) - elif use_fixed: - local_value = value + delta - elif not use_pytz: - # i.e. dateutil - # no zone-name change for dateutil tzs - dst etc - # represented in single object. 
- local_value = value + deltas[pos[i]] - else: - # pytz - # find right representation of dst etc in pytz timezone - new_tz = tz._tzinfos[tz._transition_info[pos[i]]] - local_value = value + deltas[pos[i]] - - dt64_to_dtstruct(local_value, &dts) - result[i] = func_create(value, dts, new_tz, freq, fold) - - return result - - def _test_parse_iso8601(ts: str): """ TESTING ONLY: Parse string into Timestamp using iso8601 parser. Used diff --git a/pandas/_libs/tslibs/__init__.py b/pandas/_libs/tslibs/__init__.py index 76e356370de70..c2f3478a50ab4 100644 --- a/pandas/_libs/tslibs/__init__.py +++ b/pandas/_libs/tslibs/__init__.py @@ -11,8 +11,13 @@ "Period", "Resolution", "Timedelta", + "normalize_i8_timestamps", + "is_date_array_normalized", + "dt64arr_to_periodarr", "delta_to_nanoseconds", + "ints_to_pydatetime", "ints_to_pytimedelta", + "get_resolution", "Timestamp", "tz_convert_single", "to_offset", @@ -30,3 +35,10 @@ from .timedeltas import Timedelta, delta_to_nanoseconds, ints_to_pytimedelta from .timestamps import Timestamp from .tzconversion import tz_convert_single +from .vectorized import ( + dt64arr_to_periodarr, + get_resolution, + ints_to_pydatetime, + is_date_array_normalized, + normalize_i8_timestamps, +) diff --git a/pandas/_libs/tslibs/conversion.pxd b/pandas/_libs/tslibs/conversion.pxd index 0eb94fecf7d6b..73772e5ab4577 100644 --- a/pandas/_libs/tslibs/conversion.pxd +++ b/pandas/_libs/tslibs/conversion.pxd @@ -25,5 +25,4 @@ cdef int64_t get_datetime64_nanos(object val) except? -1 cpdef datetime localize_pydatetime(datetime dt, object tz) cdef int64_t cast_from_unit(object ts, str unit) except? -1 -cpdef ndarray[int64_t] normalize_i8_timestamps(const int64_t[:] stamps, tzinfo tz) cdef int64_t normalize_i8_stamp(int64_t local_val) nogil diff --git a/pandas/_libs/tslibs/conversion.pyx b/pandas/_libs/tslibs/conversion.pyx index 36a4a1f60d8b9..31d2d0e9572f5 100644 --- a/pandas/_libs/tslibs/conversion.pyx +++ b/pandas/_libs/tslibs/conversion.pyx @@ -765,73 +765,6 @@ cpdef inline datetime localize_pydatetime(datetime dt, object tz): # ---------------------------------------------------------------------- # Normalization - -@cython.wraparound(False) -@cython.boundscheck(False) -cpdef ndarray[int64_t] normalize_i8_timestamps(const int64_t[:] stamps, tzinfo tz): - """ - Normalize each of the (nanosecond) timezone aware timestamps in the given - array by rounding down to the beginning of the day (i.e. midnight). - This is midnight for timezone, `tz`. 
- - Parameters - ---------- - stamps : int64 ndarray - tz : tzinfo or None - - Returns - ------- - result : int64 ndarray of converted of normalized nanosecond timestamps - """ - cdef: - Py_ssize_t i, n = len(stamps) - int64_t[:] result = np.empty(n, dtype=np.int64) - ndarray[int64_t] trans - int64_t[:] deltas - str typ - Py_ssize_t[:] pos - int64_t delta, local_val - - if tz is None or is_utc(tz): - with nogil: - for i in range(n): - if stamps[i] == NPY_NAT: - result[i] = NPY_NAT - continue - local_val = stamps[i] - result[i] = normalize_i8_stamp(local_val) - elif is_tzlocal(tz): - for i in range(n): - if stamps[i] == NPY_NAT: - result[i] = NPY_NAT - continue - local_val = tz_convert_utc_to_tzlocal(stamps[i], tz) - result[i] = normalize_i8_stamp(local_val) - else: - # Adjust datetime64 timestamp, recompute datetimestruct - trans, deltas, typ = get_dst_info(tz) - - if typ not in ['pytz', 'dateutil']: - # static/fixed; in this case we know that len(delta) == 1 - delta = deltas[0] - for i in range(n): - if stamps[i] == NPY_NAT: - result[i] = NPY_NAT - continue - local_val = stamps[i] + delta - result[i] = normalize_i8_stamp(local_val) - else: - pos = trans.searchsorted(stamps, side='right') - 1 - for i in range(n): - if stamps[i] == NPY_NAT: - result[i] = NPY_NAT - continue - local_val = stamps[i] + deltas[pos[i]] - result[i] = normalize_i8_stamp(local_val) - - return result.base # `.base` to access underlying ndarray - - @cython.cdivision cdef inline int64_t normalize_i8_stamp(int64_t local_val) nogil: """ @@ -848,63 +781,3 @@ cdef inline int64_t normalize_i8_stamp(int64_t local_val) nogil: cdef: int64_t day_nanos = 24 * 3600 * 1_000_000_000 return local_val - (local_val % day_nanos) - - -@cython.wraparound(False) -@cython.boundscheck(False) -def is_date_array_normalized(const int64_t[:] stamps, tzinfo tz=None): - """ - Check if all of the given (nanosecond) timestamps are normalized to - midnight, i.e. hour == minute == second == 0. If the optional timezone - `tz` is not None, then this is midnight for this timezone. 
- - Parameters - ---------- - stamps : int64 ndarray - tz : tzinfo or None - - Returns - ------- - is_normalized : bool True if all stamps are normalized - """ - cdef: - Py_ssize_t i, n = len(stamps) - ndarray[int64_t] trans - int64_t[:] deltas - intp_t[:] pos - int64_t local_val, delta - str typ - int64_t day_nanos = 24 * 3600 * 1_000_000_000 - - if tz is None or is_utc(tz): - for i in range(n): - local_val = stamps[i] - if local_val % day_nanos != 0: - return False - - elif is_tzlocal(tz): - for i in range(n): - local_val = tz_convert_utc_to_tzlocal(stamps[i], tz) - if local_val % day_nanos != 0: - return False - else: - trans, deltas, typ = get_dst_info(tz) - - if typ not in ['pytz', 'dateutil']: - # static/fixed; in this case we know that len(delta) == 1 - delta = deltas[0] - for i in range(n): - # Adjust datetime64 timestamp, recompute datetimestruct - local_val = stamps[i] + delta - if local_val % day_nanos != 0: - return False - - else: - pos = trans.searchsorted(stamps) - 1 - for i in range(n): - # Adjust datetime64 timestamp, recompute datetimestruct - local_val = stamps[i] + deltas[pos[i]] - if local_val % day_nanos != 0: - return False - - return True diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx index e4d05e0d70e2f..fb07e3fe7547e 100644 --- a/pandas/_libs/tslibs/offsets.pyx +++ b/pandas/_libs/tslibs/offsets.pyx @@ -38,7 +38,6 @@ from pandas._libs.tslibs.ccalendar cimport DAY_NANOS, get_days_in_month, dayofwe from pandas._libs.tslibs.conversion cimport ( convert_datetime_to_tsobject, localize_pydatetime, - normalize_i8_timestamps, ) from pandas._libs.tslibs.nattype cimport NPY_NAT, c_NaT as NaT from pandas._libs.tslibs.np_datetime cimport ( @@ -92,6 +91,8 @@ def apply_index_wraps(func): result = np.asarray(result) if self.normalize: + # TODO: Avoid circular/runtime import + from .vectorized import normalize_i8_timestamps result = normalize_i8_timestamps(result.view("i8"), None) return result diff --git a/pandas/_libs/tslibs/period.pxd b/pandas/_libs/tslibs/period.pxd index eb11a4a572e85..9c0342e239a89 100644 --- a/pandas/_libs/tslibs/period.pxd +++ b/pandas/_libs/tslibs/period.pxd @@ -1 +1,6 @@ +from numpy cimport int64_t + +from .np_datetime cimport npy_datetimestruct + cdef bint is_period_object(object obj) +cdef int64_t get_period_ordinal(npy_datetimestruct *dts, int freq) nogil diff --git a/pandas/_libs/tslibs/period.pyx b/pandas/_libs/tslibs/period.pyx index c0641297c4b8a..e6ba1968797ed 100644 --- a/pandas/_libs/tslibs/period.pyx +++ b/pandas/_libs/tslibs/period.pyx @@ -14,7 +14,6 @@ import cython from cpython.datetime cimport ( datetime, - tzinfo, PyDate_Check, PyDateTime_Check, PyDateTime_IMPORT, @@ -41,7 +40,6 @@ cdef extern from "src/datetime/np_datetime.h": cimport pandas._libs.tslibs.util as util from pandas._libs.tslibs.timestamps import Timestamp -from pandas._libs.tslibs.timezones cimport is_utc, is_tzlocal, get_dst_info from pandas._libs.tslibs.timedeltas import Timedelta from pandas._libs.tslibs.timedeltas cimport ( delta_to_nanoseconds, @@ -91,7 +89,6 @@ from pandas._libs.tslibs.offsets cimport ( is_offset_object, ) from pandas._libs.tslibs.offsets import INVALID_FREQ_ERR_MSG -from pandas._libs.tslibs.tzconversion cimport tz_convert_utc_to_tzlocal cdef: @@ -1416,60 +1413,6 @@ def extract_freq(ndarray[object] values): # period helpers -@cython.wraparound(False) -@cython.boundscheck(False) -def dt64arr_to_periodarr(const int64_t[:] stamps, int freq, tzinfo tz): - cdef: - Py_ssize_t n = len(stamps) - int64_t[:] result = np.empty(n, 
dtype=np.int64) - ndarray[int64_t] trans - int64_t[:] deltas - Py_ssize_t[:] pos - npy_datetimestruct dts - int64_t local_val - - if is_utc(tz) or tz is None: - with nogil: - for i in range(n): - if stamps[i] == NPY_NAT: - result[i] = NPY_NAT - continue - dt64_to_dtstruct(stamps[i], &dts) - result[i] = get_period_ordinal(&dts, freq) - - elif is_tzlocal(tz): - for i in range(n): - if stamps[i] == NPY_NAT: - result[i] = NPY_NAT - continue - local_val = tz_convert_utc_to_tzlocal(stamps[i], tz) - dt64_to_dtstruct(local_val, &dts) - result[i] = get_period_ordinal(&dts, freq) - else: - # Adjust datetime64 timestamp, recompute datetimestruct - trans, deltas, typ = get_dst_info(tz) - - if typ not in ['pytz', 'dateutil']: - # static/fixed; in this case we know that len(delta) == 1 - for i in range(n): - if stamps[i] == NPY_NAT: - result[i] = NPY_NAT - continue - dt64_to_dtstruct(stamps[i] + deltas[0], &dts) - result[i] = get_period_ordinal(&dts, freq) - else: - pos = trans.searchsorted(stamps, side='right') - 1 - - for i in range(n): - if stamps[i] == NPY_NAT: - result[i] = NPY_NAT - continue - dt64_to_dtstruct(stamps[i] + deltas[pos[i]], &dts) - result[i] = get_period_ordinal(&dts, freq) - - return result.base # .base to get underlying ndarray - - DIFFERENT_FREQ = ("Input has different freq={other_freq} " "from {cls}(freq={own_freq})") diff --git a/pandas/_libs/tslibs/resolution.pyx b/pandas/_libs/tslibs/resolution.pyx index d5f10374d2860..d2861d8e9fe8d 100644 --- a/pandas/_libs/tslibs/resolution.pyx +++ b/pandas/_libs/tslibs/resolution.pyx @@ -1,105 +1,9 @@ -from cpython.datetime cimport tzinfo import numpy as np -from numpy cimport ndarray, int64_t, int32_t - -from pandas._libs.tslibs.util cimport get_nat +from numpy cimport int32_t from pandas._libs.tslibs.dtypes import Resolution -from pandas._libs.tslibs.np_datetime cimport ( - npy_datetimestruct, dt64_to_dtstruct) -from pandas._libs.tslibs.timezones cimport ( - is_utc, is_tzlocal, get_dst_info) from pandas._libs.tslibs.ccalendar cimport get_days_in_month -from pandas._libs.tslibs.tzconversion cimport tz_convert_utc_to_tzlocal - -# ---------------------------------------------------------------------- -# Constants - -cdef: - int64_t NPY_NAT = get_nat() - - int RESO_NS = 0 - int RESO_US = 1 - int RESO_MS = 2 - int RESO_SEC = 3 - int RESO_MIN = 4 - int RESO_HR = 5 - int RESO_DAY = 6 - int RESO_MTH = 7 - int RESO_QTR = 8 - int RESO_YR = 9 - - -# ---------------------------------------------------------------------- - - -def get_resolution(const int64_t[:] stamps, tzinfo tz=None): - cdef: - Py_ssize_t i, n = len(stamps) - npy_datetimestruct dts - int reso = RESO_DAY, curr_reso - ndarray[int64_t] trans - int64_t[:] deltas - Py_ssize_t[:] pos - int64_t local_val, delta - - if is_utc(tz) or tz is None: - for i in range(n): - if stamps[i] == NPY_NAT: - continue - dt64_to_dtstruct(stamps[i], &dts) - curr_reso = _reso_stamp(&dts) - if curr_reso < reso: - reso = curr_reso - elif is_tzlocal(tz): - for i in range(n): - if stamps[i] == NPY_NAT: - continue - local_val = tz_convert_utc_to_tzlocal(stamps[i], tz) - dt64_to_dtstruct(local_val, &dts) - curr_reso = _reso_stamp(&dts) - if curr_reso < reso: - reso = curr_reso - else: - # Adjust datetime64 timestamp, recompute datetimestruct - trans, deltas, typ = get_dst_info(tz) - - if typ not in ['pytz', 'dateutil']: - # static/fixed; in this case we know that len(delta) == 1 - delta = deltas[0] - for i in range(n): - if stamps[i] == NPY_NAT: - continue - dt64_to_dtstruct(stamps[i] + delta, &dts) - curr_reso = 
_reso_stamp(&dts) - if curr_reso < reso: - reso = curr_reso - else: - pos = trans.searchsorted(stamps, side='right') - 1 - for i in range(n): - if stamps[i] == NPY_NAT: - continue - dt64_to_dtstruct(stamps[i] + deltas[pos[i]], &dts) - curr_reso = _reso_stamp(&dts) - if curr_reso < reso: - reso = curr_reso - - return Resolution(reso) - - -cdef inline int _reso_stamp(npy_datetimestruct *dts): - if dts.us != 0: - if dts.us % 1000 == 0: - return RESO_MS - return RESO_US - elif dts.sec != 0: - return RESO_SEC - elif dts.min != 0: - return RESO_MIN - elif dts.hour != 0: - return RESO_HR - return RESO_DAY # ---------------------------------------------------------------------- diff --git a/pandas/_libs/tslibs/vectorized.pyx b/pandas/_libs/tslibs/vectorized.pyx new file mode 100644 index 0000000000000..c8f8daf6724c2 --- /dev/null +++ b/pandas/_libs/tslibs/vectorized.pyx @@ -0,0 +1,440 @@ +import cython + +from cpython.datetime cimport datetime, date, time, tzinfo + +import numpy as np +from numpy cimport int64_t, intp_t, ndarray + +from .conversion cimport normalize_i8_stamp +from .dtypes import Resolution +from .nattype cimport NPY_NAT, c_NaT as NaT +from .np_datetime cimport npy_datetimestruct, dt64_to_dtstruct +from .offsets cimport to_offset +from .period cimport get_period_ordinal +from .timestamps cimport create_timestamp_from_ts +from .timezones cimport is_utc, is_tzlocal, get_dst_info +from .tzconversion cimport tz_convert_utc_to_tzlocal + +# ------------------------------------------------------------------------- + +cdef inline object create_datetime_from_ts( + int64_t value, + npy_datetimestruct dts, + tzinfo tz, + object freq, + bint fold, +): + """ + Convenience routine to construct a datetime.datetime from its parts. + """ + return datetime( + dts.year, dts.month, dts.day, dts.hour, dts.min, dts.sec, dts.us, + tz, fold=fold, + ) + + +cdef inline object create_date_from_ts( + int64_t value, + npy_datetimestruct dts, + tzinfo tz, + object freq, + bint fold +): + """ + Convenience routine to construct a datetime.date from its parts. + """ + # GH#25057 add fold argument to match other func_create signatures + return date(dts.year, dts.month, dts.day) + + +cdef inline object create_time_from_ts( + int64_t value, + npy_datetimestruct dts, + tzinfo tz, + object freq, + bint fold +): + """ + Convenience routine to construct a datetime.time from its parts. + """ + return time(dts.hour, dts.min, dts.sec, dts.us, tz, fold=fold) + + +@cython.wraparound(False) +@cython.boundscheck(False) +def ints_to_pydatetime( + const int64_t[:] arr, + tzinfo tz=None, + object freq=None, + bint fold=False, + str box="datetime" +): + """ + Convert an i8 repr to an ndarray of datetimes, date, time or Timestamp. + + Parameters + ---------- + arr : array of i8 + tz : str, optional + convert to this timezone + freq : str/Offset, optional + freq to convert + fold : bint, default is 0 + Due to daylight saving time, one wall clock time can occur twice + when shifting from summer to winter time; fold describes whether the + datetime-like corresponds to the first (0) or the second time (1) + the wall clock hits the ambiguous time + + .. 
versionadded:: 1.1.0 + box : {'datetime', 'timestamp', 'date', 'time'}, default 'datetime' + * If datetime, convert to datetime.datetime + * If date, convert to datetime.date + * If time, convert to datetime.time + * If Timestamp, convert to pandas.Timestamp + + Returns + ------- + ndarray of dtype specified by box + """ + cdef: + Py_ssize_t i, n = len(arr) + ndarray[int64_t] trans + int64_t[:] deltas + intp_t[:] pos + npy_datetimestruct dts + object dt, new_tz + str typ + int64_t value, local_value, delta = NPY_NAT # dummy for delta + ndarray[object] result = np.empty(n, dtype=object) + object (*func_create)(int64_t, npy_datetimestruct, tzinfo, object, bint) + bint use_utc = False, use_tzlocal = False, use_fixed = False + bint use_pytz = False + + if box == "date": + assert (tz is None), "tz should be None when converting to date" + + func_create = create_date_from_ts + elif box == "timestamp": + func_create = create_timestamp_from_ts + + if isinstance(freq, str): + freq = to_offset(freq) + elif box == "time": + func_create = create_time_from_ts + elif box == "datetime": + func_create = create_datetime_from_ts + else: + raise ValueError( + "box must be one of 'datetime', 'date', 'time' or 'timestamp'" + ) + + if is_utc(tz) or tz is None: + use_utc = True + elif is_tzlocal(tz): + use_tzlocal = True + else: + trans, deltas, typ = get_dst_info(tz) + if typ not in ["pytz", "dateutil"]: + # static/fixed; in this case we know that len(delta) == 1 + use_fixed = True + delta = deltas[0] + else: + pos = trans.searchsorted(arr, side="right") - 1 + use_pytz = typ == "pytz" + + for i in range(n): + new_tz = tz + value = arr[i] + + if value == NPY_NAT: + result[i] = NaT + else: + if use_utc: + local_value = value + elif use_tzlocal: + local_value = tz_convert_utc_to_tzlocal(value, tz) + elif use_fixed: + local_value = value + delta + elif not use_pytz: + # i.e. dateutil + # no zone-name change for dateutil tzs - dst etc + # represented in single object. 
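                # [annotation] pytz is the special case in this dispatch: it
                # models every UTC-offset/DST regime of a zone as a distinct
                # tzinfo instance, so the branch below fetches the matching
                # instance via tz._tzinfos[tz._transition_info[pos[i]]] rather
                # than reusing the `tz` argument directly.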
+ local_value = value + deltas[pos[i]] + else: + # pytz + # find right representation of dst etc in pytz timezone + new_tz = tz._tzinfos[tz._transition_info[pos[i]]] + local_value = value + deltas[pos[i]] + + dt64_to_dtstruct(local_value, &dts) + result[i] = func_create(value, dts, new_tz, freq, fold) + + return result + + +# ------------------------------------------------------------------------- + +cdef: + int RESO_NS = 0 + int RESO_US = 1 + int RESO_MS = 2 + int RESO_SEC = 3 + int RESO_MIN = 4 + int RESO_HR = 5 + int RESO_DAY = 6 + int RESO_MTH = 7 + int RESO_QTR = 8 + int RESO_YR = 9 + + +cdef inline int _reso_stamp(npy_datetimestruct *dts): + if dts.us != 0: + if dts.us % 1000 == 0: + return RESO_MS + return RESO_US + elif dts.sec != 0: + return RESO_SEC + elif dts.min != 0: + return RESO_MIN + elif dts.hour != 0: + return RESO_HR + return RESO_DAY + + +def get_resolution(const int64_t[:] stamps, tzinfo tz=None): + cdef: + Py_ssize_t i, n = len(stamps) + npy_datetimestruct dts + int reso = RESO_DAY, curr_reso + ndarray[int64_t] trans + int64_t[:] deltas + Py_ssize_t[:] pos + int64_t local_val, delta + + if is_utc(tz) or tz is None: + for i in range(n): + if stamps[i] == NPY_NAT: + continue + dt64_to_dtstruct(stamps[i], &dts) + curr_reso = _reso_stamp(&dts) + if curr_reso < reso: + reso = curr_reso + elif is_tzlocal(tz): + for i in range(n): + if stamps[i] == NPY_NAT: + continue + local_val = tz_convert_utc_to_tzlocal(stamps[i], tz) + dt64_to_dtstruct(local_val, &dts) + curr_reso = _reso_stamp(&dts) + if curr_reso < reso: + reso = curr_reso + else: + # Adjust datetime64 timestamp, recompute datetimestruct + trans, deltas, typ = get_dst_info(tz) + + if typ not in ["pytz", "dateutil"]: + # static/fixed; in this case we know that len(delta) == 1 + delta = deltas[0] + for i in range(n): + if stamps[i] == NPY_NAT: + continue + dt64_to_dtstruct(stamps[i] + delta, &dts) + curr_reso = _reso_stamp(&dts) + if curr_reso < reso: + reso = curr_reso + else: + pos = trans.searchsorted(stamps, side="right") - 1 + for i in range(n): + if stamps[i] == NPY_NAT: + continue + dt64_to_dtstruct(stamps[i] + deltas[pos[i]], &dts) + curr_reso = _reso_stamp(&dts) + if curr_reso < reso: + reso = curr_reso + + return Resolution(reso) + + +# ------------------------------------------------------------------------- + +@cython.wraparound(False) +@cython.boundscheck(False) +cpdef ndarray[int64_t] normalize_i8_timestamps(const int64_t[:] stamps, tzinfo tz): + """ + Normalize each of the (nanosecond) timezone aware timestamps in the given + array by rounding down to the beginning of the day (i.e. midnight). + This is midnight for timezone, `tz`. 
+ + Parameters + ---------- + stamps : int64 ndarray + tz : tzinfo or None + + Returns + ------- + result : int64 ndarray of converted of normalized nanosecond timestamps + """ + cdef: + Py_ssize_t i, n = len(stamps) + int64_t[:] result = np.empty(n, dtype=np.int64) + ndarray[int64_t] trans + int64_t[:] deltas + str typ + Py_ssize_t[:] pos + int64_t delta, local_val + + if tz is None or is_utc(tz): + with nogil: + for i in range(n): + if stamps[i] == NPY_NAT: + result[i] = NPY_NAT + continue + local_val = stamps[i] + result[i] = normalize_i8_stamp(local_val) + elif is_tzlocal(tz): + for i in range(n): + if stamps[i] == NPY_NAT: + result[i] = NPY_NAT + continue + local_val = tz_convert_utc_to_tzlocal(stamps[i], tz) + result[i] = normalize_i8_stamp(local_val) + else: + # Adjust datetime64 timestamp, recompute datetimestruct + trans, deltas, typ = get_dst_info(tz) + + if typ not in ["pytz", "dateutil"]: + # static/fixed; in this case we know that len(delta) == 1 + delta = deltas[0] + for i in range(n): + if stamps[i] == NPY_NAT: + result[i] = NPY_NAT + continue + local_val = stamps[i] + delta + result[i] = normalize_i8_stamp(local_val) + else: + pos = trans.searchsorted(stamps, side="right") - 1 + for i in range(n): + if stamps[i] == NPY_NAT: + result[i] = NPY_NAT + continue + local_val = stamps[i] + deltas[pos[i]] + result[i] = normalize_i8_stamp(local_val) + + return result.base # `.base` to access underlying ndarray + + +@cython.wraparound(False) +@cython.boundscheck(False) +def is_date_array_normalized(const int64_t[:] stamps, tzinfo tz=None): + """ + Check if all of the given (nanosecond) timestamps are normalized to + midnight, i.e. hour == minute == second == 0. If the optional timezone + `tz` is not None, then this is midnight for this timezone. 
+ + Parameters + ---------- + stamps : int64 ndarray + tz : tzinfo or None + + Returns + ------- + is_normalized : bool True if all stamps are normalized + """ + cdef: + Py_ssize_t i, n = len(stamps) + ndarray[int64_t] trans + int64_t[:] deltas + intp_t[:] pos + int64_t local_val, delta + str typ + int64_t day_nanos = 24 * 3600 * 1_000_000_000 + + if tz is None or is_utc(tz): + for i in range(n): + local_val = stamps[i] + if local_val % day_nanos != 0: + return False + + elif is_tzlocal(tz): + for i in range(n): + local_val = tz_convert_utc_to_tzlocal(stamps[i], tz) + if local_val % day_nanos != 0: + return False + else: + trans, deltas, typ = get_dst_info(tz) + + if typ not in ["pytz", "dateutil"]: + # static/fixed; in this case we know that len(delta) == 1 + delta = deltas[0] + for i in range(n): + # Adjust datetime64 timestamp, recompute datetimestruct + local_val = stamps[i] + delta + if local_val % day_nanos != 0: + return False + + else: + pos = trans.searchsorted(stamps) - 1 + for i in range(n): + # Adjust datetime64 timestamp, recompute datetimestruct + local_val = stamps[i] + deltas[pos[i]] + if local_val % day_nanos != 0: + return False + + return True + + +# ------------------------------------------------------------------------- + + +@cython.wraparound(False) +@cython.boundscheck(False) +def dt64arr_to_periodarr(const int64_t[:] stamps, int freq, tzinfo tz): + cdef: + Py_ssize_t n = len(stamps) + int64_t[:] result = np.empty(n, dtype=np.int64) + ndarray[int64_t] trans + int64_t[:] deltas + Py_ssize_t[:] pos + npy_datetimestruct dts + int64_t local_val + + if is_utc(tz) or tz is None: + with nogil: + for i in range(n): + if stamps[i] == NPY_NAT: + result[i] = NPY_NAT + continue + dt64_to_dtstruct(stamps[i], &dts) + result[i] = get_period_ordinal(&dts, freq) + + elif is_tzlocal(tz): + for i in range(n): + if stamps[i] == NPY_NAT: + result[i] = NPY_NAT + continue + local_val = tz_convert_utc_to_tzlocal(stamps[i], tz) + dt64_to_dtstruct(local_val, &dts) + result[i] = get_period_ordinal(&dts, freq) + else: + # Adjust datetime64 timestamp, recompute datetimestruct + trans, deltas, typ = get_dst_info(tz) + + if typ not in ["pytz", "dateutil"]: + # static/fixed; in this case we know that len(delta) == 1 + for i in range(n): + if stamps[i] == NPY_NAT: + result[i] = NPY_NAT + continue + dt64_to_dtstruct(stamps[i] + deltas[0], &dts) + result[i] = get_period_ordinal(&dts, freq) + else: + pos = trans.searchsorted(stamps, side="right") - 1 + + for i in range(n): + if stamps[i] == NPY_NAT: + result[i] = NPY_NAT + continue + dt64_to_dtstruct(stamps[i] + deltas[pos[i]], &dts) + result[i] = get_period_ordinal(&dts, freq) + + return result.base # .base to get underlying ndarray diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index fcfbaa4ac2a1c..8eac45cdedaec 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -10,7 +10,11 @@ Timestamp, conversion, fields, + get_resolution, iNaT, + ints_to_pydatetime, + is_date_array_normalized, + normalize_i8_timestamps, resolution as libresolution, timezones, to_offset, @@ -526,11 +530,11 @@ def is_normalized(self): """ Returns True if all of the dates are at midnight ("no time") """ - return conversion.is_date_array_normalized(self.asi8, self.tz) + return is_date_array_normalized(self.asi8, self.tz) @property # NB: override with cache_readonly in immutable subclasses def _resolution_obj(self) -> libresolution.Resolution: - return libresolution.get_resolution(self.asi8, self.tz) + return 
get_resolution(self.asi8, self.tz) # ---------------------------------------------------------------- # Array-Like / EA-Interface Methods @@ -559,7 +563,7 @@ def __iter__(self): for i in range(chunks): start_i = i * chunksize end_i = min((i + 1) * chunksize, length) - converted = tslib.ints_to_pydatetime( + converted = ints_to_pydatetime( data[start_i:end_i], tz=self.tz, freq=self.freq, box="timestamp" ) for v in converted: @@ -991,7 +995,7 @@ def to_pydatetime(self) -> np.ndarray: ------- datetimes : ndarray """ - return tslib.ints_to_pydatetime(self.asi8, tz=self.tz) + return ints_to_pydatetime(self.asi8, tz=self.tz) def normalize(self): """ @@ -1031,7 +1035,7 @@ def normalize(self): '2014-08-01 00:00:00+05:30'], dtype='datetime64[ns, Asia/Calcutta]', freq=None) """ - new_values = conversion.normalize_i8_timestamps(self.asi8, self.tz) + new_values = normalize_i8_timestamps(self.asi8, self.tz) return type(self)(new_values)._with_freq("infer").tz_localize(self.tz) def to_period(self, freq=None): @@ -1219,7 +1223,7 @@ def time(self): else: timestamps = self.asi8 - return tslib.ints_to_pydatetime(timestamps, box="time") + return ints_to_pydatetime(timestamps, box="time") @property def timetz(self): @@ -1227,7 +1231,7 @@ def timetz(self): Returns numpy array of datetime.time also containing timezone information. The time part of the Timestamps. """ - return tslib.ints_to_pydatetime(self.asi8, self.tz, box="time") + return ints_to_pydatetime(self.asi8, self.tz, box="time") @property def date(self): @@ -1243,7 +1247,7 @@ def date(self): else: timestamps = self.asi8 - return tslib.ints_to_pydatetime(timestamps, box="date") + return ints_to_pydatetime(timestamps, box="date") def isocalendar(self): """ diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py index 4b4df3445be4e..b336371655466 100644 --- a/pandas/core/arrays/period.py +++ b/pandas/core/arrays/period.py @@ -10,6 +10,7 @@ NaTType, Timedelta, delta_to_nanoseconds, + dt64arr_to_periodarr as c_dt64arr_to_periodarr, iNaT, period as libperiod, to_offset, @@ -951,7 +952,7 @@ def dt64arr_to_periodarr(data, freq, tz=None): data = data._values base = freq._period_dtype_code - return libperiod.dt64arr_to_periodarr(data.view("i8"), base, tz), freq + return c_dt64arr_to_periodarr(data.view("i8"), base, tz), freq def _get_ordinal_range(start, end, periods, freq, mult=1): diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index d0417d51da497..6b84f0e81f48b 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -15,6 +15,7 @@ Timedelta, Timestamp, iNaT, + ints_to_pydatetime, ) from pandas._libs.tslibs.timezones import tz_compare from pandas._typing import ArrayLike, Dtype, DtypeObj @@ -919,7 +920,7 @@ def astype_nansafe(arr, dtype, copy: bool = True, skipna: bool = False): elif is_datetime64_dtype(arr): if is_object_dtype(dtype): - return tslib.ints_to_pydatetime(arr.view(np.int64)) + return ints_to_pydatetime(arr.view(np.int64)) elif dtype == np.int64: if isna(arr).any(): raise ValueError("Cannot convert NaT values to integer") @@ -1399,7 +1400,7 @@ def maybe_cast_to_datetime(value, dtype, errors: str = "raise"): if value.dtype != DT64NS_DTYPE: value = value.astype(DT64NS_DTYPE) ints = np.asarray(value).view("i8") - return tslib.ints_to_pydatetime(ints) + return ints_to_pydatetime(ints) # we have a non-castable dtype that was passed raise TypeError(f"Cannot cast datetime64 to {dtype}") diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index 
0317d0b93859b..6d2e592f024ed 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -5,8 +5,14 @@ import numpy as np -from pandas._libs import NaT, Period, Timestamp, index as libindex, lib, tslib -from pandas._libs.tslibs import Resolution, parsing, timezones, to_offset +from pandas._libs import NaT, Period, Timestamp, index as libindex, lib +from pandas._libs.tslibs import ( + Resolution, + ints_to_pydatetime, + parsing, + timezones, + to_offset, +) from pandas._libs.tslibs.offsets import prefix_mapping from pandas._typing import DtypeObj, Label from pandas.errors import InvalidIndexError @@ -339,7 +345,7 @@ def _is_comparable_dtype(self, dtype: DtypeObj) -> bool: def _mpl_repr(self): # how to represent ourselves to matplotlib - return tslib.ints_to_pydatetime(self.asi8, self.tz) + return ints_to_pydatetime(self.asi8, self.tz) @property def _formatter_func(self): diff --git a/pandas/tests/tslibs/test_api.py b/pandas/tests/tslibs/test_api.py index 840a8c2fb68b1..957706fcb460e 100644 --- a/pandas/tests/tslibs/test_api.py +++ b/pandas/tests/tslibs/test_api.py @@ -18,6 +18,7 @@ def test_namespace(): "period", "resolution", "strptime", + "vectorized", "timedeltas", "timestamps", "timezones", @@ -37,7 +38,12 @@ def test_namespace(): "Resolution", "Tick", "Timedelta", + "dt64arr_to_periodarr", "Timestamp", + "is_date_array_normalized", + "ints_to_pydatetime", + "normalize_i8_timestamps", + "get_resolution", "delta_to_nanoseconds", "ints_to_pytimedelta", "localize_pydatetime", diff --git a/setup.py b/setup.py index e9d305d831653..1885546e001fe 100755 --- a/setup.py +++ b/setup.py @@ -322,6 +322,7 @@ class CheckSDist(sdist_class): "pandas/_libs/tslibs/resolution.pyx", "pandas/_libs/tslibs/parsing.pyx", "pandas/_libs/tslibs/tzconversion.pyx", + "pandas/_libs/tslibs/vectorized.pyx", "pandas/_libs/window/indexers.pyx", "pandas/_libs/writers.pyx", "pandas/io/sas/sas.pyx", @@ -659,6 +660,7 @@ def srcpath(name=None, suffix=".pyx", subdir="src"): "pyxfile": "_libs/tslibs/tzconversion", "depends": tseries_depends, }, + "_libs.tslibs.vectorized": {"pyxfile": "_libs/tslibs/vectorized"}, "_libs.testing": {"pyxfile": "_libs/testing"}, "_libs.window.aggregations": { "pyxfile": "_libs/window/aggregations", From 3db2d02bc83d3ee653ec4034eae6c34197520b10 Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Wed, 8 Jul 2020 14:51:12 -0700 Subject: [PATCH 074/632] ASV: tslibs.fields (#35149) --- asv_bench/benchmarks/tslibs/fields.py | 74 +++++++++++++++++++++++++++ 1 file changed, 74 insertions(+) create mode 100644 asv_bench/benchmarks/tslibs/fields.py diff --git a/asv_bench/benchmarks/tslibs/fields.py b/asv_bench/benchmarks/tslibs/fields.py new file mode 100644 index 0000000000000..0607a799ec707 --- /dev/null +++ b/asv_bench/benchmarks/tslibs/fields.py @@ -0,0 +1,74 @@ +import numpy as np + +from pandas._libs.tslibs.fields import ( + get_date_field, + get_start_end_field, + get_timedelta_field, +) + +from .tslib import _sizes + + +class TimeGetTimedeltaField: + params = [ + _sizes, + ["days", "h", "s", "seconds", "ms", "microseconds", "us", "ns", "nanoseconds"], + ] + param_names = ["size", "field"] + + def setup(self, size, field): + arr = np.random.randint(0, 10, size=size, dtype="i8") + self.i8data = arr + + def time_get_timedelta_field(self, size, field): + get_timedelta_field(self.i8data, field) + + +class TimeGetDateField: + params = [ + _sizes, + [ + "Y", + "M", + "D", + "h", + "m", + "s", + "us", + "ns", + "doy", + "dow", + "woy", + "q", + "dim", + "is_leap_year", + ], + ] + 
param_names = ["size", "field"] + + def setup(self, size, field): + arr = np.random.randint(0, 10, size=size, dtype="i8") + self.i8data = arr + + def time_get_date_field(self, size, field): + get_date_field(self.i8data, field) + + +class TimeGetStartEndField: + params = [ + _sizes, + ["start", "end"], + ["month", "quarter", "year"], + ["B", None, "QS"], + [12, 3, 5], + ] + param_names = ["size", "side", "period", "freqstr", "month_kw"] + + def setup(self, size, side, period, freqstr, month_kw): + arr = np.random.randint(0, 10, size=size, dtype="i8") + self.i8data = arr + + self.attrname = f"is_{period}_{side}" + + def time_get_start_end_field(self, size, side, period, freqstr, month_kw): + get_start_end_field(self.i8data, self.attrname, freqstr, month_kw=month_kw) From ebca2a6dc8fdac33687648002a6c27fa15503887 Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Wed, 8 Jul 2020 14:52:28 -0700 Subject: [PATCH 075/632] TYP: maybe_get_tz (#35103) --- pandas/_libs/tslibs/timezones.pxd | 2 +- pandas/_libs/tslibs/timezones.pyx | 8 +++++++- pandas/tests/tslibs/test_timezones.py | 12 ++++++++++++ 3 files changed, 20 insertions(+), 2 deletions(-) diff --git a/pandas/_libs/tslibs/timezones.pxd b/pandas/_libs/tslibs/timezones.pxd index f51ee41cb99a6..136710003d32a 100644 --- a/pandas/_libs/tslibs/timezones.pxd +++ b/pandas/_libs/tslibs/timezones.pxd @@ -9,7 +9,7 @@ cdef bint treat_tz_as_pytz(tzinfo tz) cpdef bint tz_compare(tzinfo start, tzinfo end) cpdef object get_timezone(tzinfo tz) -cpdef object maybe_get_tz(object tz) +cpdef tzinfo maybe_get_tz(object tz) cdef timedelta get_utcoffset(tzinfo tz, datetime obj) cdef bint is_fixed_offset(tzinfo tz) diff --git a/pandas/_libs/tslibs/timezones.pyx b/pandas/_libs/tslibs/timezones.pyx index 3b2104f75956a..a8c785704d8e8 100644 --- a/pandas/_libs/tslibs/timezones.pyx +++ b/pandas/_libs/tslibs/timezones.pyx @@ -84,7 +84,7 @@ cpdef inline object get_timezone(tzinfo tz): return tz -cpdef inline object maybe_get_tz(object tz): +cpdef inline tzinfo maybe_get_tz(object tz): """ (Maybe) Construct a timezone object from a string. If tz is a string, use it to construct a timezone object. Otherwise, just return tz. 
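For orientation between the two hunks of this diff: after the change below,
`maybe_get_tz` either returns a `tzinfo` (or None) or raises. A minimal usage
sketch, inferred only from the code in this hunk (editor's illustration, not
part of the patch):

    import pytz
    from pandas._libs.tslibs import timezones

    assert timezones.maybe_get_tz(None) is None          # None passes through
    assert timezones.maybe_get_tz(pytz.UTC) is pytz.UTC  # tzinfo passes through
    tz1 = timezones.maybe_get_tz("Europe/Amsterdam")     # str -> pytz.timezone(...)
    tz2 = timezones.maybe_get_tz(3600)                   # seconds -> pytz.FixedOffset(60)
    try:
        timezones.maybe_get_tz(44.0)                     # anything else now raises
    except TypeError:
        pass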
@@ -102,6 +102,12 @@ cpdef inline object maybe_get_tz(object tz):
         tz = pytz.timezone(tz)
     elif is_integer_object(tz):
         tz = pytz.FixedOffset(tz / 60)
+    elif isinstance(tz, tzinfo):
+        pass
+    elif tz is None:
+        pass
+    else:
+        raise TypeError(type(tz))
 
     return tz
 
diff --git a/pandas/tests/tslibs/test_timezones.py b/pandas/tests/tslibs/test_timezones.py
index 03cc8fcb6e904..81b41f567976d 100644
--- a/pandas/tests/tslibs/test_timezones.py
+++ b/pandas/tests/tslibs/test_timezones.py
@@ -106,3 +106,15 @@ def test_infer_tz_mismatch(infer_setup, ordered):
 
     with pytest.raises(AssertionError, match=msg):
         timezones.infer_tzinfo(*args)
+
+
+def test_maybe_get_tz_invalid_types():
+    with pytest.raises(TypeError, match="<class 'float'>"):
+        timezones.maybe_get_tz(44.0)
+
+    with pytest.raises(TypeError, match="<class 'module'>"):
+        timezones.maybe_get_tz(pytz)
+
+    msg = "<class 'pandas._libs.tslibs.timestamps.Timestamp'>"
+    with pytest.raises(TypeError, match=msg):
+        timezones.maybe_get_tz(Timestamp.now("UTC"))

From c15f0808428b5cdb4eaea290fb26a71c3534630b Mon Sep 17 00:00:00 2001
From: Irv Lustig
Date: Wed, 8 Jul 2020 18:03:47 -0400
Subject: [PATCH 076/632] Fix Issue 34748 - read in datetime as MultiIndex for
 column headers (#34954)

* Fix Issue 34748 - read in datetime as MultiIndex for column headers

* add more xls file formats

* use testing pattern for other Excel files

* added ods file

* remove xfail for ods test

* skip tests on xlsb with datetimes

Co-authored-by: Will Ayd
---
 doc/source/whatsnew/v1.1.0.rst                |   1 +
 pandas/io/parsers.py                          |   2 +-
 .../tests/io/data/excel/test_datetime_mi.ods  | Bin 0 -> 3585 bytes
 .../tests/io/data/excel/test_datetime_mi.xls  | Bin 0 -> 24576 bytes
 .../tests/io/data/excel/test_datetime_mi.xlsb | Bin 0 -> 7947 bytes
 .../tests/io/data/excel/test_datetime_mi.xlsm | Bin 0 -> 8700 bytes
 .../tests/io/data/excel/test_datetime_mi.xlsx | Bin 0 -> 8687 bytes
 pandas/tests/io/excel/test_readers.py         |  19 ++++++++++++++++++
 8 files changed, 21 insertions(+), 1 deletion(-)
 create mode 100644 pandas/tests/io/data/excel/test_datetime_mi.ods
 create mode 100644 pandas/tests/io/data/excel/test_datetime_mi.xls
 create mode 100644 pandas/tests/io/data/excel/test_datetime_mi.xlsb
 create mode 100644 pandas/tests/io/data/excel/test_datetime_mi.xlsm
 create mode 100644 pandas/tests/io/data/excel/test_datetime_mi.xlsx

diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst
index 24283d2c2e48d..ce0668917f800 100644
--- a/doc/source/whatsnew/v1.1.0.rst
+++ b/doc/source/whatsnew/v1.1.0.rst
@@ -1051,6 +1051,7 @@ I/O
 - Bug in :meth:`~HDFStore.create_table` now raises an error when `column` argument was not specified in `data_columns` on input (:issue:`28156`)
 - :meth:`read_json` can now read a line-delimited json file from a file url while `lines` and `chunksize` are set.
 - Bug in :meth:`DataFrame.to_sql` when reading DataFrames with ``-np.inf`` entries with MySQL now has a more explicit ``ValueError`` (:issue:`34431`)
+- Bug in :meth:`read_excel` when datetime values are used in the header of a ``MultiIndex`` (:issue:`34748`)
 
 Plotting
 ^^^^^^^^
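The functional fix is the one-line guard change in parsers.py just below: the
old guard called len(r[0]) on every header cell, and datetime objects do not
implement __len__, so datetime-valued MultiIndex headers failed before the
columns were even built. A quick illustration of the failure mode (editor's
example, not taken from the patch):

    from datetime import datetime

    cell = datetime(2020, 2, 29)
    print(cell is not None)  # True: the new guard accepts any non-None label
    try:
        len(cell)            # the old guard's check
    except TypeError as err:
        print(err)           # object of type 'datetime.datetime' has no len()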
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index c427d3a198b10..d4f346f8c1087 100644
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -1614,7 +1614,7 @@ def extract(r):
         # Clean the column names (if we have an index_col).
         if len(ic):
             col_names = [
-                r[0] if (len(r[0]) and r[0] not in self.unnamed_cols) else None
+                r[0] if ((r[0] is not None) and r[0] not in self.unnamed_cols) else None
                 for r in header
             ]
         else:

diff --git a/pandas/tests/io/data/excel/test_datetime_mi.ods b/pandas/tests/io/data/excel/test_datetime_mi.ods
new file mode 100644
index 0000000000000000000000000000000000000000..c37c35060c6508a0958b0ae21bd0181bd63017e5
GIT binary patch
literal 3585
[base85 payload omitted: 3585 bytes of binary spreadsheet data did not survive text extraction]

literal 0
HcmV?d00001
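The user-visible effect, mirroring the fixture files added in this commit (a
hedged sketch; the file name comes from this commit's test data):

    import pandas as pd

    # Two datetime header rows now become datetime-valued MultiIndex columns
    # instead of raising during header cleanup.
    with pd.ExcelFile("test_datetime_mi.xlsx") as excel:
        df = pd.read_excel(excel, header=[0, 1], index_col=0)
    assert isinstance(df.columns, pd.MultiIndex)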
diff --git a/pandas/tests/io/data/excel/test_datetime_mi.xls b/pandas/tests/io/data/excel/test_datetime_mi.xls
new file mode 100644
index 0000000000000000000000000000000000000000..aeade05855919877556f4adc171aa1c4b5c22c8b
GIT binary patch
literal 24576
[base85 payload omitted: 24576 bytes of binary spreadsheet data did not survive text extraction]

literal 0
HcmV?d00001

diff --git a/pandas/tests/io/data/excel/test_datetime_mi.xlsb b/pandas/tests/io/data/excel/test_datetime_mi.xlsb
new file mode 100644
GIT binary patch
literal 7947
[base85 payload omitted: 7947 bytes of binary spreadsheet data did not survive text extraction]

literal 0
HcmV?d00001

diff --git a/pandas/tests/io/data/excel/test_datetime_mi.xlsm b/pandas/tests/io/data/excel/test_datetime_mi.xlsm
new file mode 100644
index 0000000000000000000000000000000000000000..55fb88912afb9cd6685123f34c65b612786d329c
GIT binary patch
literal 8700
[base85 payload omitted: 8700 bytes of binary spreadsheet data did not survive text extraction]

literal 0
HcmV?d00001

diff --git a/pandas/tests/io/data/excel/test_datetime_mi.xlsx b/pandas/tests/io/data/excel/test_datetime_mi.xlsx
new file mode 100644
GIT binary patch
literal 8687
[base85 payload omitted: 8687 bytes of binary spreadsheet data did not survive text extraction]

literal 0
HcmV?d00001
zn7-zEf^&-A2ziaeZ<%}e#oDzyL*!0_{au_N~LeF52sp3 zoI?f`Q)xvXLs0FirYjKH?u<`gGr|d%aCs)%W3HgnW9yTRAcHf|wq<^oh&L4Lclvoy zfcoWTzK@*foGxqTs_n>hMqvv91Y!M=U*eN;;x3!+1M$Brg}o$S$wa_ddJH=aV#AaI z3#V6Vu1?NwY_FVL!T;)SDUvd*kFAo@N`~<y5+(($91lH!t_c9{@e1(lC||@9q~G9 zfpSPLrbso% z3{b?+nW)4??O*uTehCF0i50V5d8rmo>+S|>!^`W*_o$fx!4g6751Xw`m{|N_gE53* zi1C#yP6v#w`=n)znuon2Y9YAV#OG}!dzkQnARaI8qx2?C3N}9Ek`>!7Ni?SE>dq0= z)}V{(a2sHixNK(-ud>|-;ccCnvuEm-3TgdAdO4P7+*UUXO0_e~HxdD236$gZ2lt^F zTW9BC#oNp+v!FwOSwGK=*0z|sP205#HWF`yq}-dJ`kyeN;_v6U%H*<%3>bKsV95mQ z&)_w4cK#oBVW9oz$bpPHF0f;T9U5 zKSTo*N*lmMAC@nz*suAOT-uHtIK-tlj0VzTAo#2GPO%GGKR!Nqy7Zw^Z;^mILZit5 zbCo}@CG6i~VhsTZhI-8g1mD(tC-p$?=#|lLtG5aGfnUsf*z9ERZf$wL={f1iaBchd z8aihnH{qH!ly>k!{LyaSJNM?eR5T2MSq2zT6D^J!$Y`hf`P+Ns7R8NMpw2MUIPDaw zB(d1|=K)FsGaX1w*ZvHX8jS@jZwo`25%ZhO%TvSKq$v%+*a%|4{wF0l$FiA`>ge^) z#gh{dvrHH^l(gIF!$yohO31t;E#|YB`x@H-lz%CDcMV&9;i>{VaL~0XcgB(?{T&7V zcCS5qaM6H(xdRmb!JdzCQY;__x?5in^wJ=(qA3?wXOIs!{6pNFpB)C$@r_`udS9pOe}qLzxw%AmHfjK68iry;%^$|uU>u?cmD8#2x}t2R`aXW^Q(hj z%hNv`gc1Di;E!VUSJS^{(?1Mh;fVwQ_*-uM)%>sV?9b+-q<=F1XLM6jLWJ!N0Pqy{ N6NRPkZ1Ug!{U7#%BdP!Z literal 0 HcmV?d00001 diff --git a/pandas/tests/io/excel/test_readers.py b/pandas/tests/io/excel/test_readers.py index 955db982f8300..ddc631532194a 100644 --- a/pandas/tests/io/excel/test_readers.py +++ b/pandas/tests/io/excel/test_readers.py @@ -1143,3 +1143,22 @@ def test_header_with_index_col(self, engine, filename): filename, sheet_name="Sheet1", index_col=0, header=[0, 1] ) tm.assert_frame_equal(expected, result) + + def test_read_datetime_multiindex(self, engine, read_ext): + # GH 34748 + if engine == "pyxlsb": + pytest.xfail("Sheets containing datetimes not supported by pyxlsb") + + f = "test_datetime_mi" + read_ext + with pd.ExcelFile(f) as excel: + actual = pd.read_excel(excel, header=[0, 1], index_col=0, engine=engine) + expected_column_index = pd.MultiIndex.from_tuples( + [(pd.to_datetime("02/29/2020"), pd.to_datetime("03/01/2020"))], + names=[ + pd.to_datetime("02/29/2020").to_pydatetime(), + pd.to_datetime("03/01/2020").to_pydatetime(), + ], + ) + expected = pd.DataFrame([], columns=expected_column_index) + + tm.assert_frame_equal(expected, actual) From c21be0562a33d149b62735fc82aff80e4d5942f5 Mon Sep 17 00:00:00 2001 From: Alex Kirko Date: Thu, 9 Jul 2020 16:02:32 +0300 Subject: [PATCH 077/632] BUG: fix union_indexes not supporting sort=False for Index subclasses (#35098) --- doc/source/whatsnew/v1.1.0.rst | 1 + pandas/core/indexes/api.py | 8 +++++++- pandas/tests/frame/test_constructors.py | 6 ++++-- pandas/tests/indexes/test_common.py | 18 ++++++++++++++++- pandas/tests/reshape/test_concat.py | 14 +++++++++++++ pandas/tests/reshape/test_melt.py | 26 ++++++++++++------------- pandas/tests/test_strings.py | 9 ++++++++- 7 files changed, 64 insertions(+), 18 deletions(-) diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst index ce0668917f800..986ee371566cd 100644 --- a/doc/source/whatsnew/v1.1.0.rst +++ b/doc/source/whatsnew/v1.1.0.rst @@ -1117,6 +1117,7 @@ Reshaping - Fixed bug in :func:`melt` where melting MultiIndex columns with ``col_level`` > 0 would raise a ``KeyError`` on ``id_vars`` (:issue:`34129`) - Bug in :meth:`Series.where` with an empty Series and empty ``cond`` having non-bool dtype (:issue:`34592`) - Fixed regression where :meth:`DataFrame.apply` would raise ``ValueError`` for elements whth ``S`` dtype (:issue:`34529`) +- Bug in :meth:`DataFrame.append` leading to sorting columns even when ``sort=False`` is specified (:issue:`35092`) Sparse ^^^^^^ diff --git 
a/pandas/core/indexes/api.py b/pandas/core/indexes/api.py index 4c5a70f4088ee..9849742abcfca 100644 --- a/pandas/core/indexes/api.py +++ b/pandas/core/indexes/api.py @@ -214,7 +214,13 @@ def conv(i): return result.union_many(indexes[1:]) else: for other in indexes[1:]: - result = result.union(other) + # GH 35092. Index.union expects sort=None instead of sort=True + # to signify that sort=True isn't fully implemented and + # legacy implementation sometimes might not sort (see GH 24959) + # In this case we currently sort in _get_combined_index + if sort: + sort = None + result = result.union(other, sort=sort) return result elif kind == "array": index = indexes[0] diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py index dba243f1a339a..ab4f7781467e7 100644 --- a/pandas/tests/frame/test_constructors.py +++ b/pandas/tests/frame/test_constructors.py @@ -2542,11 +2542,13 @@ def test_construct_with_two_categoricalindex_series(self): index=pd.CategoricalIndex(["f", "female", "m", "male", "unknown"]), ) result = DataFrame([s1, s2]) + # GH 35092. Extra s2 columns are now appended to s1 columns + # in original order expected = DataFrame( np.array( - [[np.nan, 39.0, np.nan, 6.0, 4.0], [2.0, 152.0, 2.0, 242.0, 150.0]] + [[39.0, 6.0, 4.0, np.nan, np.nan], [152.0, 242.0, 150.0, 2.0, 2.0]] ), - columns=["f", "female", "m", "male", "unknown"], + columns=["female", "male", "unknown", "f", "m"], ) tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/indexes/test_common.py b/pandas/tests/indexes/test_common.py index 02a173eb4958d..c85696e02ad39 100644 --- a/pandas/tests/indexes/test_common.py +++ b/pandas/tests/indexes/test_common.py @@ -13,8 +13,9 @@ from pandas.core.dtypes.common import is_period_dtype, needs_i8_conversion import pandas as pd -from pandas import CategoricalIndex, MultiIndex, RangeIndex +from pandas import CategoricalIndex, Index, MultiIndex, RangeIndex import pandas._testing as tm +from pandas.core.indexes.api import union_indexes class TestCommon: @@ -395,3 +396,18 @@ def test_astype_preserves_name(self, index, dtype, copy): assert result.names == index.names else: assert result.name == index.name + + +@pytest.mark.parametrize("arr", [[0, 1, 4, 3]]) +@pytest.mark.parametrize("dtype", ["int8", "int16", "int32", "int64"]) +def test_union_index_no_sort(arr, sort, dtype): + # GH 35092. Check that we don't sort with sort=False + ind1 = Index(arr[:2], dtype=dtype) + ind2 = Index(arr[2:], dtype=dtype) + + # sort is None indicates that we sort the combined index + if sort is None: + arr.sort() + expected = Index(arr, dtype=dtype) + result = union_indexes([ind1, ind2], sort=sort) + tm.assert_index_equal(result, expected) diff --git a/pandas/tests/reshape/test_concat.py b/pandas/tests/reshape/test_concat.py index ffeb5ff0f8aaa..ff95d8ad997a4 100644 --- a/pandas/tests/reshape/test_concat.py +++ b/pandas/tests/reshape/test_concat.py @@ -2857,3 +2857,17 @@ def test_concat_frame_axis0_extension_dtypes(): result = pd.concat([df2, df1], ignore_index=True) expected = pd.DataFrame({"a": [4, 5, 6, 1, 2, 3]}, dtype="Int64") tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("sort", [True, False]) +def test_append_sort(sort): + # GH 35092. Check that DataFrame.append respects the sort argument. 
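+    # Concretely: df1 below has columns [0, 1] and df2 has columns [3, 2], so
+    # the appended result keeps the arrival order [0, 1, 3, 2] when sort=False
+    # and comes back as [0, 1, 2, 3] when sort=True.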
+ df1 = pd.DataFrame(data={0: [1, 2], 1: [3, 4]}) + df2 = pd.DataFrame(data={3: [1, 2], 2: [3, 4]}) + cols = list(df1.columns) + list(df2.columns) + if sort: + cols.sort() + + result = df1.append(df2, sort=sort).columns + expected = type(result)(cols) + tm.assert_index_equal(result, expected) diff --git a/pandas/tests/reshape/test_melt.py b/pandas/tests/reshape/test_melt.py index a0fa10802f860..03fda038539e2 100644 --- a/pandas/tests/reshape/test_melt.py +++ b/pandas/tests/reshape/test_melt.py @@ -691,11 +691,11 @@ def test_unbalanced(self): ) df["id"] = df.index exp_data = { - "X": ["X1", "X1", "X2", "X2"], - "A": [1.0, 3.0, 2.0, 4.0], - "B": [5.0, np.nan, 6.0, np.nan], - "id": [0, 0, 1, 1], - "year": [2010, 2011, 2010, 2011], + "X": ["X1", "X2", "X1", "X2"], + "A": [1.0, 2.0, 3.0, 4.0], + "B": [5.0, 6.0, np.nan, np.nan], + "id": [0, 1, 0, 1], + "year": [2010, 2010, 2011, 2011], } expected = pd.DataFrame(exp_data) expected = expected.set_index(["id", "year"])[["X", "A", "B"]] @@ -938,10 +938,10 @@ def test_nonnumeric_suffix(self): ) expected = pd.DataFrame( { - "A": ["X1", "X1", "X2", "X2"], - "colname": ["placebo", "test", "placebo", "test"], - "result": [5.0, np.nan, 6.0, np.nan], - "treatment": [1.0, 3.0, 2.0, 4.0], + "A": ["X1", "X2", "X1", "X2"], + "colname": ["placebo", "placebo", "test", "test"], + "result": [5.0, 6.0, np.nan, np.nan], + "treatment": [1.0, 2.0, 3.0, 4.0], } ) expected = expected.set_index(["A", "colname"]) @@ -985,10 +985,10 @@ def test_float_suffix(self): ) expected = pd.DataFrame( { - "A": ["X1", "X1", "X1", "X1", "X2", "X2", "X2", "X2"], - "colname": [1, 1.1, 1.2, 2.1, 1, 1.1, 1.2, 2.1], - "result": [0.0, np.nan, 5.0, np.nan, 9.0, np.nan, 6.0, np.nan], - "treatment": [np.nan, 1.0, np.nan, 3.0, np.nan, 2.0, np.nan, 4.0], + "A": ["X1", "X2", "X1", "X2", "X1", "X2", "X1", "X2"], + "colname": [1.2, 1.2, 1.0, 1.0, 1.1, 1.1, 2.1, 2.1], + "result": [5.0, 6.0, 0.0, 9.0, np.nan, np.nan, np.nan, np.nan], + "treatment": [np.nan, np.nan, np.nan, np.nan, 1.0, 2.0, 3.0, 4.0], } ) expected = expected.set_index(["A", "colname"]) diff --git a/pandas/tests/test_strings.py b/pandas/tests/test_strings.py index d9396d70f9112..3a4e54052305e 100644 --- a/pandas/tests/test_strings.py +++ b/pandas/tests/test_strings.py @@ -636,8 +636,15 @@ def test_str_cat_align_mixed_inputs(self, join): # mixed list of indexed/unindexed u = np.array(["A", "B", "C", "D"]) expected_outer = Series(["aaA", "bbB", "c-C", "ddD", "-e-"]) + # joint index of rhs [t, u]; u will be forced have index of s - rhs_idx = t.index & s.index if join == "inner" else t.index | s.index + # GH 35092. 
If right join, maintain order of t.index + if join == "inner": + rhs_idx = t.index & s.index + elif join == "right": + rhs_idx = t.index.union(s.index, sort=False) + else: + rhs_idx = t.index | s.index expected = expected_outer.loc[s.index.join(rhs_idx, how=join)] result = s.str.cat([t, u], join=join, na_rep="-") From e73d7041d866a79d2457eecd0c056bec25645c12 Mon Sep 17 00:00:00 2001 From: William Ayd Date: Thu, 9 Jul 2020 14:57:04 -0700 Subject: [PATCH 078/632] Json Visual Clutter cleanup (#35183) --- pandas/_libs/src/ujson/python/objToJSON.c | 165 +++------------------- 1 file changed, 19 insertions(+), 146 deletions(-) diff --git a/pandas/_libs/src/ujson/python/objToJSON.c b/pandas/_libs/src/ujson/python/objToJSON.c index e841f00489887..59298522d86d1 100644 --- a/pandas/_libs/src/ujson/python/objToJSON.c +++ b/pandas/_libs/src/ujson/python/objToJSON.c @@ -143,8 +143,6 @@ typedef struct __PyObjectEncoder { enum PANDAS_FORMAT { SPLIT, RECORDS, INDEX, COLUMNS, VALUES }; -#define PRINTMARK() - int PdBlock_iterNext(JSOBJ, JSONTypeContext *); void *initObjToJSON(void) { @@ -219,13 +217,11 @@ static TypeContext *createTypeContext(void) { static PyObject *get_values(PyObject *obj) { PyObject *values = NULL; - PRINTMARK(); - - if (PyObject_TypeCheck(obj, cls_index) || PyObject_TypeCheck(obj, cls_series)) { + if (PyObject_TypeCheck(obj, cls_index) || + PyObject_TypeCheck(obj, cls_series)) { // The special cases to worry about are dt64tz and category[dt64tz]. // In both cases we want the UTC-localized datetime64 ndarray, // without going through and object array of Timestamps. - PRINTMARK(); values = PyObject_GetAttrString(obj, "values"); if (values == NULL) { @@ -236,7 +232,6 @@ static PyObject *get_values(PyObject *obj) { values = PyObject_CallMethod(values, "__array__", NULL); } else if (!PyArray_CheckExact(values)) { // Didn't get a numpy array, so keep trying - PRINTMARK(); Py_DECREF(values); values = NULL; } @@ -245,7 +240,6 @@ static PyObject *get_values(PyObject *obj) { if (values == NULL) { PyObject *typeRepr = PyObject_Repr((PyObject *)Py_TYPE(obj)); PyObject *repr; - PRINTMARK(); if (PyObject_HasAttrString(obj, "dtype")) { PyObject *dtype = PyObject_GetAttrString(obj, "dtype"); repr = PyObject_Repr(dtype); @@ -369,7 +363,6 @@ static char *PyTimeToJSON(JSOBJ _obj, JSONTypeContext *tc, size_t *outLen) { str = PyObject_CallMethod(obj, "isoformat", NULL); if (str == NULL) { - PRINTMARK(); *outLen = 0; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_ValueError, "Failed to convert time"); @@ -397,7 +390,6 @@ static char *PyTimeToJSON(JSOBJ _obj, JSONTypeContext *tc, size_t *outLen) { static void NpyArr_freeItemValue(JSOBJ Py_UNUSED(_obj), JSONTypeContext *tc) { if (GET_TC(tc)->npyarr && GET_TC(tc)->itemValue != GET_TC(tc)->npyarr->array) { - PRINTMARK(); Py_XDECREF(GET_TC(tc)->itemValue); GET_TC(tc)->itemValue = NULL; } @@ -417,7 +409,6 @@ void NpyArr_iterBegin(JSOBJ _obj, JSONTypeContext *tc) { obj = (PyArrayObject *)_obj; } - PRINTMARK(); npyarr = PyObject_Malloc(sizeof(NpyArrContext)); GET_TC(tc)->npyarr = npyarr; @@ -454,7 +445,6 @@ void NpyArr_iterBegin(JSOBJ _obj, JSONTypeContext *tc) { void NpyArr_iterEnd(JSOBJ obj, JSONTypeContext *tc) { NpyArrContext *npyarr = GET_TC(tc)->npyarr; - PRINTMARK(); if (npyarr) { NpyArr_freeItemValue(obj, tc); @@ -463,13 +453,10 @@ void NpyArr_iterEnd(JSOBJ obj, JSONTypeContext *tc) { } void NpyArrPassThru_iterBegin(JSOBJ Py_UNUSED(obj), - JSONTypeContext *Py_UNUSED(tc)) { - PRINTMARK(); -} + JSONTypeContext *Py_UNUSED(tc)) {} void 
NpyArrPassThru_iterEnd(JSOBJ obj, JSONTypeContext *tc) { NpyArrContext *npyarr = GET_TC(tc)->npyarr; - PRINTMARK(); // finished this dimension, reset the data pointer npyarr->curdim--; npyarr->dataptr -= npyarr->stride * npyarr->index[npyarr->stridedim]; @@ -483,28 +470,24 @@ void NpyArrPassThru_iterEnd(JSOBJ obj, JSONTypeContext *tc) { int NpyArr_iterNextItem(JSOBJ obj, JSONTypeContext *tc) { NpyArrContext *npyarr = GET_TC(tc)->npyarr; - PRINTMARK(); if (PyErr_Occurred()) { return 0; } if (npyarr->index[npyarr->stridedim] >= npyarr->dim) { - PRINTMARK(); return 0; } NpyArr_freeItemValue(obj, tc); if (PyArray_ISDATETIME(npyarr->array)) { - PRINTMARK(); GET_TC(tc)->itemValue = obj; Py_INCREF(obj); ((PyObjectEncoder *)tc->encoder)->npyType = PyArray_TYPE(npyarr->array); ((PyObjectEncoder *)tc->encoder)->npyValue = npyarr->dataptr; ((PyObjectEncoder *)tc->encoder)->npyCtxtPassthru = npyarr; } else { - PRINTMARK(); GET_TC(tc)->itemValue = npyarr->getitem(npyarr->dataptr, npyarr->array); } @@ -515,16 +498,13 @@ int NpyArr_iterNextItem(JSOBJ obj, JSONTypeContext *tc) { int NpyArr_iterNext(JSOBJ _obj, JSONTypeContext *tc) { NpyArrContext *npyarr = GET_TC(tc)->npyarr; - PRINTMARK(); if (PyErr_Occurred()) { - PRINTMARK(); return 0; } if (npyarr->curdim >= npyarr->ndim || npyarr->index[npyarr->stridedim] >= npyarr->dim) { - PRINTMARK(); // innermost dimension, start retrieving item values GET_TC(tc)->iterNext = NpyArr_iterNextItem; return NpyArr_iterNextItem(_obj, tc); @@ -545,7 +525,6 @@ int NpyArr_iterNext(JSOBJ _obj, JSONTypeContext *tc) { } JSOBJ NpyArr_iterGetValue(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc) { - PRINTMARK(); return GET_TC(tc)->itemValue; } @@ -553,7 +532,6 @@ char *NpyArr_iterGetName(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc, size_t *outLen) { NpyArrContext *npyarr = GET_TC(tc)->npyarr; npy_intp idx; - PRINTMARK(); char *cStr; if (GET_TC(tc)->iterNext == NpyArr_iterNextItem) { @@ -580,7 +558,6 @@ char *NpyArr_iterGetName(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc, void PdBlockPassThru_iterEnd(JSOBJ obj, JSONTypeContext *tc) { PdBlockContext *blkCtxt = GET_TC(tc)->pdblock; - PRINTMARK(); if (blkCtxt->transpose) { blkCtxt->colIdx++; @@ -593,7 +570,6 @@ void PdBlockPassThru_iterEnd(JSOBJ obj, JSONTypeContext *tc) { int PdBlock_iterNextItem(JSOBJ obj, JSONTypeContext *tc) { PdBlockContext *blkCtxt = GET_TC(tc)->pdblock; - PRINTMARK(); if (blkCtxt->colIdx >= blkCtxt->ncols) { return 0; @@ -610,7 +586,6 @@ char *PdBlock_iterGetName(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc, NpyArrContext *npyarr = blkCtxt->npyCtxts[0]; npy_intp idx; char *cStr; - PRINTMARK(); if (GET_TC(tc)->iterNext == PdBlock_iterNextItem) { idx = blkCtxt->colIdx - 1; @@ -633,7 +608,6 @@ char *PdBlock_iterGetName_Transpose(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc, NpyArrContext *npyarr = blkCtxt->npyCtxts[blkCtxt->colIdx]; npy_intp idx; char *cStr; - PRINTMARK(); if (GET_TC(tc)->iterNext == NpyArr_iterNextItem) { idx = npyarr->index[npyarr->stridedim] - 1; @@ -650,7 +624,6 @@ char *PdBlock_iterGetName_Transpose(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc, int PdBlock_iterNext(JSOBJ obj, JSONTypeContext *tc) { PdBlockContext *blkCtxt = GET_TC(tc)->pdblock; NpyArrContext *npyarr; - PRINTMARK(); if (PyErr_Occurred() || ((JSONObjectEncoder *)tc->encoder)->errorMsg) { return 0; @@ -675,7 +648,6 @@ int PdBlock_iterNext(JSOBJ obj, JSONTypeContext *tc) { void PdBlockPassThru_iterBegin(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc) { PdBlockContext *blkCtxt = GET_TC(tc)->pdblock; - PRINTMARK(); if (blkCtxt->transpose) { // if 
transposed we exhaust each column before moving to the next @@ -697,7 +669,6 @@ void PdBlock_iterBegin(JSOBJ _obj, JSONTypeContext *tc) { npy_int64 colIdx; npy_intp idx; - PRINTMARK(); obj = (PyObject *)_obj; GET_TC(tc)->iterGetName = GET_TC(tc)->transpose @@ -744,8 +715,8 @@ void PdBlock_iterBegin(JSOBJ _obj, JSONTypeContext *tc) { GET_TC(tc)->iterNext = NpyArr_iterNextNone; return; } else if (!PyTuple_Check(blocks)) { - PyErr_SetString(PyExc_TypeError, "blocks must be a tuple!"); - goto BLKRET; + PyErr_SetString(PyExc_TypeError, "blocks must be a tuple!"); + goto BLKRET; } // force transpose so each NpyArrContext strides down its column @@ -836,7 +807,6 @@ void PdBlock_iterEnd(JSOBJ obj, JSONTypeContext *tc) { PdBlockContext *blkCtxt; NpyArrContext *npyarr; int i; - PRINTMARK(); GET_TC(tc)->itemValue = NULL; npyarr = GET_TC(tc)->npyarr; @@ -948,7 +918,7 @@ JSOBJ Set_iterGetValue(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc) { } char *Set_iterGetName(JSOBJ Py_UNUSED(obj), JSONTypeContext *Py_UNUSED(tc), - size_t *Py_UNUSED(outLen)) { + size_t *Py_UNUSED(outLen)) { return NULL; } @@ -961,7 +931,6 @@ void Dir_iterBegin(JSOBJ obj, JSONTypeContext *tc) { GET_TC(tc)->attrList = PyObject_Dir(obj); GET_TC(tc)->index = 0; GET_TC(tc)->size = PyList_GET_SIZE(GET_TC(tc)->attrList); - PRINTMARK(); } void Dir_iterEnd(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc) { @@ -976,7 +945,6 @@ void Dir_iterEnd(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc) { } Py_DECREF((PyObject *)GET_TC(tc)->attrList); - PRINTMARK(); } int Dir_iterNext(JSOBJ _obj, JSONTypeContext *tc) { @@ -1007,7 +975,6 @@ int Dir_iterNext(JSOBJ _obj, JSONTypeContext *tc) { attrStr = PyBytes_AS_STRING(attr); if (attrStr[0] == '_') { - PRINTMARK(); Py_DECREF(attr); continue; } @@ -1016,14 +983,12 @@ int Dir_iterNext(JSOBJ _obj, JSONTypeContext *tc) { if (itemValue == NULL) { PyErr_Clear(); Py_DECREF(attr); - PRINTMARK(); continue; } if (PyCallable_Check(itemValue)) { Py_DECREF(itemValue); Py_DECREF(attr); - PRINTMARK(); continue; } @@ -1031,7 +996,6 @@ int Dir_iterNext(JSOBJ _obj, JSONTypeContext *tc) { GET_TC(tc)->itemValue = itemValue; GET_TC(tc)->index++; - PRINTMARK(); itemName = attr; break; } @@ -1046,18 +1010,15 @@ int Dir_iterNext(JSOBJ _obj, JSONTypeContext *tc) { GET_TC(tc)->itemValue = itemValue; GET_TC(tc)->index++; - PRINTMARK(); return 1; } JSOBJ Dir_iterGetValue(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc) { - PRINTMARK(); return GET_TC(tc)->itemValue; } char *Dir_iterGetName(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc, size_t *outLen) { - PRINTMARK(); *outLen = PyBytes_GET_SIZE(GET_TC(tc)->itemName); return PyBytes_AS_STRING(GET_TC(tc)->itemName); } @@ -1073,7 +1034,6 @@ void List_iterBegin(JSOBJ obj, JSONTypeContext *tc) { int List_iterNext(JSOBJ obj, JSONTypeContext *tc) { if (GET_TC(tc)->index >= GET_TC(tc)->size) { - PRINTMARK(); return 0; } @@ -1102,7 +1062,6 @@ void Index_iterBegin(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc) { if (!GET_TC(tc)->cStr) { PyErr_NoMemory(); } - PRINTMARK(); } int Index_iterNext(JSOBJ obj, JSONTypeContext *tc) { @@ -1123,18 +1082,14 @@ int Index_iterNext(JSOBJ obj, JSONTypeContext *tc) { return 0; } } else { - PRINTMARK(); return 0; } GET_TC(tc)->index++; - PRINTMARK(); return 1; } -void Index_iterEnd(JSOBJ Py_UNUSED(obj), JSONTypeContext *Py_UNUSED(tc)) { - PRINTMARK(); -} +void Index_iterEnd(JSOBJ Py_UNUSED(obj), JSONTypeContext *Py_UNUSED(tc)) {} JSOBJ Index_iterGetValue(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc) { return GET_TC(tc)->itemValue; @@ -1157,7 +1112,6 @@ void Series_iterBegin(JSOBJ 
Py_UNUSED(obj), JSONTypeContext *tc) { if (!GET_TC(tc)->cStr) { PyErr_NoMemory(); } - PRINTMARK(); } int Series_iterNext(JSOBJ obj, JSONTypeContext *tc) { @@ -1181,19 +1135,16 @@ int Series_iterNext(JSOBJ obj, JSONTypeContext *tc) { return 0; } } else { - PRINTMARK(); return 0; } GET_TC(tc)->index++; - PRINTMARK(); return 1; } void Series_iterEnd(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc) { PyObjectEncoder *enc = (PyObjectEncoder *)tc->encoder; enc->outputFormat = enc->originalOutputFormat; - PRINTMARK(); } JSOBJ Series_iterGetValue(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc) { @@ -1217,7 +1168,6 @@ void DataFrame_iterBegin(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc) { if (!GET_TC(tc)->cStr) { PyErr_NoMemory(); } - PRINTMARK(); } int DataFrame_iterNext(JSOBJ obj, JSONTypeContext *tc) { @@ -1246,19 +1196,16 @@ int DataFrame_iterNext(JSOBJ obj, JSONTypeContext *tc) { GET_TC(tc)->itemValue = obj; } } else { - PRINTMARK(); return 0; } GET_TC(tc)->index++; - PRINTMARK(); return 1; } void DataFrame_iterEnd(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc) { PyObjectEncoder *enc = (PyObjectEncoder *)tc->encoder; enc->outputFormat = enc->originalOutputFormat; - PRINTMARK(); } JSOBJ DataFrame_iterGetValue(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc) { @@ -1278,7 +1225,6 @@ char *DataFrame_iterGetName(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc, //============================================================================= void Dict_iterBegin(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc) { GET_TC(tc)->index = 0; - PRINTMARK(); } int Dict_iterNext(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc) { @@ -1291,7 +1237,6 @@ int Dict_iterNext(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc) { if (!PyDict_Next((PyObject *)GET_TC(tc)->dictObj, &GET_TC(tc)->index, &GET_TC(tc)->itemName, &GET_TC(tc)->itemValue)) { - PRINTMARK(); return 0; } @@ -1305,7 +1250,6 @@ int Dict_iterNext(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc) { } else { Py_INCREF(GET_TC(tc)->itemName); } - PRINTMARK(); return 1; } @@ -1315,7 +1259,6 @@ void Dict_iterEnd(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc) { GET_TC(tc)->itemName = NULL; } Py_DECREF(GET_TC(tc)->dictObj); - PRINTMARK(); } JSOBJ Dict_iterGetValue(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc) { @@ -1366,7 +1309,6 @@ char **NpyArr_encodeLabels(PyArrayObject *labels, PyObjectEncoder *enc, char *dataptr, *cLabel; int type_num; NPY_DATETIMEUNIT base = enc->datetimeUnit; - PRINTMARK(); if (!labels) { return 0; @@ -1425,8 +1367,7 @@ char **NpyArr_encodeLabels(PyArrayObject *labels, PyObjectEncoder *enc, 1000000000LL; // nanoseconds per second } else { // datetime.* objects don't follow above rules - nanosecVal = - PyDateTimeToEpoch(item, NPY_FR_ns); + nanosecVal = PyDateTimeToEpoch(item, NPY_FR_ns); } } } @@ -1503,7 +1444,6 @@ char **NpyArr_encodeLabels(PyArrayObject *labels, PyObjectEncoder *enc, void Object_invokeDefaultHandler(PyObject *obj, PyObjectEncoder *enc) { PyObject *tmpObj = NULL; - PRINTMARK(); tmpObj = PyObject_CallFunctionObjArgs(enc->defaultHandler, obj, NULL); if (!PyErr_Occurred()) { if (tmpObj == NULL) { @@ -1524,7 +1464,6 @@ void Object_beginTypeContext(JSOBJ _obj, JSONTypeContext *tc) { double val; npy_int64 value; int unit; - PRINTMARK(); tc->prv = NULL; @@ -1537,11 +1476,9 @@ void Object_beginTypeContext(JSOBJ _obj, JSONTypeContext *tc) { enc = (PyObjectEncoder *)tc->encoder; if (PyBool_Check(obj)) { - PRINTMARK(); tc->type = (obj == Py_True) ? 
JT_TRUE : JT_FALSE; return; } else if (obj == Py_None) { - PRINTMARK(); tc->type = JT_NULL; return; } @@ -1554,7 +1491,6 @@ void Object_beginTypeContext(JSOBJ _obj, JSONTypeContext *tc) { tc->prv = pc; if (PyTypeNum_ISDATETIME(enc->npyType)) { - PRINTMARK(); int64_t longVal; PyArray_VectorUnaryFunc *castfunc = PyArray_GetCastFunc(PyArray_DescrFromType(enc->npyType), NPY_INT64); @@ -1564,12 +1500,10 @@ void Object_beginTypeContext(JSOBJ _obj, JSONTypeContext *tc) { } castfunc(enc->npyValue, &longVal, 1, NULL, NULL); if (longVal == get_nat()) { - PRINTMARK(); tc->type = JT_NULL; } else { if (enc->datetimeIso) { - PRINTMARK(); if (enc->npyType == NPY_TIMEDELTA) { pc->PyTypeToUTF8 = NpyTimeDeltaToIsoCallback; } else { @@ -1580,7 +1514,6 @@ void Object_beginTypeContext(JSOBJ _obj, JSONTypeContext *tc) { GET_TC(tc)->longValue = longVal; tc->type = JT_UTF8; } else { - PRINTMARK(); NPY_DATETIMEUNIT base = ((PyObjectEncoder *)tc->encoder)->datetimeUnit; GET_TC(tc)->longValue = NpyDateTimeToEpoch(longVal, base); @@ -1597,30 +1530,24 @@ void Object_beginTypeContext(JSOBJ _obj, JSONTypeContext *tc) { if (PyIter_Check(obj) || (PyArray_Check(obj) && !PyArray_CheckScalar(obj))) { - PRINTMARK(); goto ISITERABLE; } if (PyLong_Check(obj)) { - PRINTMARK(); tc->type = JT_LONG; int overflow = 0; GET_TC(tc)->longValue = PyLong_AsLongLongAndOverflow(obj, &overflow); int err; err = (GET_TC(tc)->longValue == -1) && PyErr_Occurred(); - if (overflow){ - PRINTMARK(); + if (overflow) { tc->type = JT_BIGNUM; - } - else if (err) { - PRINTMARK(); + } else if (err) { goto INVALID; } - + return; } else if (PyFloat_Check(obj)) { - PRINTMARK(); val = PyFloat_AS_DOUBLE(obj); if (npy_isnan(val) || npy_isinf(val)) { tc->type = JT_NULL; @@ -1630,80 +1557,61 @@ void Object_beginTypeContext(JSOBJ _obj, JSONTypeContext *tc) { } return; } else if (PyBytes_Check(obj)) { - PRINTMARK(); pc->PyTypeToUTF8 = PyBytesToUTF8; tc->type = JT_UTF8; return; } else if (PyUnicode_Check(obj)) { - PRINTMARK(); pc->PyTypeToUTF8 = PyUnicodeToUTF8; tc->type = JT_UTF8; return; } else if (PyObject_TypeCheck(obj, type_decimal)) { - PRINTMARK(); GET_TC(tc)->doubleValue = PyFloat_AsDouble(obj); tc->type = JT_DOUBLE; return; } else if (PyDateTime_Check(obj) || PyDate_Check(obj)) { if (PyObject_TypeCheck(obj, cls_nat)) { - PRINTMARK(); tc->type = JT_NULL; return; } - PRINTMARK(); if (enc->datetimeIso) { - PRINTMARK(); pc->PyTypeToUTF8 = PyDateTimeToIsoCallback; tc->type = JT_UTF8; } else { - PRINTMARK(); NPY_DATETIMEUNIT base = ((PyObjectEncoder *)tc->encoder)->datetimeUnit; - GET_TC(tc)->longValue = - PyDateTimeToEpoch(obj, base); + GET_TC(tc)->longValue = PyDateTimeToEpoch(obj, base); tc->type = JT_LONG; } return; } else if (PyTime_Check(obj)) { - PRINTMARK(); pc->PyTypeToUTF8 = PyTimeToJSON; tc->type = JT_UTF8; return; } else if (PyArray_IsScalar(obj, Datetime)) { - PRINTMARK(); if (((PyDatetimeScalarObject *)obj)->obval == get_nat()) { - PRINTMARK(); tc->type = JT_NULL; return; } - PRINTMARK(); if (enc->datetimeIso) { - PRINTMARK(); pc->PyTypeToUTF8 = PyDateTimeToIsoCallback; tc->type = JT_UTF8; } else { - PRINTMARK(); NPY_DATETIMEUNIT base = ((PyObjectEncoder *)tc->encoder)->datetimeUnit; - GET_TC(tc)->longValue = - PyDateTimeToEpoch(obj, base); + GET_TC(tc)->longValue = PyDateTimeToEpoch(obj, base); tc->type = JT_LONG; } return; } else if (PyDelta_Check(obj)) { if (PyObject_HasAttrString(obj, "value")) { - PRINTMARK(); value = get_long_attr(obj, "value"); } else { - PRINTMARK(); value = total_seconds(obj) * 1000000000LL; // nanoseconds per second } - 
PRINTMARK(); if (value == get_nat()) { - PRINTMARK(); tc->type = JT_NULL; return; } else if (enc->datetimeIso) { @@ -1718,7 +1626,6 @@ void Object_beginTypeContext(JSOBJ _obj, JSONTypeContext *tc) { exc = PyErr_Occurred(); if (exc && PyErr_ExceptionMatches(PyExc_OverflowError)) { - PRINTMARK(); goto INVALID; } @@ -1727,7 +1634,6 @@ void Object_beginTypeContext(JSOBJ _obj, JSONTypeContext *tc) { GET_TC(tc)->longValue = value; return; } else if (PyArray_IsScalar(obj, Integer)) { - PRINTMARK(); tc->type = JT_LONG; PyArray_CastScalarToCtype(obj, &(GET_TC(tc)->longValue), PyArray_DescrFromType(NPY_INT64)); @@ -1735,19 +1641,16 @@ void Object_beginTypeContext(JSOBJ _obj, JSONTypeContext *tc) { exc = PyErr_Occurred(); if (exc && PyErr_ExceptionMatches(PyExc_OverflowError)) { - PRINTMARK(); goto INVALID; } return; } else if (PyArray_IsScalar(obj, Bool)) { - PRINTMARK(); PyArray_CastScalarToCtype(obj, &(GET_TC(tc)->longValue), PyArray_DescrFromType(NPY_BOOL)); tc->type = (GET_TC(tc)->longValue) ? JT_TRUE : JT_FALSE; return; } else if (PyArray_IsScalar(obj, Float) || PyArray_IsScalar(obj, Double)) { - PRINTMARK(); PyArray_CastScalarToCtype(obj, &(GET_TC(tc)->doubleValue), PyArray_DescrFromType(NPY_DOUBLE)); tc->type = JT_DOUBLE; @@ -1758,7 +1661,6 @@ void Object_beginTypeContext(JSOBJ _obj, JSONTypeContext *tc) { obj); goto INVALID; } else if (PyObject_TypeCheck(obj, cls_na)) { - PRINTMARK(); tc->type = JT_NULL; return; } @@ -1767,7 +1669,6 @@ void Object_beginTypeContext(JSOBJ _obj, JSONTypeContext *tc) { if (PyObject_TypeCheck(obj, cls_index)) { if (enc->outputFormat == SPLIT) { - PRINTMARK(); tc->type = JT_OBJECT; pc->iterBegin = Index_iterBegin; pc->iterEnd = Index_iterEnd; @@ -1779,7 +1680,6 @@ void Object_beginTypeContext(JSOBJ _obj, JSONTypeContext *tc) { pc->newObj = get_values(obj); if (pc->newObj) { - PRINTMARK(); tc->type = JT_ARRAY; pc->iterBegin = NpyArr_iterBegin; pc->iterEnd = NpyArr_iterEnd; @@ -1793,7 +1693,6 @@ void Object_beginTypeContext(JSOBJ _obj, JSONTypeContext *tc) { return; } else if (PyObject_TypeCheck(obj, cls_series)) { if (enc->outputFormat == SPLIT) { - PRINTMARK(); tc->type = JT_OBJECT; pc->iterBegin = Series_iterBegin; pc->iterEnd = Series_iterEnd; @@ -1809,7 +1708,6 @@ void Object_beginTypeContext(JSOBJ _obj, JSONTypeContext *tc) { } if (enc->outputFormat == INDEX || enc->outputFormat == COLUMNS) { - PRINTMARK(); tc->type = JT_OBJECT; tmpObj = PyObject_GetAttrString(obj, "index"); if (!tmpObj) { @@ -1827,7 +1725,6 @@ void Object_beginTypeContext(JSOBJ _obj, JSONTypeContext *tc) { goto INVALID; } } else { - PRINTMARK(); tc->type = JT_ARRAY; } pc->iterBegin = NpyArr_iterBegin; @@ -1838,7 +1735,6 @@ void Object_beginTypeContext(JSOBJ _obj, JSONTypeContext *tc) { return; } else if (PyArray_Check(obj)) { if (enc->npyCtxtPassthru) { - PRINTMARK(); pc->npyarr = enc->npyCtxtPassthru; tc->type = (pc->npyarr->columnLabels ? JT_OBJECT : JT_ARRAY); @@ -1852,7 +1748,6 @@ void Object_beginTypeContext(JSOBJ _obj, JSONTypeContext *tc) { return; } - PRINTMARK(); tc->type = JT_ARRAY; pc->iterBegin = NpyArr_iterBegin; pc->iterEnd = NpyArr_iterEnd; @@ -1862,7 +1757,6 @@ void Object_beginTypeContext(JSOBJ _obj, JSONTypeContext *tc) { return; } else if (PyObject_TypeCheck(obj, cls_dataframe)) { if (enc->blkCtxtPassthru) { - PRINTMARK(); pc->pdblock = enc->blkCtxtPassthru; tc->type = (pc->pdblock->npyCtxts[0]->columnLabels ? 
JT_OBJECT : JT_ARRAY); @@ -1878,7 +1772,6 @@ void Object_beginTypeContext(JSOBJ _obj, JSONTypeContext *tc) { } if (enc->outputFormat == SPLIT) { - PRINTMARK(); tc->type = JT_OBJECT; pc->iterBegin = DataFrame_iterBegin; pc->iterEnd = DataFrame_iterEnd; @@ -1888,7 +1781,6 @@ void Object_beginTypeContext(JSOBJ _obj, JSONTypeContext *tc) { return; } - PRINTMARK(); if (is_simple_frame(obj)) { pc->iterBegin = NpyArr_iterBegin; pc->iterEnd = NpyArr_iterEnd; @@ -1908,10 +1800,8 @@ void Object_beginTypeContext(JSOBJ _obj, JSONTypeContext *tc) { pc->iterGetValue = NpyArr_iterGetValue; if (enc->outputFormat == VALUES) { - PRINTMARK(); tc->type = JT_ARRAY; } else if (enc->outputFormat == RECORDS) { - PRINTMARK(); tc->type = JT_ARRAY; tmpObj = PyObject_GetAttrString(obj, "columns"); if (!tmpObj) { @@ -1930,7 +1820,6 @@ void Object_beginTypeContext(JSOBJ _obj, JSONTypeContext *tc) { goto INVALID; } } else if (enc->outputFormat == INDEX || enc->outputFormat == COLUMNS) { - PRINTMARK(); tc->type = JT_OBJECT; tmpObj = (enc->outputFormat == INDEX ? PyObject_GetAttrString(obj, "index") @@ -1973,7 +1862,6 @@ void Object_beginTypeContext(JSOBJ _obj, JSONTypeContext *tc) { } if (enc->outputFormat == COLUMNS) { - PRINTMARK(); pc->transpose = 1; } } else { @@ -1981,7 +1869,6 @@ void Object_beginTypeContext(JSOBJ _obj, JSONTypeContext *tc) { } return; } else if (PyDict_Check(obj)) { - PRINTMARK(); tc->type = JT_OBJECT; pc->iterBegin = Dict_iterBegin; pc->iterEnd = Dict_iterEnd; @@ -1993,7 +1880,6 @@ void Object_beginTypeContext(JSOBJ _obj, JSONTypeContext *tc) { return; } else if (PyList_Check(obj)) { - PRINTMARK(); tc->type = JT_ARRAY; pc->iterBegin = List_iterBegin; pc->iterEnd = List_iterEnd; @@ -2002,7 +1888,6 @@ void Object_beginTypeContext(JSOBJ _obj, JSONTypeContext *tc) { pc->iterGetName = List_iterGetName; return; } else if (PyTuple_Check(obj)) { - PRINTMARK(); tc->type = JT_ARRAY; pc->iterBegin = Tuple_iterBegin; pc->iterEnd = Tuple_iterEnd; @@ -2011,7 +1896,6 @@ void Object_beginTypeContext(JSOBJ _obj, JSONTypeContext *tc) { pc->iterGetName = Tuple_iterGetName; return; } else if (PyAnySet_Check(obj)) { - PRINTMARK(); tc->type = JT_ARRAY; pc->iterBegin = Set_iterBegin; pc->iterEnd = Set_iterEnd; @@ -2041,7 +1925,6 @@ void Object_beginTypeContext(JSOBJ _obj, JSONTypeContext *tc) { return; } - PRINTMARK(); tc->type = JT_OBJECT; pc->iterBegin = Dict_iterBegin; pc->iterEnd = Dict_iterEnd; @@ -2059,7 +1942,6 @@ void Object_beginTypeContext(JSOBJ _obj, JSONTypeContext *tc) { goto INVALID; } - PRINTMARK(); tc->type = JT_OBJECT; pc->iterBegin = Dir_iterBegin; pc->iterEnd = Dir_iterEnd; @@ -2076,7 +1958,6 @@ void Object_beginTypeContext(JSOBJ _obj, JSONTypeContext *tc) { } void Object_endTypeContext(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc) { - PRINTMARK(); if (tc->prv) { Py_XDECREF(GET_TC(tc)->newObj); GET_TC(tc)->newObj = NULL; @@ -2105,16 +1986,16 @@ double Object_getDoubleValue(JSOBJ Py_UNUSED(obj), JSONTypeContext *tc) { return GET_TC(tc)->doubleValue; } -const char *Object_getBigNumStringValue(JSOBJ obj, JSONTypeContext *tc, - size_t *_outLen) { - PyObject* repr = PyObject_Str(obj); - const char *str = PyUnicode_AsUTF8AndSize(repr, (Py_ssize_t *) _outLen); - char* bytes = PyObject_Malloc(*_outLen + 1); +const char *Object_getBigNumStringValue(JSOBJ obj, JSONTypeContext *tc, + size_t *_outLen) { + PyObject *repr = PyObject_Str(obj); + const char *str = PyUnicode_AsUTF8AndSize(repr, (Py_ssize_t *)_outLen); + char *bytes = PyObject_Malloc(*_outLen + 1); memcpy(bytes, str, *_outLen + 1); GET_TC(tc)->cStr = 
bytes;

     Py_DECREF(repr);
-
+
     return GET_TC(tc)->cStr;
 }

@@ -2200,8 +2081,6 @@ PyObject *objToJSON(PyObject *Py_UNUSED(self), PyObject *args,
     pyEncoder.outputFormat = COLUMNS;
     pyEncoder.defaultHandler = 0;

-    PRINTMARK();
-
     if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O|OiOssOOi", kwlist,
                                      &oinput, &oensureAscii, &idoublePrecision,
                                      &oencodeHTMLChars, &sOrient, &sdateFormat,
@@ -2274,16 +2153,12 @@ PyObject *objToJSON(PyObject *Py_UNUSED(self), PyObject *args,
     encoder->indent = indent;

     pyEncoder.originalOutputFormat = pyEncoder.outputFormat;
-    PRINTMARK();
     ret = JSON_EncodeObject(oinput, encoder, buffer, sizeof(buffer));
-    PRINTMARK();
     if (PyErr_Occurred()) {
-        PRINTMARK();
         return NULL;
     }

     if (encoder->errorMsg) {
-        PRINTMARK();
         if (ret != buffer) {
             encoder->free(ret);
         }
@@ -2297,7 +2172,5 @@ PyObject *objToJSON(PyObject *Py_UNUSED(self), PyObject *args,
         encoder->free(ret);
     }

-    PRINTMARK();
-
     return newobj;
 }

From 59128e348f34b9e7393defd5ce5ecec9e45c5adf Mon Sep 17 00:00:00 2001
From: jbrockmendel
Date: Thu, 9 Jul 2020 15:00:13 -0700
Subject: [PATCH 079/632] REF: remove always-UTC arg from tz_convert,
 tz_convert_single (#35154)

---
 asv_bench/benchmarks/tslibs/tz_convert.py    | 21 ++++--
 pandas/_libs/tslibs/__init__.py              |  4 +-
 pandas/_libs/tslibs/offsets.pyx              |  5 +-
 pandas/_libs/tslibs/timestamps.pyx           |  6 +-
 pandas/_libs/tslibs/tzconversion.pxd         |  2 +-
 pandas/_libs/tslibs/tzconversion.pyx         | 68 +++++--------------
 pandas/core/arrays/datetimes.py              |  4 +-
 .../tests/scalar/timestamp/test_timezones.py |  2 +-
 pandas/tests/tslibs/test_api.py              |  2 +-
 pandas/tests/tslibs/test_conversion.py       |  9 +--
 pandas/tseries/frequencies.py                |  5 +-
 11 files changed, 52 insertions(+), 76 deletions(-)

diff --git a/asv_bench/benchmarks/tslibs/tz_convert.py b/asv_bench/benchmarks/tslibs/tz_convert.py
index 2a1f559bdf6d4..c2c90024ca5bd 100644
--- a/asv_bench/benchmarks/tslibs/tz_convert.py
+++ b/asv_bench/benchmarks/tslibs/tz_convert.py
@@ -1,16 +1,23 @@
 import numpy as np
 from pytz import UTC

-from pandas._libs.tslibs.tzconversion import tz_convert, tz_localize_to_utc
+from pandas._libs.tslibs.tzconversion import tz_localize_to_utc

 from .tslib import _sizes, _tzs

+try:
+    old_sig = False
+    from pandas._libs.tslibs.tzconversion import tz_convert_from_utc
+except ImportError:
+    old_sig = True
+    from pandas._libs.tslibs.tzconversion import tz_convert as tz_convert_from_utc
+

 class TimeTZConvert:
-    params = (
+    params = [
         _sizes,
         [x for x in _tzs if x is not None],
-    )
+    ]
     param_names = ["size", "tz"]

     def setup(self, size, tz):
@@ -21,7 +28,13 @@ def time_tz_convert_from_utc(self, size, tz):
         # effectively:
         #  dti = DatetimeIndex(self.i8data, tz=tz)
         #  dti.tz_localize(None)
-        tz_convert(self.i8data, UTC, tz)
+        if size >= 10 ** 6 and str(tz) == "tzlocal()":
+            # asv will fail because each call takes 8+ seconds
+            return
+        if old_sig:
+            tz_convert_from_utc(self.i8data, UTC, tz)
+        else:
+            tz_convert_from_utc(self.i8data, tz)

     def time_tz_localize_to_utc(self, size, tz):
         # effectively:

diff --git a/pandas/_libs/tslibs/__init__.py b/pandas/_libs/tslibs/__init__.py
index c2f3478a50ab4..6fe6fa0a13c34 100644
--- a/pandas/_libs/tslibs/__init__.py
+++ b/pandas/_libs/tslibs/__init__.py
@@ -19,7 +19,7 @@
     "ints_to_pytimedelta",
     "get_resolution",
     "Timestamp",
-    "tz_convert_single",
+    "tz_convert_from_utc_single",
     "to_offset",
     "Tick",
     "BaseOffset",
@@ -34,7 +34,7 @@
 from .resolution import Resolution
 from .timedeltas import Timedelta, delta_to_nanoseconds, ints_to_pytimedelta
 from .timestamps import Timestamp
-from .tzconversion import tz_convert_single
+from .tzconversion import tz_convert_from_utc_single from .vectorized import ( dt64arr_to_periodarr, get_resolution, diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx index fb07e3fe7547e..0f9280ae92d39 100644 --- a/pandas/_libs/tslibs/offsets.pyx +++ b/pandas/_libs/tslibs/offsets.pyx @@ -46,8 +46,7 @@ from pandas._libs.tslibs.np_datetime cimport ( dt64_to_dtstruct, pydate_to_dtstruct, ) -from pandas._libs.tslibs.timezones cimport utc_pytz as UTC -from pandas._libs.tslibs.tzconversion cimport tz_convert_single +from pandas._libs.tslibs.tzconversion cimport tz_convert_from_utc_single from .dtypes cimport PeriodDtypeCode from .timedeltas cimport delta_to_nanoseconds @@ -264,7 +263,7 @@ cdef _to_dt64D(dt): # equiv `Timestamp(dt).value` or `dt.timestamp() * 10**9` nanos = getattr(dt, "nanosecond", 0) i8 = convert_datetime_to_tsobject(dt, tz=None, nanos=nanos).value - dt = tz_convert_single(i8, UTC, dt.tzinfo) + dt = tz_convert_from_utc_single(i8, dt.tzinfo) dt = np.int64(dt).astype('datetime64[ns]') else: dt = np.datetime64(dt) diff --git a/pandas/_libs/tslibs/timestamps.pyx b/pandas/_libs/tslibs/timestamps.pyx index a2dacd9d36b14..8cef685933863 100644 --- a/pandas/_libs/tslibs/timestamps.pyx +++ b/pandas/_libs/tslibs/timestamps.pyx @@ -59,7 +59,7 @@ from pandas._libs.tslibs.timezones cimport ( get_timezone, tz_compare, ) from pandas._libs.tslibs.tzconversion cimport ( - tz_convert_single, + tz_convert_from_utc_single, tz_localize_to_utc_single, ) @@ -1309,7 +1309,7 @@ default 'raise' else: if tz is None: # reset tz - value = tz_convert_single(self.value, UTC, self.tz) + value = tz_convert_from_utc_single(self.value, self.tz) return Timestamp(value, tz=tz, freq=self.freq) else: raise TypeError( @@ -1391,7 +1391,7 @@ default 'raise' tzobj = self.tzinfo value = self.value if tzobj is not None: - value = tz_convert_single(value, UTC, tzobj) + value = tz_convert_from_utc_single(value, tzobj) # setup components dt64_to_dtstruct(value, &dts) diff --git a/pandas/_libs/tslibs/tzconversion.pxd b/pandas/_libs/tslibs/tzconversion.pxd index 7d102868256de..1990afd77a8fb 100644 --- a/pandas/_libs/tslibs/tzconversion.pxd +++ b/pandas/_libs/tslibs/tzconversion.pxd @@ -3,7 +3,7 @@ from numpy cimport int64_t cdef int64_t tz_convert_utc_to_tzlocal(int64_t utc_val, tzinfo tz, bint* fold=*) -cpdef int64_t tz_convert_single(int64_t val, tzinfo tz1, tzinfo tz2) +cpdef int64_t tz_convert_from_utc_single(int64_t val, tzinfo tz) cdef int64_t tz_localize_to_utc_single( int64_t val, tzinfo tz, object ambiguous=*, object nonexistent=* ) except? -1 diff --git a/pandas/_libs/tslibs/tzconversion.pyx b/pandas/_libs/tslibs/tzconversion.pyx index 98c40e109dbab..a6afd47d93479 100644 --- a/pandas/_libs/tslibs/tzconversion.pyx +++ b/pandas/_libs/tslibs/tzconversion.pyx @@ -366,17 +366,16 @@ cdef int64_t tz_convert_utc_to_tzlocal(int64_t utc_val, tzinfo tz, bint* fold=NU return _tz_convert_tzlocal_utc(utc_val, tz, to_utc=False, fold=fold) -cpdef int64_t tz_convert_single(int64_t val, tzinfo tz1, tzinfo tz2): +cpdef int64_t tz_convert_from_utc_single(int64_t val, tzinfo tz): """ - Convert the val (in i8) from timezone1 to timezone2 + Convert the val (in i8) from UTC to tz - This is a single timezone version of tz_convert + This is a single value version of tz_convert_from_utc. 
Parameters ---------- val : int64 - tz1 : tzinfo - tz2 : tzinfo + tz : tzinfo Returns ------- @@ -384,38 +383,27 @@ cpdef int64_t tz_convert_single(int64_t val, tzinfo tz1, tzinfo tz2): """ cdef: int64_t arr[1] - bint to_utc = is_utc(tz2) - tzinfo tz - - # See GH#17734 We should always be converting either from UTC or to UTC - assert is_utc(tz1) or to_utc if val == NPY_NAT: return val - if to_utc: - tz = tz1 - else: - tz = tz2 - if is_utc(tz): return val elif is_tzlocal(tz): - return _tz_convert_tzlocal_utc(val, tz, to_utc=to_utc) + return _tz_convert_tzlocal_utc(val, tz, to_utc=False) else: arr[0] = val - return _tz_convert_dst(arr, tz, to_utc=to_utc)[0] + return _tz_convert_dst(arr, tz)[0] -def tz_convert(int64_t[:] vals, tzinfo tz1, tzinfo tz2): +def tz_convert_from_utc(int64_t[:] vals, tzinfo tz): """ - Convert the values (in i8) from timezone1 to timezone2 + Convert the values (in i8) from UTC to tz Parameters ---------- vals : int64 ndarray - tz1 : tzinfo - tz2 : tzinfo + tz : tzinfo Returns ------- @@ -423,36 +411,24 @@ def tz_convert(int64_t[:] vals, tzinfo tz1, tzinfo tz2): """ cdef: int64_t[:] converted - bint to_utc = is_utc(tz2) - tzinfo tz - - # See GH#17734 We should always be converting from UTC; otherwise - # should use tz_localize_to_utc. - assert is_utc(tz1) if len(vals) == 0: return np.array([], dtype=np.int64) - if to_utc: - tz = tz1 - else: - tz = tz2 - - converted = _tz_convert_one_way(vals, tz, to_utc=to_utc) + converted = _tz_convert_from_utc(vals, tz) return np.array(converted, dtype=np.int64) @cython.boundscheck(False) @cython.wraparound(False) -cdef int64_t[:] _tz_convert_one_way(int64_t[:] vals, tzinfo tz, bint to_utc): +cdef int64_t[:] _tz_convert_from_utc(int64_t[:] vals, tzinfo tz): """ Convert the given values (in i8) either to UTC or from UTC. Parameters ---------- vals : int64 ndarray - tz1 : tzinfo - to_utc : bool + tz : tzinfo Returns ------- @@ -472,9 +448,9 @@ cdef int64_t[:] _tz_convert_one_way(int64_t[:] vals, tzinfo tz, bint to_utc): if val == NPY_NAT: converted[i] = NPY_NAT else: - converted[i] = _tz_convert_tzlocal_utc(val, tz, to_utc) + converted[i] = _tz_convert_tzlocal_utc(val, tz, to_utc=False) else: - converted = _tz_convert_dst(vals, tz, to_utc) + converted = _tz_convert_dst(vals, tz) return converted @@ -565,9 +541,7 @@ cdef int64_t _tz_convert_tzlocal_utc(int64_t val, tzinfo tz, bint to_utc=True, @cython.boundscheck(False) @cython.wraparound(False) -cdef int64_t[:] _tz_convert_dst( - const int64_t[:] values, tzinfo tz, bint to_utc=True, -): +cdef int64_t[:] _tz_convert_dst(const int64_t[:] values, tzinfo tz): """ tz_convert for non-UTC non-tzlocal cases where we have to check DST transitions pointwise. @@ -576,8 +550,6 @@ cdef int64_t[:] _tz_convert_dst( ---------- values : ndarray[int64_t] tz : tzinfo - to_utc : bool - True if converting _to_ UTC, False if converting _from_ utc Returns ------- @@ -607,10 +579,7 @@ cdef int64_t[:] _tz_convert_dst( if v == NPY_NAT: result[i] = v else: - if to_utc: - result[i] = v - delta - else: - result[i] = v + delta + result[i] = v + delta else: # Previously, this search was done pointwise to try and benefit @@ -629,9 +598,6 @@ cdef int64_t[:] _tz_convert_dst( # it elsewhere? 
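                    # (i.e. the timestamp predates the first transition
                    #  recorded for this timezone, so no UTC offset is known
                    #  for it)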
raise ValueError("First time before start of DST info") - if to_utc: - result[i] = v - deltas[pos[i]] - else: - result[i] = v + deltas[pos[i]] + result[i] = v + deltas[pos[i]] return result diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index 8eac45cdedaec..5038df85c9160 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -728,7 +728,7 @@ def _local_timestamps(self): This is used to calculate time-of-day information as if the timestamps were timezone-naive. """ - return tzconversion.tz_convert(self.asi8, timezones.UTC, self.tz) + return tzconversion.tz_convert_from_utc(self.asi8, self.tz) def tz_convert(self, tz): """ @@ -960,7 +960,7 @@ def tz_localize(self, tz, ambiguous="raise", nonexistent="raise"): if self.tz is not None: if tz is None: - new_dates = tzconversion.tz_convert(self.asi8, timezones.UTC, self.tz) + new_dates = tzconversion.tz_convert_from_utc(self.asi8, self.tz) else: raise TypeError("Already tz-aware, use tz_convert to convert.") else: diff --git a/pandas/tests/scalar/timestamp/test_timezones.py b/pandas/tests/scalar/timestamp/test_timezones.py index 9611c827be6fe..83764aa184392 100644 --- a/pandas/tests/scalar/timestamp/test_timezones.py +++ b/pandas/tests/scalar/timestamp/test_timezones.py @@ -334,7 +334,7 @@ def test_timestamp_to_datetime_tzoffset(self): def test_timestamp_constructor_near_dst_boundary(self): # GH#11481 & GH#15777 # Naive string timestamps were being localized incorrectly - # with tz_convert_single instead of tz_localize_to_utc + # with tz_convert_from_utc_single instead of tz_localize_to_utc for tz in ["Europe/Brussels", "Europe/Prague"]: result = Timestamp("2015-10-25 01:00", tz=tz) diff --git a/pandas/tests/tslibs/test_api.py b/pandas/tests/tslibs/test_api.py index 957706fcb460e..ccaceb7e6f906 100644 --- a/pandas/tests/tslibs/test_api.py +++ b/pandas/tests/tslibs/test_api.py @@ -47,7 +47,7 @@ def test_namespace(): "delta_to_nanoseconds", "ints_to_pytimedelta", "localize_pydatetime", - "tz_convert_single", + "tz_convert_from_utc_single", "to_offset", ] diff --git a/pandas/tests/tslibs/test_conversion.py b/pandas/tests/tslibs/test_conversion.py index 5a16fea47e90d..b35940c6bb95b 100644 --- a/pandas/tests/tslibs/test_conversion.py +++ b/pandas/tests/tslibs/test_conversion.py @@ -12,9 +12,9 @@ def _compare_utc_to_local(tz_didx): def f(x): - return tzconversion.tz_convert_single(x, UTC, tz_didx.tz) + return tzconversion.tz_convert_from_utc_single(x, tz_didx.tz) - result = tzconversion.tz_convert(tz_didx.asi8, UTC, tz_didx.tz) + result = tzconversion.tz_convert_from_utc(tz_didx.asi8, tz_didx.tz) expected = np.vectorize(f)(tz_didx.asi8) tm.assert_numpy_array_equal(result, expected) @@ -22,9 +22,6 @@ def f(x): def _compare_local_to_utc(tz_didx, naive_didx): # Check that tz_localize behaves the same vectorized and pointwise. 
-    def f(x):
-        return tzconversion.tz_convert_single(x, tz_didx.tz, UTC)
-
     err1 = err2 = None
     try:
         result = tzconversion.tz_localize_to_utc(naive_didx.asi8, tz_didx.tz)
@@ -71,7 +68,7 @@ def test_tz_convert_single_matches_tz_convert(tz_aware_fixture, freq):
     ],
 )
 def test_tz_convert_corner(arr):
-    result = tzconversion.tz_convert(arr, UTC, timezones.maybe_get_tz("Asia/Tokyo"))
+    result = tzconversion.tz_convert_from_utc(arr, timezones.maybe_get_tz("Asia/Tokyo"))
     tm.assert_numpy_array_equal(result, arr)

diff --git a/pandas/tseries/frequencies.py b/pandas/tseries/frequencies.py
index f94c8ef6550a5..23e08c7550646 100644
--- a/pandas/tseries/frequencies.py
+++ b/pandas/tseries/frequencies.py
@@ -21,7 +21,6 @@
 )
 from pandas._libs.tslibs.parsing import get_rule_month
 from pandas._libs.tslibs.resolution import month_position_check
-from pandas._libs.tslibs.timezones import UTC
 from pandas.util._decorators import cache_readonly

 from pandas.core.dtypes.common import (
@@ -198,7 +197,9 @@ def __init__(self, index, warn: bool = True):
         #  the timezone so they are in local time
         if hasattr(index, "tz"):
             if index.tz is not None:
-                self.i8values = tzconversion.tz_convert(self.i8values, UTC, index.tz)
+                self.i8values = tzconversion.tz_convert_from_utc(
+                    self.i8values, index.tz
+                )

         self.warn = warn

From 8295dc42e5bf75d7315a495ffed6fa21ead680bb Mon Sep 17 00:00:00 2001
From: Robin to Roxel <35864265+r-toroxel@users.noreply.github.com>
Date: Fri, 10 Jul 2020 00:05:33 +0200
Subject: [PATCH 080/632] TST Verify that dropna returns None when called
 inplace (#35179) (#35181)

---
 pandas/tests/frame/test_missing.py | 36 ++++++++++++++++++++----------
 1 file changed, 24 insertions(+), 12 deletions(-)

diff --git a/pandas/tests/frame/test_missing.py b/pandas/tests/frame/test_missing.py
index 7cb7115276f71..9bf5d24085697 100644
--- a/pandas/tests/frame/test_missing.py
+++ b/pandas/tests/frame/test_missing.py
@@ -24,14 +24,16 @@ def test_dropEmptyRows(self, float_frame):
         smaller_frame = frame.dropna(how="all")
         # check that original was preserved
         tm.assert_series_equal(frame["foo"], original)
-        inplace_frame1.dropna(how="all", inplace=True)
+        return_value = inplace_frame1.dropna(how="all", inplace=True)
         tm.assert_series_equal(smaller_frame["foo"], expected)
         tm.assert_series_equal(inplace_frame1["foo"], expected)
+        assert return_value is None

         smaller_frame = frame.dropna(how="all", subset=["foo"])
-        inplace_frame2.dropna(how="all", subset=["foo"], inplace=True)
+        return_value = inplace_frame2.dropna(how="all", subset=["foo"], inplace=True)
         tm.assert_series_equal(smaller_frame["foo"], expected)
         tm.assert_series_equal(inplace_frame2["foo"], expected)
+        assert return_value is None

     def test_dropIncompleteRows(self, float_frame):
         N = len(float_frame.index)
@@ -45,18 +47,20 @@ ... frame.dropna()
         tm.assert_series_equal(frame["foo"], original)
-        inp_frame1.dropna(inplace=True)
+        return_value = inp_frame1.dropna(inplace=True)

         exp = Series(mat[5:], index=float_frame.index[5:], name="foo")
         tm.assert_series_equal(smaller_frame["foo"], exp)
         tm.assert_series_equal(inp_frame1["foo"], exp)
+        assert return_value is None

         samesize_frame = frame.dropna(subset=["bar"])
         tm.assert_series_equal(frame["foo"], original)
         assert (frame["bar"] == 5).all()
-        inp_frame2.dropna(subset=["bar"], inplace=True)
+        return_value = inp_frame2.dropna(subset=["bar"], inplace=True)
         tm.assert_index_equal(samesize_frame.index, float_frame.index)
         tm.assert_index_equal(inp_frame2.index, float_frame.index)
+
assert return_value is None def test_dropna(self): df = DataFrame(np.random.randn(6, 4)) @@ -65,31 +69,35 @@ def test_dropna(self): dropped = df.dropna(axis=1) expected = df.loc[:, [0, 1, 3]] inp = df.copy() - inp.dropna(axis=1, inplace=True) + return_value = inp.dropna(axis=1, inplace=True) tm.assert_frame_equal(dropped, expected) tm.assert_frame_equal(inp, expected) + assert return_value is None dropped = df.dropna(axis=0) expected = df.loc[list(range(2, 6))] inp = df.copy() - inp.dropna(axis=0, inplace=True) + return_value = inp.dropna(axis=0, inplace=True) tm.assert_frame_equal(dropped, expected) tm.assert_frame_equal(inp, expected) + assert return_value is None # threshold dropped = df.dropna(axis=1, thresh=5) expected = df.loc[:, [0, 1, 3]] inp = df.copy() - inp.dropna(axis=1, thresh=5, inplace=True) + return_value = inp.dropna(axis=1, thresh=5, inplace=True) tm.assert_frame_equal(dropped, expected) tm.assert_frame_equal(inp, expected) + assert return_value is None dropped = df.dropna(axis=0, thresh=4) expected = df.loc[range(2, 6)] inp = df.copy() - inp.dropna(axis=0, thresh=4, inplace=True) + return_value = inp.dropna(axis=0, thresh=4, inplace=True) tm.assert_frame_equal(dropped, expected) tm.assert_frame_equal(inp, expected) + assert return_value is None dropped = df.dropna(axis=1, thresh=4) tm.assert_frame_equal(dropped, df) @@ -100,9 +108,10 @@ def test_dropna(self): # subset dropped = df.dropna(axis=0, subset=[0, 1, 3]) inp = df.copy() - inp.dropna(axis=0, subset=[0, 1, 3], inplace=True) + return_value = inp.dropna(axis=0, subset=[0, 1, 3], inplace=True) tm.assert_frame_equal(dropped, df) tm.assert_frame_equal(inp, df) + assert return_value is None # all dropped = df.dropna(axis=1, how="all") @@ -126,12 +135,14 @@ def test_drop_and_dropna_caching(self): df2 = df.copy() df["A"].dropna() tm.assert_series_equal(df["A"], original) - df["A"].dropna(inplace=True) + return_value = df["A"].dropna(inplace=True) tm.assert_series_equal(df["A"], expected) + assert return_value is None df2["A"].drop([1]) tm.assert_series_equal(df2["A"], original) - df2["A"].drop([1], inplace=True) + return_value = df2["A"].drop([1], inplace=True) tm.assert_series_equal(df2["A"], original.drop([1])) + assert return_value is None def test_dropna_corner(self, float_frame): # bad input @@ -251,8 +262,9 @@ def test_fillna_different_dtype(self): ) tm.assert_frame_equal(result, expected) - df.fillna({2: "foo"}, inplace=True) + return_value = df.fillna({2: "foo"}, inplace=True) tm.assert_frame_equal(df, expected) + assert return_value is None def test_fillna_limit_and_value(self): # limit and value From 406049e6ff01f509a36576fd52232809996b4d50 Mon Sep 17 00:00:00 2001 From: Song Wenhao Date: Fri, 10 Jul 2020 06:06:36 +0800 Subject: [PATCH 081/632] DOC: fix code snippets for generic indexing (#35175) Co-authored-by: maxime.song --- doc/source/user_guide/cookbook.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/source/user_guide/cookbook.rst b/doc/source/user_guide/cookbook.rst index 50b946999092a..49487ac327e73 100644 --- a/doc/source/user_guide/cookbook.rst +++ b/doc/source/user_guide/cookbook.rst @@ -219,8 +219,8 @@ There are 2 explicit slicing methods, with a third general case df.loc['bar':'kar'] # Label # Generic - df.iloc[0:3] - df.loc['bar':'kar'] + df[0:3] + df['bar':'kar'] Ambiguity arises when an index consists of integers with a non-zero start or non-unit increment. 
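The distinction behind the doc fix above: plain ``[]`` is the generic indexing form, where an integer slice such as ``df[0:3]`` is positional while a string slice such as ``df['bar':'kar']`` is label-based and includes the endpoint; ``.iloc`` and ``.loc`` each request one behavior explicitly. A minimal sketch of the difference, using a throwaway frame that is illustrative rather than taken from the patch:

    import pandas as pd

    df = pd.DataFrame({"x": range(5)}, index=["foo", "bar", "baz", "kar", "qux"])

    df[0:3]              # generic integer slice: positional, rows foo/bar/baz
    df["bar":"kar"]      # generic label slice: rows bar through kar, inclusive
    df.iloc[0:3]         # always positional, endpoint excluded
    df.loc["bar":"kar"]  # always label-based, endpoint included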
From 9b95eebaae22e9f9aad5ad80eea98aede67844c7 Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Thu, 9 Jul 2020 15:08:16 -0700 Subject: [PATCH 082/632] PERF: periodarr_to_dt64arr (#35171) --- asv_bench/benchmarks/tslibs/period.py | 38 ++++++++++++++++++++++++++- pandas/_libs/tslibs/period.pyx | 31 ++++++++++++++++++---- 2 files changed, 63 insertions(+), 6 deletions(-) diff --git a/asv_bench/benchmarks/tslibs/period.py b/asv_bench/benchmarks/tslibs/period.py index 9156c4aa90ea0..1a2c89b48c665 100644 --- a/asv_bench/benchmarks/tslibs/period.py +++ b/asv_bench/benchmarks/tslibs/period.py @@ -2,10 +2,15 @@ Period benchmarks that rely only on tslibs. See benchmarks.period for Period benchmarks that rely on other parts fo pandas. """ -from pandas import Period + +import numpy as np + +from pandas._libs.tslibs.period import Period, periodarr_to_dt64arr from pandas.tseries.frequencies import to_offset +from .tslib import _sizes + class PeriodProperties: @@ -68,3 +73,34 @@ def setup(self, freq, is_offset): def time_period_constructor(self, freq, is_offset): Period("2012-06-01", freq=freq) + + +class TimePeriodArrToDT64Arr: + params = [ + _sizes, + [ + 1000, + 1011, # Annual - November End + 2000, + 2011, # Quarterly - November End + 3000, + 4000, + 4006, # Weekly - Saturday End + 5000, + 6000, + 7000, + 8000, + 9000, + 10000, + 11000, + 12000, + ], + ] + param_names = ["size", "freq"] + + def setup(self, size, freq): + arr = np.arange(10, dtype="i8").repeat(size // 10) + self.i8values = arr + + def time_periodarray_to_dt64arr(self, size, freq): + periodarr_to_dt64arr(self.i8values, freq) diff --git a/pandas/_libs/tslibs/period.pyx b/pandas/_libs/tslibs/period.pyx index e6ba1968797ed..e992b20b12db2 100644 --- a/pandas/_libs/tslibs/period.pyx +++ b/pandas/_libs/tslibs/period.pyx @@ -54,6 +54,7 @@ from pandas._libs.tslibs.ccalendar cimport ( get_days_in_month, ) from pandas._libs.tslibs.ccalendar cimport c_MONTH_NUMBERS +from pandas._libs.tslibs.conversion import ensure_datetime64ns from pandas._libs.tslibs.dtypes cimport ( PeriodDtypeBase, @@ -943,14 +944,34 @@ def periodarr_to_dt64arr(const int64_t[:] periodarr, int freq): int64_t[:] out Py_ssize_t i, l - l = len(periodarr) + if freq < 6000: # i.e. 
FR_DAY, hard-code to avoid need to cast + l = len(periodarr) + out = np.empty(l, dtype="i8") - out = np.empty(l, dtype='i8') + # We get here with freqs that do not correspond to a datetime64 unit + for i in range(l): + out[i] = period_ordinal_to_dt64(periodarr[i], freq) - for i in range(l): - out[i] = period_ordinal_to_dt64(periodarr[i], freq) + return out.base # .base to access underlying np.ndarray - return out.base # .base to access underlying np.ndarray + else: + # Short-circuit for performance + if freq == FR_NS: + return periodarr.base + + if freq == FR_US: + dta = periodarr.base.view("M8[us]") + elif freq == FR_MS: + dta = periodarr.base.view("M8[ms]") + elif freq == FR_SEC: + dta = periodarr.base.view("M8[s]") + elif freq == FR_MIN: + dta = periodarr.base.view("M8[m]") + elif freq == FR_HR: + dta = periodarr.base.view("M8[h]") + elif freq == FR_DAY: + dta = periodarr.base.view("M8[D]") + return ensure_datetime64ns(dta) cpdef int64_t period_asfreq(int64_t ordinal, int freq1, int freq2, bint end): From c8d85e23b1f733397fa67c9170f10b05de8888ac Mon Sep 17 00:00:00 2001 From: Rik-de-Kort Date: Fri, 10 Jul 2020 01:34:44 +0200 Subject: [PATCH 083/632] ENH: Add optional argument index to pd.melt to maintain index values (#33659) --- doc/source/user_guide/reshaping.rst | 16 +++++++++++ doc/source/whatsnew/v1.1.0.rst | 2 ++ pandas/core/frame.py | 4 ++- pandas/core/reshape/melt.py | 10 +++++-- pandas/core/shared_docs.py | 16 +++++++++++ pandas/tests/reshape/test_melt.py | 41 +++++++++++++++++++++++++++++ 6 files changed, 86 insertions(+), 3 deletions(-) diff --git a/doc/source/user_guide/reshaping.rst b/doc/source/user_guide/reshaping.rst index c476e33b8ddde..aa6bf44547040 100644 --- a/doc/source/user_guide/reshaping.rst +++ b/doc/source/user_guide/reshaping.rst @@ -296,6 +296,22 @@ For instance, cheese.melt(id_vars=['first', 'last']) cheese.melt(id_vars=['first', 'last'], var_name='quantity') +When transforming a DataFrame using :func:`~pandas.melt`, the index will be ignored. The original index values can be kept around by setting the ``ignore_index`` parameter to ``False`` (default is ``True``). This will however duplicate them. + +.. versionadded:: 1.1.0 + +.. ipython:: python + + index = pd.MultiIndex.from_tuples([('person', 'A'), ('person', 'B')]) + cheese = pd.DataFrame({'first': ['John', 'Mary'], + 'last': ['Doe', 'Bo'], + 'height': [5.5, 6.0], + 'weight': [130, 150]}, + index=index) + cheese + cheese.melt(id_vars=['first', 'last']) + cheese.melt(id_vars=['first', 'last'], ignore_index=False) + Another way to transform is to use the :func:`~pandas.wide_to_long` panel data convenience function. It is less flexible than :func:`~pandas.melt`, but more user-friendly. diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst index 986ee371566cd..dc1ef12890233 100644 --- a/doc/source/whatsnew/v1.1.0.rst +++ b/doc/source/whatsnew/v1.1.0.rst @@ -297,6 +297,7 @@ Other enhancements This can be used to set a custom compression level, e.g., ``df.to_csv(path, compression={'method': 'gzip', 'compresslevel': 1}`` (:issue:`33196`) +- :meth:`melt` has gained an ``ignore_index`` (default ``True``) argument that, if set to ``False``, prevents the method from dropping the index (:issue:`17440`). 
- :meth:`Series.update` now accepts objects that can be coerced to a :class:`Series`, such as ``dict`` and ``list``, mirroring the behavior of :meth:`DataFrame.update` (:issue:`33215`) - :meth:`~pandas.core.groupby.GroupBy.transform` and :meth:`~pandas.core.groupby.GroupBy.aggregate` has gained ``engine`` and ``engine_kwargs`` arguments that supports executing functions with ``Numba`` (:issue:`32854`, :issue:`33388`) @@ -1168,3 +1169,4 @@ Other Contributors ~~~~~~~~~~~~ + diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 87041341ac3a6..3d2200cb45c6e 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -2145,7 +2145,7 @@ def to_stata( from pandas.io.stata import StataWriter117 as statawriter # type: ignore else: # versions 118 and 119 # mypy: Name 'statawriter' already defined (possibly by an import) - from pandas.io.stata import StataWriterUTF8 as statawriter # type:ignore + from pandas.io.stata import StataWriterUTF8 as statawriter # type: ignore kwargs: Dict[str, Any] = {} if version is None or version >= 117: @@ -7105,6 +7105,7 @@ def melt( var_name=None, value_name="value", col_level=None, + ignore_index=True, ) -> "DataFrame": return melt( @@ -7114,6 +7115,7 @@ def melt( var_name=var_name, value_name=value_name, col_level=col_level, + ignore_index=ignore_index, ) # ---------------------------------------------------------------------- diff --git a/pandas/core/reshape/melt.py b/pandas/core/reshape/melt.py index 923b9e7462d8b..1ba6854a79265 100644 --- a/pandas/core/reshape/melt.py +++ b/pandas/core/reshape/melt.py @@ -14,6 +14,7 @@ import pandas.core.common as com from pandas.core.indexes.api import Index, MultiIndex from pandas.core.reshape.concat import concat +from pandas.core.reshape.util import _tile_compat from pandas.core.shared_docs import _shared_docs from pandas.core.tools.numeric import to_numeric @@ -32,8 +33,8 @@ def melt( var_name=None, value_name="value", col_level=None, + ignore_index: bool = True, ) -> "DataFrame": - # TODO: what about the existing index? # If multiindex, gather names of columns on all level for checking presence # of `id_vars` and `value_vars` if isinstance(frame.columns, MultiIndex): @@ -132,7 +133,12 @@ def melt( # asanyarray will keep the columns as an Index mdata[col] = np.asanyarray(frame.columns._get_level_values(i)).repeat(N) - return frame._constructor(mdata, columns=mcolumns) + result = frame._constructor(mdata, columns=mcolumns) + + if not ignore_index: + result.index = _tile_compat(frame.index, K) + + return result @deprecate_kwarg(old_arg_name="label", new_arg_name=None) diff --git a/pandas/core/shared_docs.py b/pandas/core/shared_docs.py index 1894f551afea5..b81942f062b19 100644 --- a/pandas/core/shared_docs.py +++ b/pandas/core/shared_docs.py @@ -28,6 +28,11 @@ Name to use for the 'value' column. col_level : int or str, optional If columns are a MultiIndex then use this level to melt. + ignore_index : bool, default True + If True, original index is ignored. If False, the original index is retained. + Index labels will be repeated as necessary. + + .. 
versionadded:: 1.1.0 Returns ------- @@ -78,6 +83,17 @@ 1 b B 3 2 c B 5 + Original index values can be kept around: + + >>> %(caller)sid_vars=['A'], value_vars=['B', 'C'], ignore_index=False) + A variable value + 0 a B 1 + 1 b B 3 + 2 c B 5 + 0 a C 2 + 1 b C 4 + 2 c C 6 + If you have multi-index columns: >>> df.columns = [list('ABC'), list('DEF')] diff --git a/pandas/tests/reshape/test_melt.py b/pandas/tests/reshape/test_melt.py index 03fda038539e2..241721432bbf9 100644 --- a/pandas/tests/reshape/test_melt.py +++ b/pandas/tests/reshape/test_melt.py @@ -357,6 +357,47 @@ def test_melt_mixed_int_str_value_vars(self): expected = DataFrame({"variable": [0, "a"], "value": ["foo", "bar"]}) tm.assert_frame_equal(result, expected) + def test_ignore_index(self): + # GH 17440 + df = DataFrame({"foo": [0], "bar": [1]}, index=["first"]) + result = melt(df, ignore_index=False) + expected = DataFrame( + {"variable": ["foo", "bar"], "value": [0, 1]}, index=["first", "first"] + ) + tm.assert_frame_equal(result, expected) + + def test_ignore_multiindex(self): + # GH 17440 + index = pd.MultiIndex.from_tuples( + [("first", "second"), ("first", "third")], names=["baz", "foobar"] + ) + df = DataFrame({"foo": [0, 1], "bar": [2, 3]}, index=index) + result = melt(df, ignore_index=False) + + expected_index = pd.MultiIndex.from_tuples( + [("first", "second"), ("first", "third")] * 2, names=["baz", "foobar"] + ) + expected = DataFrame( + {"variable": ["foo"] * 2 + ["bar"] * 2, "value": [0, 1, 2, 3]}, + index=expected_index, + ) + + tm.assert_frame_equal(result, expected) + + def test_ignore_index_name_and_type(self): + # GH 17440 + index = pd.Index(["foo", "bar"], dtype="category", name="baz") + df = DataFrame({"x": [0, 1], "y": [2, 3]}, index=index) + result = melt(df, ignore_index=False) + + expected_index = pd.Index(["foo", "bar"] * 2, dtype="category", name="baz") + expected = DataFrame( + {"variable": ["x", "x", "y", "y"], "value": [0, 1, 2, 3]}, + index=expected_index, + ) + + tm.assert_frame_equal(result, expected) + class TestLreshape: def test_pairs(self): From 4274b118bf314d21eda846c9aea22b6d8014ba27 Mon Sep 17 00:00:00 2001 From: Jihwan Song Date: Thu, 9 Jul 2020 19:35:58 -0400 Subject: [PATCH 084/632] PERF: to speed up rendering of styler (#34863) --- asv_bench/asv.conf.json | 1 + asv_bench/benchmarks/io/style.py | 34 +++++++++++++++++++++++++++ doc/source/whatsnew/v1.1.0.rst | 1 + pandas/io/formats/style.py | 18 ++++++++++---- pandas/tests/io/formats/test_style.py | 13 +++------- 5 files changed, 52 insertions(+), 15 deletions(-) create mode 100644 asv_bench/benchmarks/io/style.py diff --git a/asv_bench/asv.conf.json b/asv_bench/asv.conf.json index 7c10a2d17775a..4583fac85b776 100644 --- a/asv_bench/asv.conf.json +++ b/asv_bench/asv.conf.json @@ -53,6 +53,7 @@ "xlwt": [], "odfpy": [], "pytest": [], + "jinja2": [], // If using Windows with python 2.7 and want to build using the // mingw toolchain (rather than MSVC), uncomment the following line. 
// "libpython": [],
diff --git a/asv_bench/benchmarks/io/style.py b/asv_bench/benchmarks/io/style.py
new file mode 100644
index 0000000000000..4fc07bbabda06
--- /dev/null
+++ b/asv_bench/benchmarks/io/style.py
@@ -0,0 +1,34 @@
+import numpy as np
+
+from pandas import DataFrame
+
+
+class RenderApply:
+
+    params = [[12, 24, 36], [12, 120]]
+    param_names = ["cols", "rows"]
+
+    def setup(self, cols, rows):
+        self.df = DataFrame(
+            np.random.randn(rows, cols),
+            columns=[f"float_{i+1}" for i in range(cols)],
+            index=[f"row_{i+1}" for i in range(rows)],
+        )
+        self._style_apply()
+
+    def time_render(self, cols, rows):
+        self.st.render()
+
+    def peakmem_apply(self, cols, rows):
+        self._style_apply()
+
+    def peakmem_render(self, cols, rows):
+        self.st.render()
+
+    def _style_apply(self):
+        def _apply_func(s):
+            return [
+                "background-color: lightcyan" if s.name == "row_1" else "" for v in s
+            ]
+
+        self.st = self.df.style.apply(_apply_func, axis=1)
diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst
index dc1ef12890233..5473b7c1523f3 100644
--- a/doc/source/whatsnew/v1.1.0.rst
+++ b/doc/source/whatsnew/v1.1.0.rst
@@ -854,6 +854,7 @@ Performance improvements
 - Performance improvement in :class:`pandas.core.groupby.RollingGroupby` (:issue:`34052`)
 - Performance improvement in arithmetic operations (sub, add, mul, div) for MultiIndex (:issue:`34297`)
 - Performance improvement in `DataFrame[bool_indexer]` when `bool_indexer` is a list (:issue:`33924`)
+- Significant performance improvement of :meth:`io.formats.style.Styler.render` with styles added in various ways, such as :meth:`io.formats.style.Styler.apply`, :meth:`io.formats.style.Styler.applymap` or :meth:`io.formats.style.Styler.bar` (:issue:`19917`)

 .. ---------------------------------------------------------------------------

diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py
index 6250e99252928..d11144938eb26 100644
--- a/pandas/io/formats/style.py
+++ b/pandas/io/formats/style.py
@@ -561,11 +561,19 @@ def _update_ctx(self, attrs: DataFrame) -> None:

        Whitespace shouldn't matter and the final trailing ';' shouldn't matter.
""" - for row_label, v in attrs.iterrows(): - for col_label, col in v.items(): - i = self.index.get_indexer([row_label])[0] - j = self.columns.get_indexer([col_label])[0] - for pair in col.rstrip(";").split(";"): + coli = {k: i for i, k in enumerate(self.columns)} + rowi = {k: i for i, k in enumerate(self.index)} + for jj in range(len(attrs.columns)): + cn = attrs.columns[jj] + j = coli[cn] + for rn, c in attrs[[cn]].itertuples(): + if not c: + continue + c = c.rstrip(";") + if not c: + continue + i = rowi[rn] + for pair in c.split(";"): self.ctx[(i, j)].append(pair) def _copy(self, deepcopy: bool = False) -> "Styler": diff --git a/pandas/tests/io/formats/test_style.py b/pandas/tests/io/formats/test_style.py index ec4614538004c..9c6910637fa7e 100644 --- a/pandas/tests/io/formats/test_style.py +++ b/pandas/tests/io/formats/test_style.py @@ -405,9 +405,10 @@ def f(x): result = self.df.style.where(f, style1)._compute().ctx expected = { - (r, c): [style1 if f(self.df.loc[row, col]) else ""] + (r, c): [style1] for r, row in enumerate(self.df.index) for c, col in enumerate(self.df.columns) + if f(self.df.loc[row, col]) } assert result == expected @@ -966,7 +967,6 @@ def test_bar_align_mid_nans(self): "transparent 25.0%, #d65f5f 25.0%, " "#d65f5f 50.0%, transparent 50.0%)", ], - (1, 0): [""], (0, 1): [ "width: 10em", " height: 80%", @@ -994,7 +994,6 @@ def test_bar_align_zero_nans(self): "transparent 50.0%, #d65f5f 50.0%, " "#d65f5f 75.0%, transparent 75.0%)", ], - (1, 0): [""], (0, 1): [ "width: 10em", " height: 80%", @@ -1091,7 +1090,7 @@ def test_format_with_bad_na_rep(self): def test_highlight_null(self, null_color="red"): df = pd.DataFrame({"A": [0, np.nan]}) result = df.style.highlight_null()._compute().ctx - expected = {(0, 0): [""], (1, 0): ["background-color: red"]} + expected = {(1, 0): ["background-color: red"]} assert result == expected def test_highlight_null_subset(self): @@ -1104,9 +1103,7 @@ def test_highlight_null_subset(self): .ctx ) expected = { - (0, 0): [""], (1, 0): ["background-color: red"], - (0, 1): [""], (1, 1): ["background-color: green"], } assert result == expected @@ -1219,8 +1216,6 @@ def test_highlight_max(self): expected = { (1, 0): ["background-color: yellow"], (1, 1): ["background-color: yellow"], - (0, 1): [""], - (0, 0): [""], } assert result == expected @@ -1228,8 +1223,6 @@ def test_highlight_max(self): expected = { (0, 1): ["background-color: yellow"], (1, 1): ["background-color: yellow"], - (0, 0): [""], - (1, 0): [""], } assert result == expected From 6a7e775ac9ba1dfcbad0dddea6e462358c05b9a9 Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Thu, 9 Jul 2020 16:40:14 -0700 Subject: [PATCH 085/632] REF: re-use get_firstbday, get_lastbday in fields.pyx (#35199) --- pandas/_libs/tslibs/ccalendar.pxd | 2 + pandas/_libs/tslibs/ccalendar.pyx | 49 ++++++++++++++++ pandas/_libs/tslibs/fields.pyx | 77 ++++++-------------------- pandas/_libs/tslibs/offsets.pyx | 53 +++--------------- pandas/tests/tslibs/test_liboffsets.py | 5 +- 5 files changed, 78 insertions(+), 108 deletions(-) diff --git a/pandas/_libs/tslibs/ccalendar.pxd b/pandas/_libs/tslibs/ccalendar.pxd index 41cc477413607..4eb5188b8a04b 100644 --- a/pandas/_libs/tslibs/ccalendar.pxd +++ b/pandas/_libs/tslibs/ccalendar.pxd @@ -10,6 +10,8 @@ cpdef int32_t get_days_in_month(int year, Py_ssize_t month) nogil cpdef int32_t get_week_of_year(int year, int month, int day) nogil cpdef iso_calendar_t get_iso_calendar(int year, int month, int day) nogil cpdef int32_t get_day_of_year(int year, int month, int day) nogil 
+cpdef int get_lastbday(int year, int month) nogil +cpdef int get_firstbday(int year, int month) nogil cdef int64_t DAY_NANOS cdef int64_t HOUR_NANOS diff --git a/pandas/_libs/tslibs/ccalendar.pyx b/pandas/_libs/tslibs/ccalendar.pyx index de8fd3911e946..00cecd25e5225 100644 --- a/pandas/_libs/tslibs/ccalendar.pyx +++ b/pandas/_libs/tslibs/ccalendar.pyx @@ -241,3 +241,52 @@ cpdef int32_t get_day_of_year(int year, int month, int day) nogil: day_of_year = mo_off + day return day_of_year + + +# --------------------------------------------------------------------- +# Business Helpers + +cpdef int get_lastbday(int year, int month) nogil: + """ + Find the last day of the month that is a business day. + + Parameters + ---------- + year : int + month : int + + Returns + ------- + last_bday : int + """ + cdef: + int wkday, days_in_month + + wkday = dayofweek(year, month, 1) + days_in_month = get_days_in_month(year, month) + return days_in_month - max(((wkday + days_in_month - 1) % 7) - 4, 0) + + +cpdef int get_firstbday(int year, int month) nogil: + """ + Find the first day of the month that is a business day. + + Parameters + ---------- + year : int + month : int + + Returns + ------- + first_bday : int + """ + cdef: + int first, wkday + + wkday = dayofweek(year, month, 1) + first = 1 + if wkday == 5: # on Saturday + first = 3 + elif wkday == 6: # on Sunday + first = 2 + return first diff --git a/pandas/_libs/tslibs/fields.pyx b/pandas/_libs/tslibs/fields.pyx index 5ea7c0b6c5d02..03e4188fd06ef 100644 --- a/pandas/_libs/tslibs/fields.pyx +++ b/pandas/_libs/tslibs/fields.pyx @@ -19,6 +19,8 @@ from pandas._libs.tslibs.ccalendar cimport ( get_days_in_month, is_leapyear, dayofweek, get_week_of_year, get_day_of_year, get_iso_calendar, iso_calendar_t, month_offset, + get_firstbday, + get_lastbday, ) from pandas._libs.tslibs.np_datetime cimport ( npy_datetimestruct, pandas_timedeltastruct, dt64_to_dtstruct, @@ -137,9 +139,7 @@ def get_start_end_field(const int64_t[:] dtindex, str field, int end_month = 12 int start_month = 1 ndarray[int8_t] out - bint isleap npy_datetimestruct dts - int mo_off, dom, doy, dow, ldom out = np.zeros(count, dtype='int8') @@ -172,10 +172,8 @@ def get_start_end_field(const int64_t[:] dtindex, str field, continue dt64_to_dtstruct(dtindex[i], &dts) - dom = dts.day - dow = dayofweek(dts.year, dts.month, dts.day) - if (dom == 1 and dow < 5) or (dom <= 3 and dow == 0): + if dts.day == get_firstbday(dts.year, dts.month): out[i] = 1 else: @@ -185,9 +183,8 @@ def get_start_end_field(const int64_t[:] dtindex, str field, continue dt64_to_dtstruct(dtindex[i], &dts) - dom = dts.day - if dom == 1: + if dts.day == 1: out[i] = 1 elif field == 'is_month_end': @@ -198,15 +195,8 @@ def get_start_end_field(const int64_t[:] dtindex, str field, continue dt64_to_dtstruct(dtindex[i], &dts) - isleap = is_leapyear(dts.year) - mo_off = month_offset[isleap * 13 + dts.month - 1] - dom = dts.day - doy = mo_off + dom - ldom = month_offset[isleap * 13 + dts.month] - dow = dayofweek(dts.year, dts.month, dts.day) - - if (ldom == doy and dow < 5) or ( - dow == 4 and (ldom - doy <= 2)): + + if dts.day == get_lastbday(dts.year, dts.month): out[i] = 1 else: @@ -216,13 +206,8 @@ def get_start_end_field(const int64_t[:] dtindex, str field, continue dt64_to_dtstruct(dtindex[i], &dts) - isleap = is_leapyear(dts.year) - mo_off = month_offset[isleap * 13 + dts.month - 1] - dom = dts.day - doy = mo_off + dom - ldom = month_offset[isleap * 13 + dts.month] - if ldom == doy: + if dts.day == get_days_in_month(dts.year, 
dts.month): out[i] = 1 elif field == 'is_quarter_start': @@ -233,11 +218,9 @@ def get_start_end_field(const int64_t[:] dtindex, str field, continue dt64_to_dtstruct(dtindex[i], &dts) - dom = dts.day - dow = dayofweek(dts.year, dts.month, dts.day) if ((dts.month - start_month) % 3 == 0) and ( - (dom == 1 and dow < 5) or (dom <= 3 and dow == 0)): + dts.day == get_firstbday(dts.year, dts.month)): out[i] = 1 else: @@ -247,9 +230,8 @@ def get_start_end_field(const int64_t[:] dtindex, str field, continue dt64_to_dtstruct(dtindex[i], &dts) - dom = dts.day - if ((dts.month - start_month) % 3 == 0) and dom == 1: + if ((dts.month - start_month) % 3 == 0) and dts.day == 1: out[i] = 1 elif field == 'is_quarter_end': @@ -260,16 +242,9 @@ def get_start_end_field(const int64_t[:] dtindex, str field, continue dt64_to_dtstruct(dtindex[i], &dts) - isleap = is_leapyear(dts.year) - mo_off = month_offset[isleap * 13 + dts.month - 1] - dom = dts.day - doy = mo_off + dom - ldom = month_offset[isleap * 13 + dts.month] - dow = dayofweek(dts.year, dts.month, dts.day) if ((dts.month - end_month) % 3 == 0) and ( - (ldom == doy and dow < 5) or ( - dow == 4 and (ldom - doy <= 2))): + dts.day == get_lastbday(dts.year, dts.month)): out[i] = 1 else: @@ -279,13 +254,9 @@ def get_start_end_field(const int64_t[:] dtindex, str field, continue dt64_to_dtstruct(dtindex[i], &dts) - isleap = is_leapyear(dts.year) - mo_off = month_offset[isleap * 13 + dts.month - 1] - dom = dts.day - doy = mo_off + dom - ldom = month_offset[isleap * 13 + dts.month] - if ((dts.month - end_month) % 3 == 0) and (ldom == doy): + if ((dts.month - end_month) % 3 == 0) and ( + dts.day == get_days_in_month(dts.year, dts.month)): out[i] = 1 elif field == 'is_year_start': @@ -296,11 +267,9 @@ def get_start_end_field(const int64_t[:] dtindex, str field, continue dt64_to_dtstruct(dtindex[i], &dts) - dom = dts.day - dow = dayofweek(dts.year, dts.month, dts.day) if (dts.month == start_month) and ( - (dom == 1 and dow < 5) or (dom <= 3 and dow == 0)): + dts.day == get_firstbday(dts.year, dts.month)): out[i] = 1 else: @@ -310,9 +279,8 @@ def get_start_end_field(const int64_t[:] dtindex, str field, continue dt64_to_dtstruct(dtindex[i], &dts) - dom = dts.day - if (dts.month == start_month) and dom == 1: + if (dts.month == start_month) and dts.day == 1: out[i] = 1 elif field == 'is_year_end': @@ -323,16 +291,9 @@ def get_start_end_field(const int64_t[:] dtindex, str field, continue dt64_to_dtstruct(dtindex[i], &dts) - isleap = is_leapyear(dts.year) - dom = dts.day - mo_off = month_offset[isleap * 13 + dts.month - 1] - doy = mo_off + dom - dow = dayofweek(dts.year, dts.month, dts.day) - ldom = month_offset[isleap * 13 + dts.month] if (dts.month == end_month) and ( - (ldom == doy and dow < 5) or ( - dow == 4 and (ldom - doy <= 2))): + dts.day == get_lastbday(dts.year, dts.month)): out[i] = 1 else: @@ -342,13 +303,9 @@ def get_start_end_field(const int64_t[:] dtindex, str field, continue dt64_to_dtstruct(dtindex[i], &dts) - isleap = is_leapyear(dts.year) - mo_off = month_offset[isleap * 13 + dts.month - 1] - dom = dts.day - doy = mo_off + dom - ldom = month_offset[isleap * 13 + dts.month] - if (dts.month == end_month) and (ldom == doy): + if (dts.month == end_month) and ( + dts.day == get_days_in_month(dts.year, dts.month)): out[i] = 1 else: diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx index 0f9280ae92d39..4429ff083f350 100644 --- a/pandas/_libs/tslibs/offsets.pyx +++ b/pandas/_libs/tslibs/offsets.pyx @@ -34,7 +34,13 @@ from 
pandas._libs.tslibs.util cimport ( from pandas._libs.tslibs.ccalendar import ( MONTH_ALIASES, MONTH_TO_CAL_NUM, weekday_to_int, int_to_weekday, ) -from pandas._libs.tslibs.ccalendar cimport DAY_NANOS, get_days_in_month, dayofweek +from pandas._libs.tslibs.ccalendar cimport ( + DAY_NANOS, + dayofweek, + get_days_in_month, + get_firstbday, + get_lastbday, +) from pandas._libs.tslibs.conversion cimport ( convert_datetime_to_tsobject, localize_pydatetime, @@ -177,51 +183,6 @@ cdef _wrap_timedelta_result(result): # --------------------------------------------------------------------- # Business Helpers -cpdef int get_lastbday(int year, int month) nogil: - """ - Find the last day of the month that is a business day. - - Parameters - ---------- - year : int - month : int - - Returns - ------- - last_bday : int - """ - cdef: - int wkday, days_in_month - - wkday = dayofweek(year, month, 1) - days_in_month = get_days_in_month(year, month) - return days_in_month - max(((wkday + days_in_month - 1) % 7) - 4, 0) - - -cpdef int get_firstbday(int year, int month) nogil: - """ - Find the first day of the month that is a business day. - - Parameters - ---------- - year : int - month : int - - Returns - ------- - first_bday : int - """ - cdef: - int first, wkday - - wkday = dayofweek(year, month, 1) - first = 1 - if wkday == 5: # on Saturday - first = 3 - elif wkday == 6: # on Sunday - first = 2 - return first - cdef _get_calendar(weekmask, holidays, calendar): """Generate busdaycalendar""" diff --git a/pandas/tests/tslibs/test_liboffsets.py b/pandas/tests/tslibs/test_liboffsets.py index 206a604788c7e..6a514d2cc8713 100644 --- a/pandas/tests/tslibs/test_liboffsets.py +++ b/pandas/tests/tslibs/test_liboffsets.py @@ -5,6 +5,7 @@ import pytest +from pandas._libs.tslibs.ccalendar import get_firstbday, get_lastbday import pandas._libs.tslibs.offsets as liboffsets from pandas._libs.tslibs.offsets import roll_qtrday @@ -25,7 +26,7 @@ def day_opt(request): ) def test_get_last_bday(dt, exp_week_day, exp_last_day): assert dt.weekday() == exp_week_day - assert liboffsets.get_lastbday(dt.year, dt.month) == exp_last_day + assert get_lastbday(dt.year, dt.month) == exp_last_day @pytest.mark.parametrize( @@ -37,7 +38,7 @@ def test_get_last_bday(dt, exp_week_day, exp_last_day): ) def test_get_first_bday(dt, exp_week_day, exp_first_day): assert dt.weekday() == exp_week_day - assert liboffsets.get_firstbday(dt.year, dt.month) == exp_first_day + assert get_firstbday(dt.year, dt.month) == exp_first_day @pytest.mark.parametrize( From c8b44dda08ab3a2feb2c4572be960469c3f065f7 Mon Sep 17 00:00:00 2001 From: Tim Swast Date: Thu, 9 Jul 2020 18:40:59 -0500 Subject: [PATCH 086/632] TST: update gbq service account key (#35090) Re-enable gbq integration tests. --- ci/travis_encrypt_gbq.sh | 2 +- ci/travis_gbq.json.enc | Bin 2352 -> 2352 bytes ci/travis_gbq_config.txt | 4 ++-- ci/travis_process_gbq_encryption.sh | 2 +- pandas/tests/io/test_gbq.py | 1 - 5 files changed, 4 insertions(+), 5 deletions(-) diff --git a/ci/travis_encrypt_gbq.sh b/ci/travis_encrypt_gbq.sh index e404ca73a405e..7d5692d9520af 100755 --- a/ci/travis_encrypt_gbq.sh +++ b/ci/travis_encrypt_gbq.sh @@ -19,7 +19,7 @@ if [[ ! -f $GBQ_JSON_FILE ]]; then fi echo "Encrypting $GBQ_JSON_FILE..." -read -d "\n" TRAVIS_KEY TRAVIS_IV <<<$(travis encrypt-file $GBQ_JSON_FILE \ +read -d "\n" TRAVIS_KEY TRAVIS_IV <<<$(travis encrypt-file -r pandas-dev/pandas $GBQ_JSON_FILE \ travis_gbq.json.enc -f | grep -o "\w*_iv\|\w*_key"); echo "Adding your secure key to travis_gbq_config.txt ..." 
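The ``get_firstbday``/``get_lastbday`` helpers consolidated in the preceding patch reduce to a few lines of weekday arithmetic. A pure-Python sketch using the stdlib ``calendar`` module (an illustration of the logic, not the Cython code itself):

import calendar

def get_lastbday(year, month):
    # calendar.monthrange gives (weekday of the 1st with Monday=0, days in month)
    wkday, days_in_month = calendar.monthrange(year, month)
    # pull a Saturday (5) back one day and a Sunday (6) back two days
    return days_in_month - max(((wkday + days_in_month - 1) % 7) - 4, 0)

def get_firstbday(year, month):
    wkday, _ = calendar.monthrange(year, month)
    if wkday == 5:   # the 1st is a Saturday -> Monday the 3rd
        return 3
    if wkday == 6:   # the 1st is a Sunday -> Monday the 2nd
        return 2
    return 1

assert get_lastbday(2020, 5) == 29   # 31 May 2020 is a Sunday
assert get_firstbday(2020, 8) == 3   # 1 Aug 2020 is a Saturday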
diff --git a/ci/travis_gbq.json.enc b/ci/travis_gbq.json.enc index c2a33bbd6f26383bd7e8a7a504e626284efb5fd0..6e0b6cee4048c70f9073dcdd75ed5008f2792c3e 100644 GIT binary patch literal 2352 zcmV-03D5R{crO*E{ek(-gWC#PQIGk*$iR%q>rF77-%gDxF9P?pb2rMmlMwIEx10ls z#PDGCjA$!JdSdpjZG0M3l|}#dZLrLz#@4cSS0@@t%glX;4Cgv+_M276eKlp=I~~f^ z%6@(WxCT-1IIF_t#kN*1>H;&?w(8kIry&7Tdh|#i_k}B$0Lp8?q^ZOjMAio zG~}090`&R9oHeK4@|G&WTDA(#J*5+tL_alee&0doZ1hX5f@K8!7VIsiIO5l>kw^9q zHn4oO0?|c@mcj5wmdg|43cpet_4yqeDXy!}NfQQGCFW$_mE<29(#wYNVMR-`L8-M*R?IybrG!?W#@hMr>cIK;v$yroE9pO zQb@b&_g0DR5b>Ol)3;QB7*Z!!fHm55$>4o?0mIAa#TnN^Z4+p*Z2;Uht1Zl|*rlcp z*#kXKpFu2=Bw99Fbjr(aYg?9R+)-dM`5PD1$*T49gous*frnsg;r)s@MWtfmI zLR)mg$U70>i+&b0$x35T{UM;)svKX@sgC|~I+K)zgn>}uRF879K)QH&m zerHq)M13h6y8+U!+qDEVikv~<3&=R%X5!0?8Pdh3wg)x#a&7x`4gtX^6*R2&c;VKa zdpBUe%+Dn*I@#gZRu6GaTw`v!Ph~`Q>mZNj{9M<-TLMv3o;1QnF3xQ~hU$?D>B{%^W@(yzk{L|L0Xpdv| zo7BMZuK&hCS$9&8Q=k&QJn2XFqZ4C_F{9tZ_pA*9ss|A1QA~O0Ea!z|$`u|9Hl90A zf_}IdJ2twW*p(j@gm+O|S9qmIVXHw?XzpNJ8u})9!g?TIsZLUtlXhS;A<%(E zY#(@-7UEcnq3v4wM}m4=aI?u;!jjj zj{y#}T1&-pta-xV|iAe;L~Y6)e#{Eu7vJi zFp&<(a5>&})lUM*kvbD&+Um68btHJ+xIu++nsaXPh27xV+P^ex6D-Z=2D|j84i#mb zZqTb^l=ev7M^2nCI#zDVrGrbW^Yw$dAuZa1i+P=ciW>ier!oKjISql9Uj{#phN)iFh?Y%cJSFg*e! ze_#xuad%2365^;n%ib)9#*k;gMLNT6!;F~oSampPoUpRW3zm9o>yK^Uw+M8)QQpf~ zLl90Q=5qY}n>;0t&D3@xwW23aEg4XD{t5mE zIFdFeWVp`GC21lVFbUk!8BQgXZxA&Sxt09e7T87sIY!^xJ4Z>lKgUwc+ zvv}f5AT7*14C5&?^hI{^#16^|h%X|SZ94E?Jt|o4N`fE)3k9EnIjr$F?aeuFdtRS* z2gch7Ir;~5tV{mT2Uf=$grPU^K(9E{K0D$DB0Nh3xvFf$dgKw9DVCcnb*v0jbOE8K zBmerx!Lrv;`r$+b(UpLuo(-FL75{zf|9zd84^~VGP}Z}(tRacc#0uU*V>!tY!P(Cx zWBEf+U^92gvLpwO?>nK_+U8pY2!Yc#3DmCbir{6AQFn}1?V)Qes1CNplnsBm)HRD%vCy=<|CT!dVU1CsBx#;b6?#-0=_@dkZT^22Z^ zq?UCZwVZJt-)oW5ODgD}S~a=`*&wFL9_ZVv5w8RQYG~OcOhttjso=0=)M{-={bTL( z%)F1VdvGBRuSxy2p5QBL9%yo%P8xJ>7VOF;$)fiFCLzLX{1Yok!WC>sT9?P!dKf!DcSq0>CvDP}E}q#rZl(V#J)t_VWJyG`)-e(B#o1bQ{dxyI=%t~g%{cKO z>L=mbyw_5R|2<(Xlz||b{4QL4?5PTpgA^Erkks{2PHY3+$Li1a{yu@o@%_B`7kiBI zuresPtxqLPM#g9yq-VL~uIfvGsh?KLx_cfB-Rf+q%se7im@7L>85(tpFm8P{@GMtm zg_5nF^uPL^iS^Z5mq_w1A;1+-aKa2C?vw@dAsG2lk!HTklD}D`R$Pf3?U;Q16kd7) zv2|W#d+=%H5w#%R4&sH7RS9ML_Kl*=MmkM_CEb z@Y+575%-IiF+LL|GVjPCqWQ&&4!Vesv%QrmSL}?=s7Z>YrW{7DHeu)i{I2cDGU4*Y zpL~~3vie3HB$lmO81RrnZhG!+TepN}ke9`&R&N^zYcWDJ=}|=$2vHQfkjZPddu-OU zSp~gP7(vSAjg>i=Du-%GL&cs-ob$y0Vdng~-RC-&AWWY*&tHYgo(hr;&}5Z+BnI&Q zcW9#;(+J!?=nhWKZMdyhnJ2V<4U-vKM7uMKAC|M0vASbw5OR#wHgI)KJf1D2HMNie>v8Wv Wjonm%xU9sD!uKl)As^5BMSjuVmYSFV literal 2352 zcmV-03D5QoiY_vZjh&7QCFrhKcFBG@`zj6HxkUamBtL*$SOfIYLQAnP$$?HCW-UzE zqY3S}bS_tytBr;XZgqTWlqlC0A?TtDDzJS4<-4yF+82AKZYaOSzyy z)LIN&*Phn|s>u2rH)V_1hyj-xu@)mBOg%_tj5_Sz6kyK>B5Gj0bp;~khYB=Ul|&X? 
zUFSM`<{}P#4_#PMfT#y?P!&Q=azAz#tG@DOU=aLF%RTb9pTg+mwrTZ+`_vBO5^xdb zCk{k&n*k1|x?M-4M;q$_?J$Z=GMNDL*;ETHrT|OpFalF9aJ;1NN8;rz^YfzF2c#MtNZvI;NuIJQ-M<=GHh=X9{ian$nm(H@?nOf1bgG`&RpLSr<5g9xf z2teKs?kATag6a+LsF}ejFjmcfSCRZKh(1~}uiJ(Qc@Q;)ValsMLtF!2X$O%Cb z2KMdb?&ns7GPy+RSdg<1=+QLqzgq74x1J+)2!4_{d|gtTVv9I=qfT>YNLb!NjSeg= zF|Qh88XA3rHR)>wth;QO_M(&hfA8)$QEpGgANx7DK|J`dW)T_`Xz_E!NK^R8RZg$y zc5}UIuDBt}n1#0!5GPf8Jbgag71LqHsVxL^@1qNIX|Dy=0vXV0(4^j2t$?ktEZdd5 zu_ckdLNK1WUPlJaR4^MLsqCIlhr=wrO2O}*qt8Z*MskXFh93(O!7RnBrwEDnT<`it5D0Mb#*2bx#aqC@LEJC=x_>Rx<|ygktaBRpWD z4#{MIj?XI%F|f1Z!qi;RP!vt6Ble@nmfAd}TzlXws1BJ)f5{5gri+aezIomN6ImrH zx}$i#tM@W$hzh(j)Gt+D=6S|?h}()_-~|h%S3)QyM`7f{Yf{v>p$dbYb8XdaAwacm zYIgF03~bBRJ?Q|Rm{AoSq^LSBkDa|`3tNoi02mXu+-Du+k_EUwoHMFk922)^pS;_D6#vtq~4S z0+*&E9tblkhvce%@L*}odrsPg ze1D(imA!lhnI7E+EDFG9720>Y4#l_d;0oNsr)BvjIN8`WGnc1$a?%?ycY8#Jhm$-C3s{t9ZH!5Tdr>`t41 zT)!t07R`S+w73>s@5X;v4d{Zrz<~%E?>$ry4A?zF{TOsf3y|_$p=_p^7 zyHtMEaO`#lEy8g>>v{%h!1*z-W`(rGI}x7M3P7v}4?u6$pF9q$Z>h4+;M|XMMXn-` zt;L)h+N2X->u!;3$*+|@qIVFK-FHTOWzOKyOMLi?7uHQUumZzC>x@c?*cS{IeR9pz z%j|yMgIP(6EQpB4%%ANMRmAGv^MZ8l-{UC8Un6k3C~MltE7?VC^N!9xT725P)|Gtf z&Y(8ua0ZUJO(-Sc>1rq^R0ra;Wa5&>w$UCFV36KRm<$T^2(h&JMd-wYacGQvViWbN z;Sj}nB6rj56!|*PGf00&z+`c`4W3nX4V>s9=aCW8AGAn)EiROzk#ku76;QET`eHgm z(nw)$QzY5E$?_QwzB-{3OpF_c;7(A1@_v7pYaO5JgoY(y&*&O#VUKi8dkA)N#1BEo z^s5wOm{@=f>c|t#|7>EeQqHh!uRXjICpE`%G!Z+Zt<^J-#-9iG(VG#%Nv?sI+ zbc`m4USJyzcgu?tl;%C}Ez6G@|f#&^hF+`g-yrj{hmY4yhlk+b#gV44cV?S5r%;?ge?g z#lzI?kuY1oXLg&XxdkBG8g*9plC**(x1xRs!fCuZZfAb#o*pyTq1{n<-CM+4c6lHo zqhwh;eK)Jl1X}YUP)?=oto!8X%qgNi1g>n7$x+*H3lrxcs&2-MENP(#=M;+oe_zRD zmCP_qF1Fe;UFgs(|6U79ig}b`dz4{4Eh38)&RvnO=3V=+bB@oe8weiJM6CJ5c%GQ-iz&#q=Du>_LJKa?c5%>1J4;MeQNYk^_$~ z;|WA1#Nz81yr8Jafys`4PisrSy?Jw~yQrKw#cLkq4Jq8We*d_mk#2#X^w3p=gJB>* z#!GJ%sBPy+SR&x<$od^Zj0! 
zidEfbN|w72WG4PR*<}{0X+HTW38KvQlnKe|LO@K*{nS!xOGu^})|VMf4R={d{^$ZY Wc%~RC+CiWM`BrrE1b(~# diff --git a/ci/travis_gbq_config.txt b/ci/travis_gbq_config.txt index 0b28cdedbd0d7..dc857c450331c 100644 --- a/ci/travis_gbq_config.txt +++ b/ci/travis_gbq_config.txt @@ -1,2 +1,2 @@ -TRAVIS_IV_ENV=encrypted_1d9d7b1f171b_iv -TRAVIS_KEY_ENV=encrypted_1d9d7b1f171b_key +TRAVIS_IV_ENV=encrypted_e05c934e101e_iv +TRAVIS_KEY_ENV=encrypted_e05c934e101e_key diff --git a/ci/travis_process_gbq_encryption.sh b/ci/travis_process_gbq_encryption.sh index 9967d40e49f0a..fccf8e1e8deff 100755 --- a/ci/travis_process_gbq_encryption.sh +++ b/ci/travis_process_gbq_encryption.sh @@ -7,7 +7,7 @@ if [[ -n ${SERVICE_ACCOUNT_KEY} ]]; then elif [[ -n ${!TRAVIS_IV_ENV} ]]; then openssl aes-256-cbc -K ${!TRAVIS_KEY_ENV} -iv ${!TRAVIS_IV_ENV} \ -in ci/travis_gbq.json.enc -out ci/travis_gbq.json -d; - export GBQ_PROJECT_ID='pandas-travis'; + export GBQ_PROJECT_ID='pandas-gbq-tests'; echo 'Successfully decrypted gbq credentials' fi diff --git a/pandas/tests/io/test_gbq.py b/pandas/tests/io/test_gbq.py index 870d78ef1c533..df107259d38cd 100644 --- a/pandas/tests/io/test_gbq.py +++ b/pandas/tests/io/test_gbq.py @@ -148,7 +148,6 @@ def mock_read_gbq(sql, **kwargs): @pytest.mark.single -@pytest.mark.xfail(reason="skipping gbq integration for now, xref #34779") class TestToGBQIntegrationWithServiceAccountKeyPath: @pytest.fixture() def gbq_dataset(self): From 995ca4914f10baeb26a2a62524c536e62c9960de Mon Sep 17 00:00:00 2001 From: Andrew Wieteska <48889395+arw2019@users.noreply.github.com> Date: Thu, 9 Jul 2020 19:41:55 -0400 Subject: [PATCH 087/632] TST: df.loc[:, 'col'] returning a view, but df.loc[df.index, 'col'] returning a copy (#34996) --- pandas/tests/indexing/test_loc.py | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/pandas/tests/indexing/test_loc.py b/pandas/tests/indexing/test_loc.py index 47980e88f76d4..30b13b6ea9fce 100644 --- a/pandas/tests/indexing/test_loc.py +++ b/pandas/tests/indexing/test_loc.py @@ -894,6 +894,22 @@ def test_identity_slice_returns_new_object(self): original_series[:3] = [7, 8, 9] assert all(sliced_series[:3] == [7, 8, 9]) + def test_loc_copy_vs_view(self): + # GH 15631 + x = DataFrame(zip(range(3), range(3)), columns=["a", "b"]) + + y = x.copy() + q = y.loc[:, "a"] + q += 2 + + tm.assert_frame_equal(x, y) + + z = x.copy() + q = z.loc[x.index, "a"] + q += 2 + + tm.assert_frame_equal(x, z) + def test_loc_uint64(self): # GH20722 # Test whether loc accept uint64 max value as index. 
From d536440d2324280dac2cd81d0568e2a597b97191 Mon Sep 17 00:00:00 2001 From: Ayappan Date: Fri, 10 Jul 2020 05:24:08 +0530 Subject: [PATCH 088/632] No fastcall attribute in POWER platform (#35083) --- pandas/_libs/src/ujson/lib/ultrajson.h | 8 +------- pandas/_libs/src/ujson/lib/ultrajsondec.c | 22 +++++++++++----------- pandas/_libs/src/ujson/lib/ultrajsonenc.c | 4 ++-- 3 files changed, 14 insertions(+), 20 deletions(-) diff --git a/pandas/_libs/src/ujson/lib/ultrajson.h b/pandas/_libs/src/ujson/lib/ultrajson.h index 69284e1c3f2ab..757cabdbbc730 100644 --- a/pandas/_libs/src/ujson/lib/ultrajson.h +++ b/pandas/_libs/src/ujson/lib/ultrajson.h @@ -94,7 +94,7 @@ typedef __int64 JSLONG; #define EXPORTFUNCTION __declspec(dllexport) #define FASTCALL_MSVC __fastcall -#define FASTCALL_ATTR + #define INLINE_PREFIX static __inline #else @@ -108,12 +108,6 @@ typedef uint32_t JSUINT32; #define FASTCALL_MSVC -#if !defined __x86_64__ && !defined __aarch64__ -#define FASTCALL_ATTR __attribute__((fastcall)) -#else -#define FASTCALL_ATTR -#endif - #define INLINE_PREFIX static inline typedef uint8_t JSUINT8; diff --git a/pandas/_libs/src/ujson/lib/ultrajsondec.c b/pandas/_libs/src/ujson/lib/ultrajsondec.c index 36eb170f8048f..81327fd9efb06 100644 --- a/pandas/_libs/src/ujson/lib/ultrajsondec.c +++ b/pandas/_libs/src/ujson/lib/ultrajsondec.c @@ -68,7 +68,7 @@ struct DecoderState { JSONObjectDecoder *dec; }; -JSOBJ FASTCALL_MSVC decode_any(struct DecoderState *ds) FASTCALL_ATTR; +JSOBJ FASTCALL_MSVC decode_any(struct DecoderState *ds); typedef JSOBJ (*PFN_DECODER)(struct DecoderState *ds); static JSOBJ SetError(struct DecoderState *ds, int offset, @@ -99,7 +99,7 @@ double createDouble(double intNeg, double intValue, double frcValue, return (intValue + (frcValue * g_pow10[frcDecimalCount])) * intNeg; } -FASTCALL_ATTR JSOBJ FASTCALL_MSVC decodePreciseFloat(struct DecoderState *ds) { +JSOBJ FASTCALL_MSVC decodePreciseFloat(struct DecoderState *ds) { char *end; double value; errno = 0; @@ -114,7 +114,7 @@ FASTCALL_ATTR JSOBJ FASTCALL_MSVC decodePreciseFloat(struct DecoderState *ds) { return ds->dec->newDouble(ds->prv, value); } -FASTCALL_ATTR JSOBJ FASTCALL_MSVC decode_numeric(struct DecoderState *ds) { +JSOBJ FASTCALL_MSVC decode_numeric(struct DecoderState *ds) { int intNeg = 1; int mantSize = 0; JSUINT64 intValue; @@ -340,7 +340,7 @@ FASTCALL_ATTR JSOBJ FASTCALL_MSVC decode_numeric(struct DecoderState *ds) { pow(10.0, expValue * expNeg)); } -FASTCALL_ATTR JSOBJ FASTCALL_MSVC decode_true(struct DecoderState *ds) { +JSOBJ FASTCALL_MSVC decode_true(struct DecoderState *ds) { char *offset = ds->start; offset++; @@ -356,7 +356,7 @@ FASTCALL_ATTR JSOBJ FASTCALL_MSVC decode_true(struct DecoderState *ds) { return SetError(ds, -1, "Unexpected character found when decoding 'true'"); } -FASTCALL_ATTR JSOBJ FASTCALL_MSVC decode_false(struct DecoderState *ds) { +JSOBJ FASTCALL_MSVC decode_false(struct DecoderState *ds) { char *offset = ds->start; offset++; @@ -373,7 +373,7 @@ FASTCALL_ATTR JSOBJ FASTCALL_MSVC decode_false(struct DecoderState *ds) { return SetError(ds, -1, "Unexpected character found when decoding 'false'"); } -FASTCALL_ATTR JSOBJ FASTCALL_MSVC decode_null(struct DecoderState *ds) { +JSOBJ FASTCALL_MSVC decode_null(struct DecoderState *ds) { char *offset = ds->start; offset++; @@ -389,7 +389,7 @@ FASTCALL_ATTR JSOBJ FASTCALL_MSVC decode_null(struct DecoderState *ds) { return SetError(ds, -1, "Unexpected character found when decoding 'null'"); } -FASTCALL_ATTR void FASTCALL_MSVC SkipWhitespace(struct 
DecoderState *ds) { +void FASTCALL_MSVC SkipWhitespace(struct DecoderState *ds) { char *offset; for (offset = ds->start; (ds->end - offset) > 0; offset++) { @@ -677,7 +677,7 @@ static const JSUINT8 g_decoderLookup[256] = { DS_UTFLENERROR, }; -FASTCALL_ATTR JSOBJ FASTCALL_MSVC decode_string(struct DecoderState *ds) { +JSOBJ FASTCALL_MSVC decode_string(struct DecoderState *ds) { JSUTF16 sur[2] = {0}; int iSur = 0; int index; @@ -957,7 +957,7 @@ FASTCALL_ATTR JSOBJ FASTCALL_MSVC decode_string(struct DecoderState *ds) { } } -FASTCALL_ATTR JSOBJ FASTCALL_MSVC decode_array(struct DecoderState *ds) { +JSOBJ FASTCALL_MSVC decode_array(struct DecoderState *ds) { JSOBJ itemValue; JSOBJ newObj; int len; @@ -1021,7 +1021,7 @@ FASTCALL_ATTR JSOBJ FASTCALL_MSVC decode_array(struct DecoderState *ds) { } } -FASTCALL_ATTR JSOBJ FASTCALL_MSVC decode_object(struct DecoderState *ds) { +JSOBJ FASTCALL_MSVC decode_object(struct DecoderState *ds) { JSOBJ itemName; JSOBJ itemValue; JSOBJ newObj; @@ -1104,7 +1104,7 @@ FASTCALL_ATTR JSOBJ FASTCALL_MSVC decode_object(struct DecoderState *ds) { } } -FASTCALL_ATTR JSOBJ FASTCALL_MSVC decode_any(struct DecoderState *ds) { +JSOBJ FASTCALL_MSVC decode_any(struct DecoderState *ds) { for (;;) { switch (*ds->start) { case '\"': diff --git a/pandas/_libs/src/ujson/lib/ultrajsonenc.c b/pandas/_libs/src/ujson/lib/ultrajsonenc.c index 51aa39a16920e..5343999c369f7 100644 --- a/pandas/_libs/src/ujson/lib/ultrajsonenc.c +++ b/pandas/_libs/src/ujson/lib/ultrajsonenc.c @@ -393,7 +393,7 @@ void Buffer_Realloc(JSONObjectEncoder *enc, size_t cbNeeded) { enc->end = enc->start + newSize; } -FASTCALL_ATTR INLINE_PREFIX void FASTCALL_MSVC +INLINE_PREFIX void FASTCALL_MSVC Buffer_AppendShortHexUnchecked(char *outputOffset, unsigned short value) { *(outputOffset++) = g_hexChars[(value & 0xf000) >> 12]; *(outputOffset++) = g_hexChars[(value & 0x0f00) >> 8]; @@ -722,7 +722,7 @@ int Buffer_EscapeStringValidated(JSOBJ obj, JSONObjectEncoder *enc, #define Buffer_AppendCharUnchecked(__enc, __chr) *((__enc)->offset++) = __chr; -FASTCALL_ATTR INLINE_PREFIX void FASTCALL_MSVC strreverse(char *begin, +INLINE_PREFIX void FASTCALL_MSVC strreverse(char *begin, char *end) { char aux; while (end > begin) aux = *end, *end-- = *begin, *begin++ = aux; From 2c3edaaaa0475841349659297039a606a72e9273 Mon Sep 17 00:00:00 2001 From: Vishwam Pandya Date: Thu, 9 Jul 2020 20:08:26 -0400 Subject: [PATCH 089/632] TST: category isin on frame (#34363) --- pandas/tests/frame/methods/test_isin.py | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/pandas/tests/frame/methods/test_isin.py b/pandas/tests/frame/methods/test_isin.py index 79ea70a38f145..35d45bd00131b 100644 --- a/pandas/tests/frame/methods/test_isin.py +++ b/pandas/tests/frame/methods/test_isin.py @@ -189,3 +189,18 @@ def test_isin_empty_datetimelike(self): tm.assert_frame_equal(result, expected) result = df1_td.isin(df3) tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "values", + [ + pd.DataFrame({"a": [1, 2, 3]}, dtype="category"), + pd.Series([1, 2, 3], dtype="category"), + ], + ) + def test_isin_category_frame(self, values): + # GH#34256 + df = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}) + expected = DataFrame({"a": [True, True, True], "b": [False, False, False]}) + + result = df.isin(values) + tm.assert_frame_equal(result, expected) From f5d7213111dfd9a7c114645a9a5e6454991f41e8 Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Fri, 10 Jul 2020 05:13:19 -0700 Subject: [PATCH 090/632] PERF: MonthOffset.apply_index 
(#35195) --- pandas/_libs/tslibs/offsets.pyx | 27 ++++++++++---------- pandas/tests/tseries/offsets/test_offsets.py | 6 ----- 2 files changed, 13 insertions(+), 20 deletions(-) diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx index 4429ff083f350..b0c6648514e99 100644 --- a/pandas/_libs/tslibs/offsets.pyx +++ b/pandas/_libs/tslibs/offsets.pyx @@ -558,7 +558,7 @@ cdef class BaseOffset: def _get_offset_day(self, other: datetime) -> int: # subclass must implement `_day_opt`; calling from the base class - # will raise NotImplementedError. + # will implicitly assume day_opt = "business_end", see get_day_of_month. cdef: npy_datetimestruct dts pydate_to_dtstruct(other, &dts) @@ -3611,7 +3611,6 @@ def shift_months(const int64_t[:] dtindex, int months, object day_opt=None): out[i] = dtstruct_to_dt64(&dts) elif day_opt in ["start", "end", "business_start", "business_end"]: _shift_months(dtindex, out, count, months, day_opt) - else: raise ValueError("day must be None, 'start', 'end', " "'business_start', or 'business_end'") @@ -3801,7 +3800,7 @@ def shift_month(stamp: datetime, months: int, day_opt: object=None) -> datetime: return stamp.replace(year=year, month=month, day=day) -cdef inline int get_day_of_month(npy_datetimestruct* dts, day_opt) nogil except? -1: +cdef inline int get_day_of_month(npy_datetimestruct* dts, str day_opt) nogil: """ Find the day in `other`'s month that satisfies a DateOffset's is_on_offset policy, as described by the `day_opt` argument. @@ -3827,27 +3826,23 @@ cdef inline int get_day_of_month(npy_datetimestruct* dts, day_opt) nogil except? >>> get_day_of_month(other, 'end') 30 + Notes + ----- + Caller is responsible for ensuring one of the four accepted day_opt values + is passed. """ - cdef: - int days_in_month if day_opt == "start": return 1 elif day_opt == "end": - days_in_month = get_days_in_month(dts.year, dts.month) - return days_in_month + return get_days_in_month(dts.year, dts.month) elif day_opt == "business_start": # first business day of month return get_firstbday(dts.year, dts.month) - elif day_opt == "business_end": + else: + # i.e. 
day_opt == "business_end": # last business day of month return get_lastbday(dts.year, dts.month) - elif day_opt is not None: - raise ValueError(day_opt) - elif day_opt is None: - # Note: unlike `shift_month`, get_day_of_month does not - # allow day_opt = None - raise NotImplementedError cpdef int roll_convention(int other, int n, int compare) nogil: @@ -3901,6 +3896,10 @@ def roll_qtrday(other: datetime, n: int, month: int, cdef: int months_since npy_datetimestruct dts + + if day_opt not in ["start", "end", "business_start", "business_end"]: + raise ValueError(day_opt) + pydate_to_dtstruct(other, &dts) if modby == 12: diff --git a/pandas/tests/tseries/offsets/test_offsets.py b/pandas/tests/tseries/offsets/test_offsets.py index 784c04f225630..cffaa7b43d0cf 100644 --- a/pandas/tests/tseries/offsets/test_offsets.py +++ b/pandas/tests/tseries/offsets/test_offsets.py @@ -4310,12 +4310,6 @@ def test_all_offset_classes(self, tup): # --------------------------------------------------------------------- -def test_get_offset_day_error(): - # subclass of _BaseOffset must override _day_opt attribute, or we should - # get a NotImplementedError - - with pytest.raises(NotImplementedError): - DateOffset()._get_offset_day(datetime.now()) def test_valid_default_arguments(offset_types): From df24c10c56fb57ad779667011bf07df6d7c5adf0 Mon Sep 17 00:00:00 2001 From: Joris Van den Bossche Date: Fri, 10 Jul 2020 14:14:10 +0200 Subject: [PATCH 091/632] BUG: fix IntegerArray astype with copy=True/False (#34931) * BUG: fix IntegerArray astype with copy=True/False * fix mypy * return self for same dtype and copy=False * whatsnew --- doc/source/whatsnew/v1.1.0.rst | 1 + pandas/core/arrays/integer.py | 20 +++++++----- pandas/core/arrays/masked.py | 11 +++++++ pandas/tests/arrays/integer/test_dtypes.py | 38 ++++++++++++++++++++++ 4 files changed, 62 insertions(+), 8 deletions(-) diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst index 5473b7c1523f3..5dff6d729479a 100644 --- a/doc/source/whatsnew/v1.1.0.rst +++ b/doc/source/whatsnew/v1.1.0.rst @@ -1143,6 +1143,7 @@ ExtensionArray - Fixed bug where :meth:`StringArray.memory_usage` was not implemented (:issue:`33963`) - Fixed bug where :meth:`DataFrameGroupBy` would ignore the ``min_count`` argument for aggregations on nullable boolean dtypes (:issue:`34051`) - Fixed bug that `DataFrame(columns=.., dtype='string')` would fail (:issue:`27953`, :issue:`33623`) +- Fixed bug in ``IntegerArray.astype`` to correctly copy the mask as well (:issue:`34931`). 
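A small sketch of the contract this fix establishes for ``astype`` on nullable integer arrays (it mirrors the new tests further down in the patch):

import pandas as pd

arr = pd.array([1, 2, None], dtype="Int64")

# Same dtype with copy=False: the very same object comes back.
assert arr.astype("Int64", copy=False) is arr

# copy=True must duplicate the data buffer *and* the NA mask.
dup = arr.astype("Int64", copy=True)
dup[0] = pd.NA
assert arr[0] == 1   # the original array is untouched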
Other ^^^^^ diff --git a/pandas/core/arrays/integer.py b/pandas/core/arrays/integer.py index 7be7ef3637ee5..b5cb681812939 100644 --- a/pandas/core/arrays/integer.py +++ b/pandas/core/arrays/integer.py @@ -448,18 +448,22 @@ def astype(self, dtype, copy: bool = True) -> ArrayLike: if incompatible type with an IntegerDtype, equivalent of same_kind casting """ - from pandas.core.arrays.boolean import BooleanDtype + from pandas.core.arrays.masked import BaseMaskedDtype from pandas.core.arrays.string_ import StringDtype dtype = pandas_dtype(dtype) - # if we are astyping to an existing IntegerDtype we can fastpath - if isinstance(dtype, _IntegerDtype): - result = self._data.astype(dtype.numpy_dtype, copy=False) - return dtype.construct_array_type()(result, mask=self._mask, copy=False) - elif isinstance(dtype, BooleanDtype): - result = self._data.astype("bool", copy=False) - return dtype.construct_array_type()(result, mask=self._mask, copy=False) + # if the dtype is exactly the same, we can fastpath + if self.dtype == dtype: + # return the same object for copy=False + return self.copy() if copy else self + # if we are astyping to another nullable masked dtype, we can fastpath + if isinstance(dtype, BaseMaskedDtype): + data = self._data.astype(dtype.numpy_dtype, copy=copy) + # mask is copied depending on whether the data was copied, and + # not directly depending on the `copy` keyword + mask = self._mask if data is self._data else self._mask.copy() + return dtype.construct_array_type()(data, mask, copy=False) elif isinstance(dtype, StringDtype): return dtype.construct_array_type()._from_sequence(self, copy=False) diff --git a/pandas/core/arrays/masked.py b/pandas/core/arrays/masked.py index 28add129825d1..235840d6d201e 100644 --- a/pandas/core/arrays/masked.py +++ b/pandas/core/arrays/masked.py @@ -40,6 +40,17 @@ class BaseMaskedDtype(ExtensionDtype): def numpy_dtype(self) -> np.dtype: raise AbstractMethodError + @classmethod + def construct_array_type(cls) -> Type["BaseMaskedArray"]: + """ + Return the array type associated with this dtype. 
+ + Returns + ------- + type + """ + raise NotImplementedError + class BaseMaskedArray(ExtensionArray, ExtensionOpsMixin): """ diff --git a/pandas/tests/arrays/integer/test_dtypes.py b/pandas/tests/arrays/integer/test_dtypes.py index cafe9e47a18f4..67efa4cb2ce4a 100644 --- a/pandas/tests/arrays/integer/test_dtypes.py +++ b/pandas/tests/arrays/integer/test_dtypes.py @@ -144,6 +144,44 @@ def test_astype(all_data): tm.assert_series_equal(result, expected) +def test_astype_copy(): + arr = pd.array([1, 2, 3, None], dtype="Int64") + orig = pd.array([1, 2, 3, None], dtype="Int64") + + # copy=True -> ensure both data and mask are actual copies + result = arr.astype("Int64", copy=True) + assert result is not arr + assert not np.shares_memory(result._data, arr._data) + assert not np.shares_memory(result._mask, arr._mask) + result[0] = 10 + tm.assert_extension_array_equal(arr, orig) + result[0] = pd.NA + tm.assert_extension_array_equal(arr, orig) + + # copy=False + result = arr.astype("Int64", copy=False) + assert result is arr + assert np.shares_memory(result._data, arr._data) + assert np.shares_memory(result._mask, arr._mask) + result[0] = 10 + assert arr[0] == 10 + result[0] = pd.NA + assert arr[0] is pd.NA + + # astype to different dtype -> always needs a copy -> even with copy=False + # we need to ensure that also the mask is actually copied + arr = pd.array([1, 2, 3, None], dtype="Int64") + orig = pd.array([1, 2, 3, None], dtype="Int64") + + result = arr.astype("Int32", copy=False) + assert not np.shares_memory(result._data, arr._data) + assert not np.shares_memory(result._mask, arr._mask) + result[0] = 10 + tm.assert_extension_array_equal(arr, orig) + result[0] = pd.NA + tm.assert_extension_array_equal(arr, orig) + + def test_astype_to_larger_numpy(): a = pd.array([1, 2], dtype="Int32") result = a.astype("int64") From 7f50f71785c5d059ca4e5c24b00f40900bb2d606 Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Fri, 10 Jul 2020 05:15:21 -0700 Subject: [PATCH 092/632] CLN: tighten types to get_rule_month (#35205) --- pandas/_libs/tslibs/parsing.pxd | 2 +- pandas/_libs/tslibs/parsing.pyx | 14 ++++++-------- pandas/_libs/tslibs/period.pyx | 9 +++++---- pandas/core/arrays/period.py | 3 ++- pandas/tests/tslibs/test_libfrequencies.py | 12 ++++++------ 5 files changed, 20 insertions(+), 20 deletions(-) diff --git a/pandas/_libs/tslibs/parsing.pxd b/pandas/_libs/tslibs/parsing.pxd index 6e826cd4c6602..9c9262beaafad 100644 --- a/pandas/_libs/tslibs/parsing.pxd +++ b/pandas/_libs/tslibs/parsing.pxd @@ -1,2 +1,2 @@ -cpdef str get_rule_month(object source, str default=*) +cpdef str get_rule_month(str source) diff --git a/pandas/_libs/tslibs/parsing.pyx b/pandas/_libs/tslibs/parsing.pyx index 3a1af9fdb1e8f..92654f3b587e5 100644 --- a/pandas/_libs/tslibs/parsing.pyx +++ b/pandas/_libs/tslibs/parsing.pyx @@ -284,7 +284,7 @@ def parse_time_string(arg: str, freq=None, dayfirst=None, yearfirst=None): cdef parse_datetime_string_with_reso( - str date_string, object freq=None, bint dayfirst=False, bint yearfirst=False, + str date_string, str freq=None, bint dayfirst=False, bint yearfirst=False, ): """ Parse datetime string and try to identify its resolution. 
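The helpers being retyped here are easy to mirror in pure Python. A sketch of ``get_rule_month`` and the fiscal-quarter arithmetic in ``quarter_to_myear`` (following the diffs below, with the month table inlined for self-containment):

MONTH_NUMBERS = {m: i for i, m in enumerate(
    ["JAN", "FEB", "MAR", "APR", "MAY", "JUN",
     "JUL", "AUG", "SEP", "OCT", "NOV", "DEC"])}

def get_rule_month(source: str) -> str:
    source = source.upper()
    return "DEC" if "-" not in source else source.split("-")[1]

def quarter_to_myear(year: int, quarter: int, freqstr: str):
    mnum = MONTH_NUMBERS[get_rule_month(freqstr)] + 1
    month = (mnum + (quarter - 1) * 3) % 12 + 1
    if month > mnum:   # the quarter starts before the fiscal year-end month
        year -= 1
    return year, month

assert quarter_to_myear(2020, 1, "Q-DEC") == (2020, 1)   # calendar year
assert quarter_to_myear(2020, 1, "Q-JAN") == (2019, 2)   # Q1 of FY2020 starts Feb 2019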
@@ -438,6 +438,7 @@ cdef inline object _parse_dateabbr_string(object date_string, datetime default, if freq is not None: # TODO: hack attack, #1228 + freq = getattr(freq, "freqstr", freq) try: mnum = c_MONTH_NUMBERS[get_rule_month(freq)] + 1 except (KeyError, ValueError): @@ -1020,15 +1021,14 @@ def concat_date_cols(tuple date_cols, bint keep_trivial_numbers=True): return result -# TODO: `default` never used? -cpdef str get_rule_month(object source, str default="DEC"): +cpdef str get_rule_month(str source): """ Return starting month of given freq, default is December. Parameters ---------- - source : object - default : str, default "DEC" + source : str + Derived from `freq.rule_code` or `freq.freqstr`. Returns ------- @@ -1042,10 +1042,8 @@ cpdef str get_rule_month(object source, str default="DEC"): >>> get_rule_month('A-JAN') 'JAN' """ - if is_offset_object(source): - source = source.freqstr source = source.upper() if "-" not in source: - return default + return "DEC" else: return source.split("-")[1] diff --git a/pandas/_libs/tslibs/period.pyx b/pandas/_libs/tslibs/period.pyx index e992b20b12db2..20961c6da56bd 100644 --- a/pandas/_libs/tslibs/period.pyx +++ b/pandas/_libs/tslibs/period.pyx @@ -2440,13 +2440,13 @@ cdef int64_t _ordinal_from_fields(int year, int month, quarter, int day, BaseOffset freq): base = freq_to_dtype_code(freq) if quarter is not None: - year, month = quarter_to_myear(year, quarter, freq) + year, month = quarter_to_myear(year, quarter, freq.freqstr) return period_ordinal(year, month, day, hour, minute, second, 0, 0, base) -def quarter_to_myear(year: int, quarter: int, freq): +def quarter_to_myear(year: int, quarter: int, freqstr: str): """ A quarterly frequency defines a "year" which may not coincide with the calendar-year. Find the calendar-year and calendar-month associated @@ -2456,7 +2456,8 @@ def quarter_to_myear(year: int, quarter: int, freq): ---------- year : int quarter : int - freq : DateOffset + freqstr : str + Equivalent to freq.freqstr Returns ------- @@ -2470,7 +2471,7 @@ def quarter_to_myear(year: int, quarter: int, freq): if quarter <= 0 or quarter > 4: raise ValueError('Quarter must be 1 <= q <= 4') - mnum = c_MONTH_NUMBERS[get_rule_month(freq)] + 1 + mnum = c_MONTH_NUMBERS[get_rule_month(freqstr)] + 1 month = (mnum + (quarter - 1) * 3) % 12 + 1 if month > mnum: year -= 1 diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py index b336371655466..8d5cb12d60e4d 100644 --- a/pandas/core/arrays/period.py +++ b/pandas/core/arrays/period.py @@ -1034,9 +1034,10 @@ def _range_from_fields( if base != FreqGroup.FR_QTR: raise AssertionError("base must equal FR_QTR") + freqstr = freq.freqstr year, quarter = _make_field_arrays(year, quarter) for y, q in zip(year, quarter): - y, m = libperiod.quarter_to_myear(y, q, freq) + y, m = libperiod.quarter_to_myear(y, q, freqstr) val = libperiod.period_ordinal(y, m, 1, 1, 1, 1, 0, 0, base) ordinals.append(val) else: diff --git a/pandas/tests/tslibs/test_libfrequencies.py b/pandas/tests/tslibs/test_libfrequencies.py index 993f2f4c8ef10..83f28f6b5dc01 100644 --- a/pandas/tests/tslibs/test_libfrequencies.py +++ b/pandas/tests/tslibs/test_libfrequencies.py @@ -9,19 +9,19 @@ "obj,expected", [ ("W", "DEC"), - (offsets.Week(), "DEC"), + (offsets.Week().freqstr, "DEC"), ("D", "DEC"), - (offsets.Day(), "DEC"), + (offsets.Day().freqstr, "DEC"), ("Q", "DEC"), - (offsets.QuarterEnd(startingMonth=12), "DEC"), + (offsets.QuarterEnd(startingMonth=12).freqstr, "DEC"), ("Q-JAN", "JAN"), - 
(offsets.QuarterEnd(startingMonth=1), "JAN"), + (offsets.QuarterEnd(startingMonth=1).freqstr, "JAN"), ("A-DEC", "DEC"), ("Y-DEC", "DEC"), - (offsets.YearEnd(), "DEC"), + (offsets.YearEnd().freqstr, "DEC"), ("A-MAY", "MAY"), ("Y-MAY", "MAY"), - (offsets.YearEnd(month=5), "MAY"), + (offsets.YearEnd(month=5).freqstr, "MAY"), ], ) def test_get_rule_month(obj, expected): From 3bcaea3833084b764dfde0a8d87e2bc636c3dab2 Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Fri, 10 Jul 2020 05:16:27 -0700 Subject: [PATCH 093/632] REF: remove libresolution (#35198) --- pandas/_libs/tslibs/__init__.py | 2 +- pandas/_libs/tslibs/fields.pyx | 40 ++++++++++++++++++++++ pandas/_libs/tslibs/resolution.pyx | 53 ------------------------------ pandas/core/arrays/datetimes.py | 4 +-- pandas/tests/tslibs/test_api.py | 1 - pandas/tseries/frequencies.py | 3 +- setup.py | 5 --- 7 files changed, 44 insertions(+), 64 deletions(-) delete mode 100644 pandas/_libs/tslibs/resolution.pyx diff --git a/pandas/_libs/tslibs/__init__.py b/pandas/_libs/tslibs/__init__.py index 6fe6fa0a13c34..0ae4cc97d07e3 100644 --- a/pandas/_libs/tslibs/__init__.py +++ b/pandas/_libs/tslibs/__init__.py @@ -27,11 +27,11 @@ from . import dtypes from .conversion import localize_pydatetime +from .dtypes import Resolution from .nattype import NaT, NaTType, iNaT, is_null_datetimelike, nat_strings from .np_datetime import OutOfBoundsDatetime from .offsets import BaseOffset, Tick, to_offset from .period import IncompatibleFrequency, Period -from .resolution import Resolution from .timedeltas import Timedelta, delta_to_nanoseconds, ints_to_pytimedelta from .timestamps import Timestamp from .tzconversion import tz_convert_from_utc_single diff --git a/pandas/_libs/tslibs/fields.pyx b/pandas/_libs/tslibs/fields.pyx index 03e4188fd06ef..1d1f900bc18b3 100644 --- a/pandas/_libs/tslibs/fields.pyx +++ b/pandas/_libs/tslibs/fields.pyx @@ -73,6 +73,46 @@ def build_field_sarray(const int64_t[:] dtindex): return out +def month_position_check(fields, weekdays): + cdef: + int32_t daysinmonth, y, m, d + bint calendar_end = True + bint business_end = True + bint calendar_start = True + bint business_start = True + bint cal + int32_t[:] years = fields["Y"] + int32_t[:] months = fields["M"] + int32_t[:] days = fields["D"] + + for y, m, d, wd in zip(years, months, days, weekdays): + if calendar_start: + calendar_start &= d == 1 + if business_start: + business_start &= d == 1 or (d <= 3 and wd == 0) + + if calendar_end or business_end: + daysinmonth = get_days_in_month(y, m) + cal = d == daysinmonth + if calendar_end: + calendar_end &= cal + if business_end: + business_end &= cal or (daysinmonth - d < 3 and wd == 4) + elif not calendar_start and not business_start: + break + + if calendar_end: + return "ce" + elif business_end: + return "be" + elif calendar_start: + return "cs" + elif business_start: + return "bs" + else: + return None + + @cython.wraparound(False) @cython.boundscheck(False) def get_date_name_field(const int64_t[:] dtindex, str field, object locale=None): diff --git a/pandas/_libs/tslibs/resolution.pyx b/pandas/_libs/tslibs/resolution.pyx deleted file mode 100644 index d2861d8e9fe8d..0000000000000 --- a/pandas/_libs/tslibs/resolution.pyx +++ /dev/null @@ -1,53 +0,0 @@ - -import numpy as np -from numpy cimport int32_t - -from pandas._libs.tslibs.dtypes import Resolution -from pandas._libs.tslibs.ccalendar cimport get_days_in_month - - -# ---------------------------------------------------------------------- -# Frequency Inference - -def 
month_position_check(fields, weekdays): - cdef: - int32_t daysinmonth, y, m, d - bint calendar_end = True - bint business_end = True - bint calendar_start = True - bint business_start = True - bint cal - int32_t[:] years - int32_t[:] months - int32_t[:] days - - years = fields['Y'] - months = fields['M'] - days = fields['D'] - - for y, m, d, wd in zip(years, months, days, weekdays): - if calendar_start: - calendar_start &= d == 1 - if business_start: - business_start &= d == 1 or (d <= 3 and wd == 0) - - if calendar_end or business_end: - daysinmonth = get_days_in_month(y, m) - cal = d == daysinmonth - if calendar_end: - calendar_end &= cal - if business_end: - business_end &= cal or (daysinmonth - d < 3 and wd == 4) - elif not calendar_start and not business_start: - break - - if calendar_end: - return 'ce' - elif business_end: - return 'be' - elif calendar_start: - return 'cs' - elif business_start: - return 'bs' - else: - return None diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index 5038df85c9160..7058ed3682d59 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -7,6 +7,7 @@ from pandas._libs import lib, tslib from pandas._libs.tslibs import ( NaT, + Resolution, Timestamp, conversion, fields, @@ -15,7 +16,6 @@ ints_to_pydatetime, is_date_array_normalized, normalize_i8_timestamps, - resolution as libresolution, timezones, to_offset, tzconversion, @@ -533,7 +533,7 @@ def is_normalized(self): return is_date_array_normalized(self.asi8, self.tz) @property # NB: override with cache_readonly in immutable subclasses - def _resolution_obj(self) -> libresolution.Resolution: + def _resolution_obj(self) -> Resolution: return get_resolution(self.asi8, self.tz) # ---------------------------------------------------------------- diff --git a/pandas/tests/tslibs/test_api.py b/pandas/tests/tslibs/test_api.py index ccaceb7e6f906..036037032031a 100644 --- a/pandas/tests/tslibs/test_api.py +++ b/pandas/tests/tslibs/test_api.py @@ -16,7 +16,6 @@ def test_namespace(): "offsets", "parsing", "period", - "resolution", "strptime", "vectorized", "timedeltas", diff --git a/pandas/tseries/frequencies.py b/pandas/tseries/frequencies.py index 23e08c7550646..f80ff1a53cd69 100644 --- a/pandas/tseries/frequencies.py +++ b/pandas/tseries/frequencies.py @@ -12,7 +12,7 @@ MONTHS, int_to_weekday, ) -from pandas._libs.tslibs.fields import build_field_sarray +from pandas._libs.tslibs.fields import build_field_sarray, month_position_check from pandas._libs.tslibs.offsets import ( # noqa:F401 DateOffset, Day, @@ -20,7 +20,6 @@ to_offset, ) from pandas._libs.tslibs.parsing import get_rule_month -from pandas._libs.tslibs.resolution import month_position_check from pandas.util._decorators import cache_readonly from pandas.core.dtypes.common import ( diff --git a/setup.py b/setup.py index 1885546e001fe..aebbdbf4d1e96 100755 --- a/setup.py +++ b/setup.py @@ -319,7 +319,6 @@ class CheckSDist(sdist_class): "pandas/_libs/tslibs/conversion.pyx", "pandas/_libs/tslibs/fields.pyx", "pandas/_libs/tslibs/offsets.pyx", - "pandas/_libs/tslibs/resolution.pyx", "pandas/_libs/tslibs/parsing.pyx", "pandas/_libs/tslibs/tzconversion.pyx", "pandas/_libs/tslibs/vectorized.pyx", @@ -639,10 +638,6 @@ def srcpath(name=None, suffix=".pyx", subdir="src"): "depends": tseries_depends, "sources": ["pandas/_libs/tslibs/src/datetime/np_datetime.c"], }, - "_libs.tslibs.resolution": { - "pyxfile": "_libs/tslibs/resolution", - "depends": tseries_depends, - }, "_libs.tslibs.strptime": { "pyxfile": 
"_libs/tslibs/strptime", "depends": tseries_depends, From a9327df1d47c5c6fcf70529b02c890cd828edd63 Mon Sep 17 00:00:00 2001 From: rjfs Date: Fri, 10 Jul 2020 14:53:59 +0100 Subject: [PATCH 094/632] BUG: fix read_excel error for header=None and index_col as list #31783 (#35035) --- doc/source/whatsnew/v1.1.0.rst | 1 + pandas/io/excel/_base.py | 4 +++- pandas/tests/io/excel/test_readers.py | 13 +++++++++++++ 3 files changed, 17 insertions(+), 1 deletion(-) diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst index 5dff6d729479a..798a3d838ef7e 100644 --- a/doc/source/whatsnew/v1.1.0.rst +++ b/doc/source/whatsnew/v1.1.0.rst @@ -1053,6 +1053,7 @@ I/O - Bug in :meth:`~HDFStore.create_table` now raises an error when `column` argument was not specified in `data_columns` on input (:issue:`28156`) - :meth:`read_json` now could read line-delimited json file from a file url while `lines` and `chunksize` are set. - Bug in :meth:`DataFrame.to_sql` when reading DataFrames with ``-np.inf`` entries with MySQL now has a more explicit ``ValueError`` (:issue:`34431`) +- Bug in :meth:`read_excel` that was raising a ``TypeError`` when ``header=None`` and ``index_col`` given as list (:issue:`31783`) - Bug in "meth"`read_excel` where datetime values are used in the header in a `MultiIndex` (:issue:`34748`) Plotting diff --git a/pandas/io/excel/_base.py b/pandas/io/excel/_base.py index 4fa4f158e9c3c..2a12f779230b2 100644 --- a/pandas/io/excel/_base.py +++ b/pandas/io/excel/_base.py @@ -468,7 +468,9 @@ def parse( if is_list_like(index_col): # Forward fill values for MultiIndex index. - if not is_list_like(header): + if header is None: + offset = 0 + elif not is_list_like(header): offset = 1 + header else: offset = 1 + max(header) diff --git a/pandas/tests/io/excel/test_readers.py b/pandas/tests/io/excel/test_readers.py index ddc631532194a..b610c5ec3a838 100644 --- a/pandas/tests/io/excel/test_readers.py +++ b/pandas/tests/io/excel/test_readers.py @@ -968,6 +968,19 @@ def test_deprecated_kwargs(self, read_ext): pd.read_excel("test1" + read_ext) + def test_no_header_with_list_index_col(self, read_ext): + # GH 31783 + file_name = "testmultiindex" + read_ext + data = [("B", "B"), ("key", "val"), (3, 4), (3, 4)] + idx = pd.MultiIndex.from_tuples( + [("A", "A"), ("key", "val"), (1, 2), (1, 2)], names=(0, 1) + ) + expected = pd.DataFrame(data, index=idx, columns=(2, 3)) + result = pd.read_excel( + file_name, sheet_name="index_col_none", index_col=[0, 1], header=None + ) + tm.assert_frame_equal(expected, result) + class TestExcelFileRead: @pytest.fixture(autouse=True) From 280efbfcc8afc8e14dd5050958d808f4a12135c9 Mon Sep 17 00:00:00 2001 From: Jon Thielen Date: Fri, 10 Jul 2020 09:55:38 -0500 Subject: [PATCH 095/632] Add xarray copyright notice to comply with reuse under Apache License (#35213) --- LICENSES/XARRAY_LICENSE | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/LICENSES/XARRAY_LICENSE b/LICENSES/XARRAY_LICENSE index 37ec93a14fdcd..6bafeb9d3d80e 100644 --- a/LICENSES/XARRAY_LICENSE +++ b/LICENSES/XARRAY_LICENSE @@ -1,3 +1,7 @@ +Copyright 2014-2019, xarray Developers + +-------------------------------------------------------------------------------- + Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ From 0534b004f5dd9cb38082a2d1e3e121b211a3373b Mon Sep 17 00:00:00 2001 From: Kaiqi Dong Date: Fri, 10 Jul 2020 21:16:27 +0200 Subject: [PATCH 096/632] ENH: Implement Keyword Aggregation for DataFrame.agg and Series.agg (#29116) --- pandas/core/aggregation.py | 162 
+++++++++++++++++- pandas/core/frame.py | 12 +- pandas/core/groupby/generic.py | 22 +-- pandas/core/series.py | 7 +- pandas/tests/frame/apply/__init__.py | 0 .../frame/apply/test_apply_relabeling.py | 104 +++++++++++ .../test_frame_apply.py} | 0 pandas/tests/series/apply/__init__.py | 0 .../series/apply/test_apply_relabeling.py | 33 ++++ .../test_series_apply.py} | 0 10 files changed, 317 insertions(+), 23 deletions(-) create mode 100644 pandas/tests/frame/apply/__init__.py create mode 100644 pandas/tests/frame/apply/test_apply_relabeling.py rename pandas/tests/frame/{test_apply.py => apply/test_frame_apply.py} (100%) create mode 100644 pandas/tests/series/apply/__init__.py create mode 100644 pandas/tests/series/apply/test_apply_relabeling.py rename pandas/tests/series/{test_apply.py => apply/test_series_apply.py} (100%) diff --git a/pandas/core/aggregation.py b/pandas/core/aggregation.py index 838722f60b380..16c4a9f862d79 100644 --- a/pandas/core/aggregation.py +++ b/pandas/core/aggregation.py @@ -5,12 +5,99 @@ from collections import defaultdict from functools import partial -from typing import Any, Callable, DefaultDict, List, Sequence, Tuple, Union +from typing import ( + Any, + Callable, + DefaultDict, + Dict, + List, + Optional, + Sequence, + Tuple, + Union, +) + +from pandas._typing import Label from pandas.core.dtypes.common import is_dict_like, is_list_like +from pandas.core.base import SpecificationError import pandas.core.common as com from pandas.core.indexes.api import Index +from pandas.core.series import FrameOrSeriesUnion, Series + +# types of `func` kwarg for DataFrame.aggregate and Series.aggregate +AggFuncTypeBase = Union[Callable, str] +AggFuncType = Union[ + AggFuncTypeBase, + List[AggFuncTypeBase], + Dict[Label, Union[AggFuncTypeBase, List[AggFuncTypeBase]]], +] + + +def reconstruct_func( + func: Optional[AggFuncType], **kwargs, +) -> Tuple[ + bool, Optional[AggFuncType], Optional[List[str]], Optional[List[int]], +]: + """ + This is the internal function to reconstruct func given if there is relabeling + or not and also normalize the keyword to get new order of columns. + + If named aggregation is applied, `func` will be None, and kwargs contains the + column and aggregation function information to be parsed; + If named aggregation is not applied, `func` is either string (e.g. 'min') or + Callable, or list of them (e.g. ['min', np.max]), or the dictionary of column name + and str/Callable/list of them (e.g. {'A': 'min'}, or {'A': [np.min, lambda x: x]}) + + If relabeling is True, will return relabeling, reconstructed func, column + names, and the reconstructed order of columns. + If relabeling is False, the columns and order will be None. + + Parameters + ---------- + func: agg function (e.g. 'min' or Callable) or list of agg functions + (e.g. ['min', np.max]) or dictionary (e.g. {'A': ['min', np.max]}). 
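A minimal sketch of the keyword ("named") aggregation this function enables, mirroring the tests added later in this patch; the frame and the labels "foo"/"bar" are illustrative, not part of the diff:

import pandas as pd

df = pd.DataFrame({"A": [1, 2, 1, 2], "B": [1, 2, 3, 4]})
result = df.agg(foo=("B", "sum"), bar=("B", "min"))
# A single column "B" indexed by the new labels: sum(B) == 10, min(B) == 1
assert list(result.index) == ["foo", "bar"]
assert result.loc["foo", "B"] == 10
assert result.loc["bar", "B"] == 1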
+ **kwargs: dict, kwargs used in is_multi_agg_with_relabel and + normalize_keyword_aggregation function for relabelling + + Returns + ------- + relabelling: bool, if there is relabelling or not + func: normalized and mangled func + columns: list of column names + order: list of columns indices + + Examples + -------- + >>> reconstruct_func(None, **{"foo": ("col", "min")}) + (True, defaultdict(None, {'col': ['min']}), ('foo',), array([0])) + + >>> reconstruct_func("min") + (False, 'min', None, None) + """ + relabeling = func is None and is_multi_agg_with_relabel(**kwargs) + columns: Optional[List[str]] = None + order: Optional[List[int]] = None + + if not relabeling: + if isinstance(func, list) and len(func) > len(set(func)): + + # GH 28426 will raise error if duplicated function names are used and + # there is no reassigned name + raise SpecificationError( + "Function names must be unique if there is no new column names " + "assigned" + ) + elif func is None: + # nicer error message + raise TypeError("Must provide 'func' or tuples of '(column, aggfunc).") + + if relabeling: + func, columns, order = normalize_keyword_aggregation(kwargs) + func = maybe_mangle_lambdas(func) + + return relabeling, func, columns, order def is_multi_agg_with_relabel(**kwargs) -> bool: @@ -198,6 +285,79 @@ def maybe_mangle_lambdas(agg_spec: Any) -> Any: return mangled_aggspec +def relabel_result( + result: FrameOrSeriesUnion, + func: Dict[str, List[Union[Callable, str]]], + columns: Tuple, + order: List[int], +) -> Dict[Label, Series]: + """Internal function to reorder result if relabelling is True for + dataframe.agg, and return the reordered result in dict. + + Parameters: + ---------- + result: Result from aggregation + func: Dict of (column name, funcs) + columns: New columns name for relabelling + order: New order for relabelling + + Examples: + --------- + >>> result = DataFrame({"A": [np.nan, 2, np.nan], + ... "C": [6, np.nan, np.nan], "B": [np.nan, 4, 2.5]}) # doctest: +SKIP + >>> funcs = {"A": ["max"], "C": ["max"], "B": ["mean", "min"]} + >>> columns = ("foo", "aab", "bar", "dat") + >>> order = [0, 1, 2, 3] + >>> _relabel_result(result, func, columns, order) # doctest: +SKIP + dict(A=Series([2.0, NaN, NaN, NaN], index=["foo", "aab", "bar", "dat"]), + C=Series([NaN, 6.0, NaN, NaN], index=["foo", "aab", "bar", "dat"]), + B=Series([NaN, NaN, 2.5, 4.0], index=["foo", "aab", "bar", "dat"])) + """ + reordered_indexes = [ + pair[0] for pair in sorted(zip(columns, order), key=lambda t: t[1]) + ] + reordered_result_in_dict: Dict[Label, Series] = {} + idx = 0 + + reorder_mask = not isinstance(result, Series) and len(result.columns) > 1 + for col, fun in func.items(): + s = result[col].dropna() + + # In the `_aggregate`, the callable names are obtained and used in `result`, and + # these names are ordered alphabetically. e.g. + # C2 C1 + # <lambda> 1 NaN + # amax NaN 4.0 + # max NaN 4.0 + # sum 18.0 6.0 + # Therefore, the order of functions for each column could be shuffled + # accordingly so need to get the callable name if it is not parsed names, and + # reorder the aggregated result for each column. + # e.g. if df.agg(c1=("C2", sum), c2=("C2", lambda x: min(x))), correct order is + # [sum, <lambda>], but in `result`, it will be [<lambda>, sum], and we need to + # reorder so that aggregated values map to their functions regarding the order. + + # However there is only one column being used for aggregation, not need to + # reorder since the index is not sorted, and keep as is in `funcs`, e.g.
+ # A + # min 1.0 + # mean 1.5 + # mean 1.5 + if reorder_mask: + fun = [ + com.get_callable_name(f) if not isinstance(f, str) else f for f in fun + ] + col_idx_order = Index(s.index).get_indexer(fun) + s = s[col_idx_order] + + # assign the new user-provided "named aggregation" as index names, and reindex + # it based on the whole user-provided names. + s.index = reordered_indexes[idx : idx + len(fun)] + reordered_result_in_dict[col] = s.reindex(columns, copy=False) + idx = idx + len(fun) + return reordered_result_in_dict + + def validate_func_kwargs( kwargs: dict, ) -> Tuple[List[str], List[Union[str, Callable[..., Any]]]]: diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 3d2200cb45c6e..10539ab74b4aa 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -114,6 +114,7 @@ from pandas.core import algorithms, common as com, nanops, ops from pandas.core.accessor import CachedAccessor +from pandas.core.aggregation import reconstruct_func, relabel_result from pandas.core.arrays import Categorical, ExtensionArray from pandas.core.arrays.datetimelike import DatetimeLikeArrayMixin as DatetimeLikeArray from pandas.core.arrays.sparse import SparseFrameAccessor @@ -7301,9 +7302,11 @@ def _gotitem( examples=_agg_examples_doc, versionadded="\n.. versionadded:: 0.20.0\n", ) - def aggregate(self, func, axis=0, *args, **kwargs): + def aggregate(self, func=None, axis=0, *args, **kwargs): axis = self._get_axis_number(axis) + relabeling, func, columns, order = reconstruct_func(func, **kwargs) + result = None try: result, how = self._aggregate(func, axis=axis, *args, **kwargs) @@ -7315,6 +7318,13 @@ def aggregate(self, func, axis=0, *args, **kwargs): raise exc from err if result is None: return self.apply(func, axis=axis, args=args, **kwargs) + + if relabeling: + # This is to keep the order to columns occurrence unchanged, and also + # keep the order of new columns occurrence unchanged + result_in_dict = relabel_result(result, func, columns, order) + result = DataFrame(result_in_dict, index=columns) + return result def _aggregate(self, arg, axis=0, *args, **kwargs): diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index ebb9d82766c1b..7f2eac520264d 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -55,9 +55,8 @@ from pandas.core.dtypes.missing import isna, notna from pandas.core.aggregation import ( - is_multi_agg_with_relabel, maybe_mangle_lambdas, - normalize_keyword_aggregation, + reconstruct_func, validate_func_kwargs, ) import pandas.core.algorithms as algorithms @@ -937,24 +936,7 @@ def aggregate( self, func=None, *args, engine="cython", engine_kwargs=None, **kwargs ): - relabeling = func is None and is_multi_agg_with_relabel(**kwargs) - if relabeling: - func, columns, order = normalize_keyword_aggregation(kwargs) - - kwargs = {} - elif isinstance(func, list) and len(func) > len(set(func)): - - # GH 28426 will raise error if duplicated function names are used and - # there is no reassigned name - raise SpecificationError( - "Function names must be unique if there is no new column " - "names assigned" - ) - elif func is None: - # nicer error message - raise TypeError("Must provide 'func' or tuples of '(column, aggfunc).") - - func = maybe_mangle_lambdas(func) + relabeling, func, columns, order = reconstruct_func(func, **kwargs) if engine == "numba": return self._python_agg_general( diff --git a/pandas/core/series.py b/pandas/core/series.py index 6c1d21e4526cf..9a633079b8c1d 100644 --- a/pandas/core/series.py +++ 
b/pandas/core/series.py @@ -4016,9 +4016,14 @@ def _gotitem(self, key, ndim, subset=None) -> "Series": examples=_agg_examples_doc, versionadded="\n.. versionadded:: 0.20.0\n", ) - def aggregate(self, func, axis=0, *args, **kwargs): + def aggregate(self, func=None, axis=0, *args, **kwargs): # Validate the axis parameter self._get_axis_number(axis) + + # if func is None, will switch to user-provided "named aggregation" kwargs + if func is None: + func = dict(kwargs.items()) + result, how = self._aggregate(func, *args, **kwargs) if result is None: diff --git a/pandas/tests/frame/apply/__init__.py b/pandas/tests/frame/apply/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/pandas/tests/frame/apply/test_apply_relabeling.py b/pandas/tests/frame/apply/test_apply_relabeling.py new file mode 100644 index 0000000000000..965f69753bdc7 --- /dev/null +++ b/pandas/tests/frame/apply/test_apply_relabeling.py @@ -0,0 +1,104 @@ +import numpy as np +import pytest + +import pandas as pd +import pandas._testing as tm + + +class TestDataFrameNamedAggregate: + def test_agg_relabel(self): + # GH 26513 + df = pd.DataFrame({"A": [1, 2, 1, 2], "B": [1, 2, 3, 4], "C": [3, 4, 5, 6]}) + + # simplest case with one column, one func + result = df.agg(foo=("B", "sum")) + expected = pd.DataFrame({"B": [10]}, index=pd.Index(["foo"])) + tm.assert_frame_equal(result, expected) + + # test on same column with different methods + result = df.agg(foo=("B", "sum"), bar=("B", "min")) + expected = pd.DataFrame({"B": [10, 1]}, index=pd.Index(["foo", "bar"])) + + tm.assert_frame_equal(result, expected) + + def test_agg_relabel_multi_columns_multi_methods(self): + # GH 26513, test on multiple columns with multiple methods + df = pd.DataFrame({"A": [1, 2, 1, 2], "B": [1, 2, 3, 4], "C": [3, 4, 5, 6]}) + result = df.agg( + foo=("A", "sum"), + bar=("B", "mean"), + cat=("A", "min"), + dat=("B", "max"), + f=("A", "max"), + g=("C", "min"), + ) + expected = pd.DataFrame( + { + "A": [6.0, np.nan, 1.0, np.nan, 2.0, np.nan], + "B": [np.nan, 2.5, np.nan, 4.0, np.nan, np.nan], + "C": [np.nan, np.nan, np.nan, np.nan, np.nan, 3.0], + }, + index=pd.Index(["foo", "bar", "cat", "dat", "f", "g"]), + ) + tm.assert_frame_equal(result, expected) + + def test_agg_relabel_partial_functions(self): + # GH 26513, test on partial, functools or more complex cases + df = pd.DataFrame({"A": [1, 2, 1, 2], "B": [1, 2, 3, 4], "C": [3, 4, 5, 6]}) + result = df.agg(foo=("A", np.mean), bar=("A", "mean"), cat=("A", min)) + expected = pd.DataFrame( + {"A": [1.5, 1.5, 1.0]}, index=pd.Index(["foo", "bar", "cat"]) + ) + tm.assert_frame_equal(result, expected) + + result = df.agg( + foo=("A", min), + bar=("A", np.min), + cat=("B", max), + dat=("C", "min"), + f=("B", np.sum), + kk=("B", lambda x: min(x)), + ) + expected = pd.DataFrame( + { + "A": [1.0, 1.0, np.nan, np.nan, np.nan, np.nan], + "B": [np.nan, np.nan, 4.0, np.nan, 10.0, 1.0], + "C": [np.nan, np.nan, np.nan, 3.0, np.nan, np.nan], + }, + index=pd.Index(["foo", "bar", "cat", "dat", "f", "kk"]), + ) + tm.assert_frame_equal(result, expected) + + def test_agg_namedtuple(self): + # GH 26513 + df = pd.DataFrame({"A": [0, 1], "B": [1, 2]}) + result = df.agg( + foo=pd.NamedAgg("B", "sum"), + bar=pd.NamedAgg("B", min), + cat=pd.NamedAgg(column="B", aggfunc="count"), + fft=pd.NamedAgg("B", aggfunc="max"), + ) + + expected = pd.DataFrame( + {"B": [3, 1, 2, 2]}, index=pd.Index(["foo", "bar", "cat", "fft"]) + ) + tm.assert_frame_equal(result, expected) + + result = df.agg( + foo=pd.NamedAgg("A", 
"min"), + bar=pd.NamedAgg(column="B", aggfunc="max"), + cat=pd.NamedAgg(column="A", aggfunc="max"), + ) + expected = pd.DataFrame( + {"A": [0.0, np.nan, 1.0], "B": [np.nan, 2.0, np.nan]}, + index=pd.Index(["foo", "bar", "cat"]), + ) + tm.assert_frame_equal(result, expected) + + def test_agg_raises(self): + # GH 26513 + df = pd.DataFrame({"A": [0, 1], "B": [1, 2]}) + msg = "Must provide" + + with pytest.raises(TypeError, match=msg): + df.agg() diff --git a/pandas/tests/frame/test_apply.py b/pandas/tests/frame/apply/test_frame_apply.py similarity index 100% rename from pandas/tests/frame/test_apply.py rename to pandas/tests/frame/apply/test_frame_apply.py diff --git a/pandas/tests/series/apply/__init__.py b/pandas/tests/series/apply/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/pandas/tests/series/apply/test_apply_relabeling.py b/pandas/tests/series/apply/test_apply_relabeling.py new file mode 100644 index 0000000000000..0b8d2c4e1f26d --- /dev/null +++ b/pandas/tests/series/apply/test_apply_relabeling.py @@ -0,0 +1,33 @@ +import pandas as pd +import pandas._testing as tm + + +class TestNamedAggregation: + def test_relabel_no_duplicated_method(self): + # this is to test there is no duplicated method used in agg + df = pd.DataFrame({"A": [1, 2, 1, 2], "B": [1, 2, 3, 4]}) + + result = df["A"].agg(foo="sum") + expected = df["A"].agg({"foo": "sum"}) + tm.assert_series_equal(result, expected) + + result = df["B"].agg(foo="min", bar="max") + expected = df["B"].agg({"foo": "min", "bar": "max"}) + tm.assert_series_equal(result, expected) + + result = df["B"].agg(foo=sum, bar=min, cat="max") + expected = df["B"].agg({"foo": sum, "bar": min, "cat": "max"}) + tm.assert_series_equal(result, expected) + + def test_relabel_duplicated_method(self): + # this is to test with nested renaming, duplicated method can be used + # if they are assigned with different new names + df = pd.DataFrame({"A": [1, 2, 1, 2], "B": [1, 2, 3, 4]}) + + result = df["A"].agg(foo="sum", bar="sum") + expected = pd.Series([6, 6], index=["foo", "bar"], name="A") + tm.assert_series_equal(result, expected) + + result = df["B"].agg(foo=min, bar="min") + expected = pd.Series([1, 1], index=["foo", "bar"], name="B") + tm.assert_series_equal(result, expected) diff --git a/pandas/tests/series/test_apply.py b/pandas/tests/series/apply/test_series_apply.py similarity index 100% rename from pandas/tests/series/test_apply.py rename to pandas/tests/series/apply/test_series_apply.py From c7626b13bab35be1993eafd81fa4f7da5e3bf2c1 Mon Sep 17 00:00:00 2001 From: Robin to Roxel <35864265+r-toroxel@users.noreply.github.com> Date: Fri, 10 Jul 2020 21:32:34 +0200 Subject: [PATCH 097/632] Tst return none inplace series (#35210) --- pandas/tests/series/indexing/test_indexing.py | 6 ++-- .../series/methods/test_drop_duplicates.py | 33 ++++++++++++------- pandas/tests/series/methods/test_fillna.py | 3 +- pandas/tests/series/methods/test_replace.py | 24 +++++++++----- .../tests/series/methods/test_reset_index.py | 3 +- .../tests/series/methods/test_sort_values.py | 3 +- pandas/tests/series/methods/test_truncate.py | 3 +- pandas/tests/series/test_api.py | 18 ++++++---- pandas/tests/series/test_datetime_values.py | 3 +- pandas/tests/series/test_missing.py | 15 ++++++--- 10 files changed, 74 insertions(+), 37 deletions(-) diff --git a/pandas/tests/series/indexing/test_indexing.py b/pandas/tests/series/indexing/test_indexing.py index 737e21af9242f..3ed25b8bca566 100644 --- a/pandas/tests/series/indexing/test_indexing.py +++ 
b/pandas/tests/series/indexing/test_indexing.py @@ -736,14 +736,16 @@ def test_append_timedelta_does_not_cast(td): def test_underlying_data_conversion(): # GH 4080 df = DataFrame({c: [1, 2, 3] for c in ["a", "b", "c"]}) - df.set_index(["a", "b", "c"], inplace=True) + return_value = df.set_index(["a", "b", "c"], inplace=True) + assert return_value is None s = Series([1], index=[(2, 2, 2)]) df["val"] = 0 df df["val"].update(s) expected = DataFrame(dict(a=[1, 2, 3], b=[1, 2, 3], c=[1, 2, 3], val=[0, 1, 0])) - expected.set_index(["a", "b", "c"], inplace=True) + return_value = expected.set_index(["a", "b", "c"], inplace=True) + assert return_value is None tm.assert_frame_equal(df, expected) # GH 3970 diff --git a/pandas/tests/series/methods/test_drop_duplicates.py b/pandas/tests/series/methods/test_drop_duplicates.py index a4532ebb3d8c5..40651c4342e8a 100644 --- a/pandas/tests/series/methods/test_drop_duplicates.py +++ b/pandas/tests/series/methods/test_drop_duplicates.py @@ -22,7 +22,8 @@ def test_drop_duplicates(any_numpy_dtype, keep, expected): tm.assert_series_equal(tc.duplicated(keep=keep), expected) tm.assert_series_equal(tc.drop_duplicates(keep=keep), tc[~expected]) sc = tc.copy() - sc.drop_duplicates(keep=keep, inplace=True) + return_value = sc.drop_duplicates(keep=keep, inplace=True) + assert return_value is None tm.assert_series_equal(sc, tc[~expected]) @@ -40,8 +41,9 @@ def test_drop_duplicates_bool(keep, expected): tm.assert_series_equal(tc.duplicated(keep=keep), expected) tm.assert_series_equal(tc.drop_duplicates(keep=keep), tc[~expected]) sc = tc.copy() - sc.drop_duplicates(keep=keep, inplace=True) + return_value = sc.drop_duplicates(keep=keep, inplace=True) tm.assert_series_equal(sc, tc[~expected]) + assert return_value is None @pytest.mark.parametrize("values", [[], list(range(5))]) @@ -84,21 +86,24 @@ def test_drop_duplicates_categorical_non_bool(self, dtype, ordered): tm.assert_series_equal(tc1.duplicated(), expected) tm.assert_series_equal(tc1.drop_duplicates(), tc1[~expected]) sc = tc1.copy() - sc.drop_duplicates(inplace=True) + return_value = sc.drop_duplicates(inplace=True) + assert return_value is None tm.assert_series_equal(sc, tc1[~expected]) expected = Series([False, False, True, False]) tm.assert_series_equal(tc1.duplicated(keep="last"), expected) tm.assert_series_equal(tc1.drop_duplicates(keep="last"), tc1[~expected]) sc = tc1.copy() - sc.drop_duplicates(keep="last", inplace=True) + return_value = sc.drop_duplicates(keep="last", inplace=True) + assert return_value is None tm.assert_series_equal(sc, tc1[~expected]) expected = Series([False, False, True, True]) tm.assert_series_equal(tc1.duplicated(keep=False), expected) tm.assert_series_equal(tc1.drop_duplicates(keep=False), tc1[~expected]) sc = tc1.copy() - sc.drop_duplicates(keep=False, inplace=True) + return_value = sc.drop_duplicates(keep=False, inplace=True) + assert return_value is None tm.assert_series_equal(sc, tc1[~expected]) # Test case 2 @@ -113,21 +118,24 @@ def test_drop_duplicates_categorical_non_bool(self, dtype, ordered): tm.assert_series_equal(tc2.duplicated(), expected) tm.assert_series_equal(tc2.drop_duplicates(), tc2[~expected]) sc = tc2.copy() - sc.drop_duplicates(inplace=True) + return_value = sc.drop_duplicates(inplace=True) + assert return_value is None tm.assert_series_equal(sc, tc2[~expected]) expected = Series([False, True, True, False, False, False, False]) tm.assert_series_equal(tc2.duplicated(keep="last"), expected) tm.assert_series_equal(tc2.drop_duplicates(keep="last"), tc2[~expected]) 
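The invariant these tests pin down, as a standalone sketch: mutating methods called with inplace=True return None rather than a new object.

import pandas as pd

s = pd.Series([1, 1, 2])
ret = s.drop_duplicates(inplace=True)
assert ret is None        # no object is returned
assert list(s) == [1, 2]  # the Series was modified in place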
sc = tc2.copy() - sc.drop_duplicates(keep="last", inplace=True) + return_value = sc.drop_duplicates(keep="last", inplace=True) + assert return_value is None tm.assert_series_equal(sc, tc2[~expected]) expected = Series([False, True, True, False, True, True, False]) tm.assert_series_equal(tc2.duplicated(keep=False), expected) tm.assert_series_equal(tc2.drop_duplicates(keep=False), tc2[~expected]) sc = tc2.copy() - sc.drop_duplicates(keep=False, inplace=True) + return_value = sc.drop_duplicates(keep=False, inplace=True) + assert return_value is None tm.assert_series_equal(sc, tc2[~expected]) def test_drop_duplicates_categorical_bool(self, ordered): @@ -141,19 +149,22 @@ def test_drop_duplicates_categorical_bool(self, ordered): tm.assert_series_equal(tc.duplicated(), expected) tm.assert_series_equal(tc.drop_duplicates(), tc[~expected]) sc = tc.copy() - sc.drop_duplicates(inplace=True) + return_value = sc.drop_duplicates(inplace=True) + assert return_value is None tm.assert_series_equal(sc, tc[~expected]) expected = Series([True, True, False, False]) tm.assert_series_equal(tc.duplicated(keep="last"), expected) tm.assert_series_equal(tc.drop_duplicates(keep="last"), tc[~expected]) sc = tc.copy() - sc.drop_duplicates(keep="last", inplace=True) + return_value = sc.drop_duplicates(keep="last", inplace=True) + assert return_value is None tm.assert_series_equal(sc, tc[~expected]) expected = Series([True, True, True, True]) tm.assert_series_equal(tc.duplicated(keep=False), expected) tm.assert_series_equal(tc.drop_duplicates(keep=False), tc[~expected]) sc = tc.copy() - sc.drop_duplicates(keep=False, inplace=True) + return_value = sc.drop_duplicates(keep=False, inplace=True) + assert return_value is None tm.assert_series_equal(sc, tc[~expected]) diff --git a/pandas/tests/series/methods/test_fillna.py b/pandas/tests/series/methods/test_fillna.py index c34838be24fc1..80b8271e16e7a 100644 --- a/pandas/tests/series/methods/test_fillna.py +++ b/pandas/tests/series/methods/test_fillna.py @@ -67,7 +67,8 @@ def test_fillna_numeric_inplace(self): x = Series([np.nan, 1.0, np.nan, 3.0, np.nan], ["z", "a", "b", "c", "d"]) y = x.copy() - y.fillna(value=0, inplace=True) + return_value = y.fillna(value=0, inplace=True) + assert return_value is None expected = x.fillna(value=0) tm.assert_series_equal(y, expected) diff --git a/pandas/tests/series/methods/test_replace.py b/pandas/tests/series/methods/test_replace.py index 8f57cf3191d5d..11802c59a29da 100644 --- a/pandas/tests/series/methods/test_replace.py +++ b/pandas/tests/series/methods/test_replace.py @@ -13,7 +13,8 @@ def test_replace(self, datetime_series): ser[6:10] = 0 # replace list with a single value - ser.replace([np.nan], -1, inplace=True) + return_value = ser.replace([np.nan], -1, inplace=True) + assert return_value is None exp = ser.fillna(-1) tm.assert_series_equal(ser, exp) @@ -48,7 +49,8 @@ def test_replace(self, datetime_series): tm.assert_series_equal(rs, rs2) # replace inplace - ser.replace([np.nan, "foo", "bar"], -1, inplace=True) + return_value = ser.replace([np.nan, "foo", "bar"], -1, inplace=True) + assert return_value is None assert (ser[:5] == -1).all() assert (ser[6:10] == -1).all() @@ -124,7 +126,8 @@ def test_replace_with_single_list(self): tm.assert_series_equal(result, pd.Series([0, 0, 0, 0, 4])) s = ser.copy() - s.replace([1, 2, 3], inplace=True) + return_value = s.replace([1, 2, 3], inplace=True) + assert return_value is None tm.assert_series_equal(s, pd.Series([0, 0, 0, 0, 4])) # make sure things don't get corrupted when fillna call 
fails @@ -134,7 +137,8 @@ def test_replace_with_single_list(self): r"\(bfill\)\. Got crash_cymbal" ) with pytest.raises(ValueError, match=msg): - s.replace([1, 2, 3], inplace=True, method="crash_cymbal") + return_value = s.replace([1, 2, 3], inplace=True, method="crash_cymbal") + assert return_value is None tm.assert_series_equal(s, ser) def test_replace_with_empty_list(self): @@ -156,7 +160,8 @@ def test_replace_mixed_types(self): def check_replace(to_rep, val, expected): sc = s.copy() r = s.replace(to_rep, val) - sc.replace(to_rep, val, inplace=True) + return_value = sc.replace(to_rep, val, inplace=True) + assert return_value is None tm.assert_series_equal(expected, r) tm.assert_series_equal(expected, sc) @@ -242,7 +247,8 @@ def test_replace2(self): tm.assert_series_equal(rs, rs2) # replace inplace - ser.replace([np.nan, "foo", "bar"], -1, inplace=True) + return_value = ser.replace([np.nan, "foo", "bar"], -1, inplace=True) + assert return_value is None assert (ser[:5] == -1).all() assert (ser[6:10] == -1).all() assert (ser[20:30] == -1).all() @@ -325,11 +331,13 @@ def test_replace_categorical_single(self): tm.assert_series_equal(expected, result) assert c[2] != "foo" # ensure non-inplace call does not alter original - c.replace(c[2], "foo", inplace=True) + return_value = c.replace(c[2], "foo", inplace=True) + assert return_value is None tm.assert_series_equal(expected, c) first_value = c[0] - c.replace(c[1], c[0], inplace=True) + return_value = c.replace(c[1], c[0], inplace=True) + assert return_value is None assert c[0] == c[1] == first_value # test replacing with existing value def test_replace_with_no_overflowerror(self): diff --git a/pandas/tests/series/methods/test_reset_index.py b/pandas/tests/series/methods/test_reset_index.py index 597b43a370ef5..1474bb95f4af2 100644 --- a/pandas/tests/series/methods/test_reset_index.py +++ b/pandas/tests/series/methods/test_reset_index.py @@ -22,7 +22,8 @@ def test_reset_index(self): # check inplace s = ser.reset_index(drop=True) s2 = ser - s2.reset_index(drop=True, inplace=True) + return_value = s2.reset_index(drop=True, inplace=True) + assert return_value is None tm.assert_series_equal(s, s2) # level diff --git a/pandas/tests/series/methods/test_sort_values.py b/pandas/tests/series/methods/test_sort_values.py index b32c59b4daa0d..b49e39d4592ea 100644 --- a/pandas/tests/series/methods/test_sort_values.py +++ b/pandas/tests/series/methods/test_sort_values.py @@ -65,7 +65,8 @@ def test_sort_values(self, datetime_series): # inplace=True ts = datetime_series.copy() - ts.sort_values(ascending=False, inplace=True) + return_value = ts.sort_values(ascending=False, inplace=True) + assert return_value is None tm.assert_series_equal(ts, datetime_series.sort_values(ascending=False)) tm.assert_index_equal( ts.index, datetime_series.sort_values(ascending=False).index diff --git a/pandas/tests/series/methods/test_truncate.py b/pandas/tests/series/methods/test_truncate.py index 8a2c62cee7e24..7c82edbaec177 100644 --- a/pandas/tests/series/methods/test_truncate.py +++ b/pandas/tests/series/methods/test_truncate.py @@ -136,7 +136,8 @@ def test_truncate_multiindex(self): df = pd.DataFrame.from_dict( {"L1": [2, 2, 3, 3], "L2": ["A", "B", "A", "B"], "col": [2, 3, 4, 5]} ) - df.set_index(["L1", "L2"], inplace=True) + return_value = df.set_index(["L1", "L2"], inplace=True) + assert return_value is None expected = df.col tm.assert_series_equal(result, expected) diff --git a/pandas/tests/series/test_api.py b/pandas/tests/series/test_api.py index 
042841bb4e019..b174eb0e42776 100644 --- a/pandas/tests/series/test_api.py +++ b/pandas/tests/series/test_api.py @@ -179,7 +179,8 @@ def test_constructor_dict_timedelta_index(self): def test_sparse_accessor_updates_on_inplace(self): s = pd.Series([1, 1, 2, 3], dtype="Sparse[int]") - s.drop([0, 1], inplace=True) + return_value = s.drop([0, 1], inplace=True) + assert return_value is None assert s.sparse.density == 1.0 def test_tab_completion(self): @@ -459,7 +460,8 @@ def f(x): def test_str_accessor_updates_on_inplace(self): s = pd.Series(list("abc")) - s.drop([0], inplace=True) + return_value = s.drop([0], inplace=True) + assert return_value is None assert len(s.str.lower()) == 2 def test_str_attribute(self): @@ -548,7 +550,8 @@ def test_cat_accessor(self): assert not s.cat.ordered, False exp = Categorical(["a", "b", np.nan, "a"], categories=["b", "a"]) - s.cat.set_categories(["b", "a"], inplace=True) + return_value = s.cat.set_categories(["b", "a"], inplace=True) + assert return_value is None tm.assert_categorical_equal(s.values, exp) res = s.cat.set_categories(["b", "a"]) @@ -579,8 +582,10 @@ def test_cat_accessor_no_new_attributes(self): def test_cat_accessor_updates_on_inplace(self): s = Series(list("abc")).astype("category") - s.drop(0, inplace=True) - s.cat.remove_unused_categories(inplace=True) + return_value = s.drop(0, inplace=True) + assert return_value is None + return_value = s.cat.remove_unused_categories(inplace=True) + assert return_value is None assert len(s.cat.categories) == 2 def test_categorical_delegations(self): @@ -614,7 +619,8 @@ def test_categorical_delegations(self): assert s.cat.ordered s = s.cat.as_unordered() assert not s.cat.ordered - s.cat.as_ordered(inplace=True) + return_value = s.cat.as_ordered(inplace=True) + assert return_value is None assert s.cat.ordered # reorder diff --git a/pandas/tests/series/test_datetime_values.py b/pandas/tests/series/test_datetime_values.py index 0fd51b8828bc5..d2ad9c8c398ea 100644 --- a/pandas/tests/series/test_datetime_values.py +++ b/pandas/tests/series/test_datetime_values.py @@ -625,7 +625,8 @@ def test_dt_accessor_invalid(self, ser): def test_dt_accessor_updates_on_inplace(self): s = Series(pd.date_range("2018-01-01", periods=10)) s[2] = None - s.fillna(pd.Timestamp("2018-01-01"), inplace=True) + return_value = s.fillna(pd.Timestamp("2018-01-01"), inplace=True) + assert return_value is None result = s.dt.date assert result[0] == result[2] diff --git a/pandas/tests/series/test_missing.py b/pandas/tests/series/test_missing.py index 162778e372426..0144e4257efe0 100644 --- a/pandas/tests/series/test_missing.py +++ b/pandas/tests/series/test_missing.py @@ -453,7 +453,8 @@ def test_fillna_downcast(self): def test_fillna_int(self): s = Series(np.random.randint(-100, 100, 50)) - s.fillna(method="ffill", inplace=True) + return_value = s.fillna(method="ffill", inplace=True) + assert return_value is None tm.assert_series_equal(s.fillna(method="ffill", inplace=False), s) def test_categorical_nan_equality(self): @@ -680,7 +681,8 @@ def test_dropna_empty(self): s = Series([], dtype=object) assert len(s.dropna()) == 0 - s.dropna(inplace=True) + return_value = s.dropna(inplace=True) + assert return_value is None assert len(s) == 0 # invalid axis @@ -729,7 +731,8 @@ def test_dropna_no_nan(self): assert result is not s s2 = s.copy() - s2.dropna(inplace=True) + return_value = s2.dropna(inplace=True) + assert return_value is None tm.assert_series_equal(s2, s) def test_dropna_intervals(self): @@ -775,7 +778,8 @@ def test_pad_nan(self): [np.nan, 
1.0, np.nan, 3.0, np.nan], ["z", "a", "b", "c", "d"], dtype=float ) - x.fillna(method="pad", inplace=True) + return_value = x.fillna(method="pad", inplace=True) + assert return_value is None expected = Series( [np.nan, 1.0, 1.0, 3.0, 3.0], ["z", "a", "b", "c", "d"], dtype=float @@ -799,7 +803,8 @@ def test_dropna_preserve_name(self, datetime_series): assert result.name == datetime_series.name name = datetime_series.name ts = datetime_series.copy() - ts.dropna(inplace=True) + return_value = ts.dropna(inplace=True) + assert return_value is None assert ts.name == name def test_series_fillna_limit(self): From f0a7d186dea0610ccf1fe06145a3702bce20ae8b Mon Sep 17 00:00:00 2001 From: Gim Seng <26968986+gimseng@users.noreply.github.com> Date: Fri, 10 Jul 2020 20:33:13 +0100 Subject: [PATCH 098/632] Fix strange behavior when precision display option is zero (#20359) (#35212) --- doc/source/whatsnew/v1.1.0.rst | 1 + pandas/io/formats/format.py | 17 +++++++++++------ pandas/tests/io/formats/test_format.py | 9 +++++++++ 3 files changed, 21 insertions(+), 6 deletions(-) diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst index 798a3d838ef7e..d3724112ef455 100644 --- a/doc/source/whatsnew/v1.1.0.rst +++ b/doc/source/whatsnew/v1.1.0.rst @@ -1018,6 +1018,7 @@ MultiIndex I/O ^^^ +- Bug in print-out when ``display.precision`` is zero. (:issue:`20359`) - Bug in :meth:`read_json` where integer overflow was occurring when json contains big number strings. (:issue:`30320`) - `read_csv` will now raise a ``ValueError`` when the arguments `header` and `prefix` both are not `None`. (:issue:`27394`) - Bug in :meth:`DataFrame.to_json` was raising ``NotFoundError`` when ``path_or_buf`` was an S3 URI (:issue:`28375`) diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py index 22cdd8e235e0b..27df014620f56 100644 --- a/pandas/io/formats/format.py +++ b/pandas/io/formats/format.py @@ -1382,9 +1382,9 @@ def format_values_with(float_format): if self.fixed_width: if is_complex: - result = _trim_zeros_complex(values, na_rep) + result = _trim_zeros_complex(values, self.decimal, na_rep) else: - result = _trim_zeros_float(values, na_rep) + result = _trim_zeros_float(values, self.decimal, na_rep) return np.asarray(result, dtype="object") return values @@ -1754,19 +1754,21 @@ def just(x): return result -def _trim_zeros_complex(str_complexes: np.ndarray, na_rep: str = "NaN") -> List[str]: +def _trim_zeros_complex( + str_complexes: np.ndarray, decimal: str = ".", na_rep: str = "NaN" +) -> List[str]: """ Separates the real and imaginary parts from the complex number, and executes the _trim_zeros_float method on each of those. """ return [ - "".join(_trim_zeros_float(re.split(r"([j+-])", x), na_rep)) + "".join(_trim_zeros_float(re.split(r"([j+-])", x), decimal, na_rep)) for x in str_complexes ] def _trim_zeros_float( - str_floats: Union[np.ndarray, List[str]], na_rep: str = "NaN" + str_floats: Union[np.ndarray, List[str]], decimal: str = ".", na_rep: str = "NaN" ) -> List[str]: """ Trims zeros, leaving just one before the decimal points if need be. @@ -1778,8 +1780,11 @@ def _is_number(x): def _cond(values): finite = [x for x in values if _is_number(x)] + has_decimal = [decimal in x for x in finite] + return ( len(finite) > 0 + and all(has_decimal) and all(x.endswith("0") for x in finite) and not (any(("e" in x) or ("E" in x) for x in finite)) ) @@ -1788,7 +1793,7 @@ def _cond(values): trimmed = [x[:-1] if _is_number(x) else x for x in trimmed] # leave one 0 after the decimal points if need be. 
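A hedged sketch of the behaviour this patch fixes, matching the new test below; the output spacing is as pandas right-aligns it:

import pandas as pd

with pd.option_context("display.precision", 0):
    print(pd.Series([840.0, 4200.0]))
# 0     840
# 1    4200
# dtype: float64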
- return [x + "0" if x.endswith(".") and _is_number(x) else x for x in trimmed] + return [x + "0" if x.endswith(decimal) and _is_number(x) else x for x in trimmed] def _has_names(index: Index) -> bool: diff --git a/pandas/tests/io/formats/test_format.py b/pandas/tests/io/formats/test_format.py index 3c40a2ae8d6b8..e236b3da73c69 100644 --- a/pandas/tests/io/formats/test_format.py +++ b/pandas/tests/io/formats/test_format.py @@ -2910,6 +2910,15 @@ def test_format(self): assert result[0] == " 12.0" assert result[1] == " 0.0" + def test_output_display_precision_trailing_zeroes(self): + # Issue #20359: trimming zeros while there is no decimal point + + # Happens when display precision is set to zero + with pd.option_context("display.precision", 0): + s = pd.Series([840.0, 4200.0]) + expected_output = "0     840\n1    4200\ndtype: float64" + assert str(s) == expected_output + def test_output_significant_digits(self): # Issue #9764 From 0f86110aee4126bd8cab6ba2a40c28b81a9a0d35 Mon Sep 17 00:00:00 2001 From: Zeb Nicholls Date: Sat, 11 Jul 2020 06:33:17 +1000 Subject: [PATCH 099/632] DOC: Add pint pandas ecosystem docs (#35170) --- doc/source/ecosystem.rst | 10 ++++++++++ web/pandas/community/ecosystem.md | 17 +++++++++++++---- 2 files changed, 23 insertions(+), 4 deletions(-) diff --git a/doc/source/ecosystem.rst b/doc/source/ecosystem.rst index 72e24e34bc5c1..b02d4abd3ddf8 100644 --- a/doc/source/ecosystem.rst +++ b/doc/source/ecosystem.rst @@ -421,6 +421,14 @@ found in NumPy or pandas, which work well with pandas' data containers. Cyberpandas provides an extension type for storing arrays of IP Addresses. These arrays can be stored inside pandas' Series and DataFrame. +`Pint-Pandas`_ +~~~~~~~~~~~~~~ + +`Pint-Pandas <https://github.com/hgrecco/pint-pandas>`_ provides an extension type for +storing numeric arrays with units. These arrays can be stored inside pandas' +Series and DataFrame. Operations between Series and DataFrame columns which +use pint's extension array are then units aware. + .. _ecosystem.accessors: Accessors @@ -436,6 +444,7 @@ Library Accessor Classes Description `cyberpandas`_ ``ip`` ``Series`` Provides common operations for working with IP addresses. `pdvega`_ ``vgplot`` ``Series``, ``DataFrame`` Provides plotting functions from the Altair_ library. `pandas_path`_ ``path`` ``Index``, ``Series`` Provides `pathlib.Path`_ functions for Series. +`pint-pandas`_ ``pint`` ``Series``, ``DataFrame`` Provides units support for numeric Series and DataFrames. =============== ========== ========================= =============================================================== .. _cyberpandas: https://cyberpandas.readthedocs.io/en/latest .. _pdvega: https://altair-viz.github.io/pdvega/ .. _Altair: https://altair-viz.github.io/ .. _pandas_path: https://github.com/drivendataorg/pandas-path/ .. _pathlib.Path: https://docs.python.org/3/library/pathlib.html .. _pint-pandas: https://github.com/hgrecco/pint-pandas diff --git a/web/pandas/community/ecosystem.md b/web/pandas/community/ecosystem.md index 715a84c1babc6..be109ea53eb7d 100644 --- a/web/pandas/community/ecosystem.md +++ b/web/pandas/community/ecosystem.md @@ -353,13 +353,22 @@ Cyberpandas provides an extension type for storing arrays of IP Addresses. These arrays can be stored inside pandas' Series and DataFrame. +### [Pint-Pandas](https://github.com/hgrecco/pint-pandas) + +Pint-Pandas provides an extension type for storing numeric arrays with units. +These arrays can be stored inside pandas' Series and DataFrame.
Operations +between Series and DataFrame columns which use pint's extension array are then +units aware. + ## Accessors A directory of projects providing `extension accessors `. This is for users to discover new accessors and for library authors to coordinate on the namespace. - | Library | Accessor | Classes | - | ------------------------------------------------------------|----------|-----------------------| - | [cyberpandas](https://cyberpandas.readthedocs.io/en/latest) | `ip` | `Series` | - | [pdvega](https://altair-viz.github.io/pdvega/) | `vgplot` | `Series`, `DataFrame` | + | Library | Accessor | Classes | + | --------------------------------------------------------------|----------|-----------------------| + | [cyberpandas](https://cyberpandas.readthedocs.io/en/latest) | `ip` | `Series` | + | [pdvega](https://altair-viz.github.io/pdvega/) | `vgplot` | `Series`, `DataFrame` | + | [pandas_path](https://github.com/drivendataorg/pandas-path/) | `path` | `Index`, `Series` | + | [pint-pandas](https://github.com/hgrecco/pint-pandas) | `pint` | `Series`, `DataFrame` | From 283d67287e10300b48800acf7daecd44b6f53a9c Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Fri, 10 Jul 2020 15:19:55 -0700 Subject: [PATCH 100/632] BUG: ensure_timedelta64ns overflows (#34448) --- .../reference/general_utility_functions.rst | 1 + pandas/_libs/tslibs/__init__.py | 3 +- pandas/_libs/tslibs/conversion.pyx | 40 +++++++++++++++++-- pandas/core/internals/blocks.py | 3 +- pandas/errors/__init__.py | 2 +- pandas/tests/tslibs/test_api.py | 1 + pandas/tests/tslibs/test_conversion.py | 15 ++++++- 7 files changed, 57 insertions(+), 8 deletions(-) diff --git a/doc/source/reference/general_utility_functions.rst b/doc/source/reference/general_utility_functions.rst index 72a84217323ab..c1759110b94ad 100644 --- a/doc/source/reference/general_utility_functions.rst +++ b/doc/source/reference/general_utility_functions.rst @@ -43,6 +43,7 @@ Exceptions and warnings errors.NullFrequencyError errors.NumbaUtilError errors.OutOfBoundsDatetime + errors.OutOfBoundsTimedelta errors.ParserError errors.ParserWarning errors.PerformanceWarning diff --git a/pandas/_libs/tslibs/__init__.py b/pandas/_libs/tslibs/__init__.py index 0ae4cc97d07e3..7723140e3eab1 100644 --- a/pandas/_libs/tslibs/__init__.py +++ b/pandas/_libs/tslibs/__init__.py @@ -7,6 +7,7 @@ "nat_strings", "is_null_datetimelike", "OutOfBoundsDatetime", + "OutOfBoundsTimedelta", "IncompatibleFrequency", "Period", "Resolution", @@ -26,7 +27,7 @@ ] from . import dtypes -from .conversion import localize_pydatetime +from .conversion import OutOfBoundsTimedelta, localize_pydatetime from .dtypes import Resolution from .nattype import NaT, NaTType, iNaT, is_null_datetimelike, nat_strings from .np_datetime import OutOfBoundsDatetime diff --git a/pandas/_libs/tslibs/conversion.pyx b/pandas/_libs/tslibs/conversion.pyx index 31d2d0e9572f5..85da7a60a029a 100644 --- a/pandas/_libs/tslibs/conversion.pyx +++ b/pandas/_libs/tslibs/conversion.pyx @@ -51,6 +51,15 @@ DT64NS_DTYPE = np.dtype('M8[ns]') TD64NS_DTYPE = np.dtype('m8[ns]') +class OutOfBoundsTimedelta(ValueError): + """ + Raised when encountering a timedelta value that cannot be represented + as a timedelta64[ns]. 
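A sketch of the new failure mode, mirroring the test this patch adds: year-unit timedeltas of this size cannot be represented as timedelta64[ns].

import numpy as np
import pytest

from pandas._libs.tslibs import OutOfBoundsTimedelta, conversion

arr = np.arange(10).astype("m8[Y]") * 100  # up to 900 years, beyond the int64 nanosecond range
with pytest.raises(OutOfBoundsTimedelta):
    conversion.ensure_timedelta64ns(arr)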
+ """ + # Timedelta analogue to OutOfBoundsDatetime + pass + + # ---------------------------------------------------------------------- # Unit Conversion Helpers @@ -228,11 +237,34 @@ def ensure_timedelta64ns(arr: ndarray, copy: bool=True): Returns ------- - result : ndarray with dtype timedelta64[ns] - + ndarray[timedelta64[ns]] """ - return arr.astype(TD64NS_DTYPE, copy=copy) - # TODO: check for overflows when going from a lower-resolution to nanos + assert arr.dtype.kind == "m", arr.dtype + + if arr.dtype == TD64NS_DTYPE: + return arr.copy() if copy else arr + + # Re-use the datetime64 machinery to do an overflow-safe `astype` + dtype = arr.dtype.str.replace("m8", "M8") + dummy = arr.view(dtype) + try: + dt64_result = ensure_datetime64ns(dummy, copy) + except OutOfBoundsDatetime as err: + # Re-write the exception in terms of timedelta64 instead of dt64 + + # Find the value that we are going to report as causing an overflow + tdmin = arr.min() + tdmax = arr.max() + if np.abs(tdmin) >= np.abs(tdmax): + bad_val = tdmin + else: + bad_val = tdmax + + raise OutOfBoundsTimedelta( + f"Out of bounds for nanosecond {arr.dtype.name} {bad_val}" + ) + + return dt64_result.view(TD64NS_DTYPE) # ---------------------------------------------------------------------- diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index d8779dae7c384..6a4b3318d3aa7 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -2302,7 +2302,8 @@ class TimeDeltaBlock(DatetimeLikeBlockMixin, IntBlock): def __init__(self, values, placement, ndim=None): if values.dtype != TD64NS_DTYPE: - values = conversion.ensure_timedelta64ns(values) + # e.g. non-nano or int64 + values = TimedeltaArray._from_sequence(values)._data if isinstance(values, TimedeltaArray): values = values._data assert isinstance(values, np.ndarray), type(values) diff --git a/pandas/errors/__init__.py b/pandas/errors/__init__.py index e3427d93f3d84..6ac3004d29996 100644 --- a/pandas/errors/__init__.py +++ b/pandas/errors/__init__.py @@ -6,7 +6,7 @@ from pandas._config.config import OptionError -from pandas._libs.tslibs import OutOfBoundsDatetime +from pandas._libs.tslibs import OutOfBoundsDatetime, OutOfBoundsTimedelta class NullFrequencyError(ValueError): diff --git a/pandas/tests/tslibs/test_api.py b/pandas/tests/tslibs/test_api.py index 036037032031a..eca444c9ceb34 100644 --- a/pandas/tests/tslibs/test_api.py +++ b/pandas/tests/tslibs/test_api.py @@ -32,6 +32,7 @@ def test_namespace(): "is_null_datetimelike", "nat_strings", "OutOfBoundsDatetime", + "OutOfBoundsTimedelta", "Period", "IncompatibleFrequency", "Resolution", diff --git a/pandas/tests/tslibs/test_conversion.py b/pandas/tests/tslibs/test_conversion.py index b35940c6bb95b..4f184b78f34a1 100644 --- a/pandas/tests/tslibs/test_conversion.py +++ b/pandas/tests/tslibs/test_conversion.py @@ -4,7 +4,13 @@ import pytest from pytz import UTC -from pandas._libs.tslibs import conversion, iNaT, timezones, tzconversion +from pandas._libs.tslibs import ( + OutOfBoundsTimedelta, + conversion, + iNaT, + timezones, + tzconversion, +) from pandas import Timestamp, date_range import pandas._testing as tm @@ -89,6 +95,13 @@ def test_ensure_datetime64ns_bigendian(): tm.assert_numpy_array_equal(result, expected) +def test_ensure_timedelta64ns_overflows(): + arr = np.arange(10).astype("m8[Y]") * 100 + msg = r"Out of bounds for nanosecond timedelta64\[Y\] 900" + with pytest.raises(OutOfBoundsTimedelta, match=msg): + conversion.ensure_timedelta64ns(arr) + + class 
SubDatetime(datetime): pass From 4585246cc7e55b12c20d86456dab0476930dc3d5 Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Fri, 10 Jul 2020 15:20:26 -0700 Subject: [PATCH 101/632] asvs for normalize functions (#35221) --- asv_bench/benchmarks/tslibs/normalize.py | 32 ++++++++++++++++++++++++ 1 file changed, 32 insertions(+) create mode 100644 asv_bench/benchmarks/tslibs/normalize.py diff --git a/asv_bench/benchmarks/tslibs/normalize.py b/asv_bench/benchmarks/tslibs/normalize.py new file mode 100644 index 0000000000000..7d4e0556f4d96 --- /dev/null +++ b/asv_bench/benchmarks/tslibs/normalize.py @@ -0,0 +1,32 @@ +try: + from pandas._libs.tslibs import normalize_i8_timestamps, is_date_array_normalized +except ImportError: + from pandas._libs.tslibs.conversion import ( + normalize_i8_timestamps, + is_date_array_normalized, + ) + +import pandas as pd + +from .tslib import _sizes, _tzs + + +class Normalize: + params = [ + _sizes, + _tzs, + ] + param_names = ["size", "tz"] + + def setup(self, size, tz): + # use an array that will have is_date_array_normalized give True, + # so we do not short-circuit early. + dti = pd.date_range("2016-01-01", periods=10, tz=tz).repeat(size // 10) + self.i8data = dti.asi8 + + def time_normalize_i8_timestamps(self, size, tz): + normalize_i8_timestamps(self.i8data, tz) + + def time_is_date_array_normalized(self, size, tz): + # TODO: cases with different levels of short-circuiting + is_date_array_normalized(self.i8data, tz) From 32f04a3126fd66e7b245503e10f1bc456aa3f93b Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Fri, 10 Jul 2020 15:20:51 -0700 Subject: [PATCH 102/632] CLN: remove unused freq kwarg in libparsing (#35167) --- pandas/_libs/tslibs/parsing.pyx | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/pandas/_libs/tslibs/parsing.pyx b/pandas/_libs/tslibs/parsing.pyx index 92654f3b587e5..c4f369d0d3b3f 100644 --- a/pandas/_libs/tslibs/parsing.pyx +++ b/pandas/_libs/tslibs/parsing.pyx @@ -197,7 +197,6 @@ cdef inline bint does_string_look_like_time(str parse_string): def parse_datetime_string( str date_string, - object freq=None, bint dayfirst=False, bint yearfirst=False, **kwargs, @@ -228,7 +227,7 @@ def parse_datetime_string( return dt try: - dt, _ = _parse_dateabbr_string(date_string, _DEFAULT_DATETIME, freq) + dt, _ = _parse_dateabbr_string(date_string, _DEFAULT_DATETIME, freq=None) return dt except DateParseError: raise @@ -265,9 +264,6 @@ def parse_time_string(arg: str, freq=None, dayfirst=None, yearfirst=None): ------- datetime, datetime/dateutil.parser._result, str """ - if not isinstance(arg, str): - raise TypeError("parse_time_string argument must be str") - if is_offset_object(freq): freq = freq.rule_code From bc901abc267018d02735792f14b332dfb02999ec Mon Sep 17 00:00:00 2001 From: Evan Kanter <34668740+evank28@users.noreply.github.com> Date: Fri, 10 Jul 2020 18:22:50 -0400 Subject: [PATCH 103/632] Update 02_read_write.rst (#35222) --- doc/source/getting_started/intro_tutorials/02_read_write.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/getting_started/intro_tutorials/02_read_write.rst b/doc/source/getting_started/intro_tutorials/02_read_write.rst index 12fa2a1e094d6..c6c6bfefc4303 100644 --- a/doc/source/getting_started/intro_tutorials/02_read_write.rst +++ b/doc/source/getting_started/intro_tutorials/02_read_write.rst @@ -151,7 +151,7 @@ named *passengers* instead of the default *Sheet1*. 
By setting -The equivalent read function :meth:`~DataFrame.to_excel` will reload the data to a +The equivalent read function :meth:`~DataFrame.read_excel` will reload the data to a ``DataFrame``: .. ipython:: python From 8fd4cd3436b0aae45a6ba505cd663ebc67d5fbfe Mon Sep 17 00:00:00 2001 From: rhshadrach <45562402+rhshadrach@users.noreply.github.com> Date: Fri, 10 Jul 2020 18:46:41 -0400 Subject: [PATCH 104/632] BUG: transform with nunique should have dtype int64 (#35152) --- doc/source/whatsnew/v1.1.0.rst | 1 + pandas/core/common.py | 21 ++++++++++++++++++- pandas/core/groupby/generic.py | 30 ++++++++++------------------ pandas/tests/groupby/test_nunique.py | 8 ++++++++ 4 files changed, 40 insertions(+), 20 deletions(-) diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst index d3724112ef455..a4c107ddefd7b 100644 --- a/doc/source/whatsnew/v1.1.0.rst +++ b/doc/source/whatsnew/v1.1.0.rst @@ -1089,6 +1089,7 @@ Groupby/resample/rolling - Bug in :meth:`DataFrame.groupby` lost index, when one of the ``agg`` keys referenced an empty list (:issue:`32580`) - Bug in :meth:`Rolling.apply` where ``center=True`` was ignored when ``engine='numba'`` was specified (:issue:`34784`) - Bug in :meth:`DataFrame.ewm.cov` was throwing ``AssertionError`` for :class:`MultiIndex` inputs (:issue:`34440`) +- Bug in :meth:`core.groupby.DataFrameGroupBy.transform` when ``func='nunique'`` and columns are of type ``datetime64``, the result would also be of type ``datetime64`` instead of ``int64`` (:issue:`35109`) Reshaping ^^^^^^^^^ diff --git a/pandas/core/common.py b/pandas/core/common.py index b4f726f4e59a9..e7260a9923ee0 100644 --- a/pandas/core/common.py +++ b/pandas/core/common.py @@ -5,10 +5,11 @@ """ from collections import abc, defaultdict +import contextlib from datetime import datetime, timedelta from functools import partial import inspect -from typing import Any, Collection, Iterable, List, Union +from typing import Any, Collection, Iterable, Iterator, List, Union import warnings import numpy as np @@ -502,3 +503,21 @@ def convert_to_list_like( return list(values) return [values] + + +@contextlib.contextmanager +def temp_setattr(obj, attr: str, value) -> Iterator[None]: + """Temporarily set attribute on an object. + + Args: + obj: Object whose attribute will be modified. + attr: Attribute to modify. + value: Value to temporarily set attribute to. + + Yields: + obj with modified attribute. + """ + old_value = getattr(obj, attr) + setattr(obj, attr, value) + yield obj + setattr(obj, attr, old_value) diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index 7f2eac520264d..1f49ee2b0b665 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -500,8 +500,10 @@ def transform(self, func, *args, engine="cython", engine_kwargs=None, **kwargs): # If func is a reduction, we need to broadcast the # result to the whole group. Compute func result # and deal with possible broadcasting below. - result = getattr(self, func)(*args, **kwargs) - return self._transform_fast(result, func) + # Temporarily set observed for dealing with categoricals. 
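A small sketch of the temp_setattr helper added above; the object here is a hypothetical stand-in, the point being that the old attribute value is restored on exit.

import pandas.core.common as com

class Dummy:
    observed = False

obj = Dummy()
with com.temp_setattr(obj, "observed", True):
    assert obj.observed is True  # temporarily flipped inside the block
assert obj.observed is False     # restored afterwards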
+ with com.temp_setattr(self, "observed", True): + result = getattr(self, func)(*args, **kwargs) + return self._transform_fast(result) def _transform_general( self, func, *args, engine="cython", engine_kwargs=None, **kwargs @@ -554,17 +556,14 @@ def _transform_general( result.index = self._selected_obj.index return result - def _transform_fast(self, result, func_nm: str) -> Series: + def _transform_fast(self, result) -> Series: """ fast version of transform, only applicable to builtin/cythonizable functions """ ids, _, ngroup = self.grouper.group_info result = result.reindex(self.grouper.result_index, copy=False) - cast = self._transform_should_cast(func_nm) out = algorithms.take_1d(result._values, ids) - if cast: - out = maybe_cast_result(out, self.obj, how=func_nm) return self.obj._constructor(out, index=self.obj.index, name=self.obj.name) def filter(self, func, dropna=True, *args, **kwargs): @@ -1465,25 +1464,23 @@ def transform(self, func, *args, engine="cython", engine_kwargs=None, **kwargs): # If func is a reduction, we need to broadcast the # result to the whole group. Compute func result # and deal with possible broadcasting below. - result = getattr(self, func)(*args, **kwargs) + # Temporarily set observed for dealing with categoricals. + with com.temp_setattr(self, "observed", True): + result = getattr(self, func)(*args, **kwargs) if isinstance(result, DataFrame) and result.columns.equals( self._obj_with_exclusions.columns ): - return self._transform_fast(result, func) + return self._transform_fast(result) return self._transform_general( func, engine=engine, engine_kwargs=engine_kwargs, *args, **kwargs ) - def _transform_fast(self, result: DataFrame, func_nm: str) -> DataFrame: + def _transform_fast(self, result: DataFrame) -> DataFrame: """ Fast transform path for aggregations """ - # if there were groups with no observations (Categorical only?) 
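A sketch of the fixed behaviour, mirroring the regression test added below: transform("nunique") on datetime64 data now returns int64 counts instead of datetimes.

import pandas as pd

df = pd.DataFrame({"date": pd.date_range("2008-12-31", "2009-01-02")})
out = df.groupby([0, 0, 1])["date"].transform("nunique")
assert list(out) == [2, 2, 1]
assert out.dtype == "int64"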
- # try casting data to original dtype - cast = self._transform_should_cast(func_nm) - obj = self._obj_with_exclusions # for each col, reshape to to size of original frame @@ -1492,12 +1489,7 @@ def _transform_fast(self, result: DataFrame, func_nm: str) -> DataFrame: result = result.reindex(self.grouper.result_index, copy=False) output = [] for i, _ in enumerate(result.columns): - res = algorithms.take_1d(result.iloc[:, i].values, ids) - # TODO: we have no test cases that get here with EA dtypes; - # maybe_cast_result may not be needed if EAs never get here - if cast: - res = maybe_cast_result(res, obj.iloc[:, i], how=func_nm) - output.append(res) + output.append(algorithms.take_1d(result.iloc[:, i].values, ids)) return self.obj._constructor._from_arrays( output, columns=result.columns, index=obj.index diff --git a/pandas/tests/groupby/test_nunique.py b/pandas/tests/groupby/test_nunique.py index 1475b1ce2907c..c3347b7ae52f3 100644 --- a/pandas/tests/groupby/test_nunique.py +++ b/pandas/tests/groupby/test_nunique.py @@ -167,3 +167,11 @@ def test_nunique_preserves_column_level_names(): result = test.groupby([0, 0, 0]).nunique() expected = pd.DataFrame([2], columns=test.columns) tm.assert_frame_equal(result, expected) + + +def test_nunique_transform_with_datetime(): + # GH 35109 - transform with nunique on datetimes results in integers + df = pd.DataFrame(date_range("2008-12-31", "2009-01-02"), columns=["date"]) + result = df.groupby([0, 0, 1])["date"].transform("nunique") + expected = pd.Series([2, 2, 1], name="date") + tm.assert_series_equal(result, expected) From 4fc4622ff50253e4fad73ebe5222ff9cceca5e06 Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Fri, 10 Jul 2020 16:54:34 -0700 Subject: [PATCH 105/632] REF: move registry, Registry to dtypes.base (#34830) --- pandas/api/extensions/__init__.py | 2 +- pandas/core/arrays/integer.py | 2 +- pandas/core/arrays/sparse/dtype.py | 3 +- pandas/core/arrays/string_.py | 3 +- pandas/core/construction.py | 2 +- pandas/core/dtypes/base.py | 91 +++++++++++++++++++++++++++++- pandas/core/dtypes/common.py | 2 +- pandas/core/dtypes/dtypes.py | 91 +----------------------------- pandas/tests/arrays/test_array.py | 2 +- pandas/tests/arrays/test_period.py | 3 +- pandas/tests/dtypes/test_dtypes.py | 2 +- 11 files changed, 101 insertions(+), 102 deletions(-) diff --git a/pandas/api/extensions/__init__.py b/pandas/api/extensions/__init__.py index 3019dd0e9b371..401e7081d2422 100644 --- a/pandas/api/extensions/__init__.py +++ b/pandas/api/extensions/__init__.py @@ -4,7 +4,7 @@ from pandas._libs.lib import no_default -from pandas.core.dtypes.dtypes import ExtensionDtype, register_extension_dtype +from pandas.core.dtypes.base import ExtensionDtype, register_extension_dtype from pandas.core.accessor import ( register_dataframe_accessor, diff --git a/pandas/core/arrays/integer.py b/pandas/core/arrays/integer.py index b5cb681812939..b0958af41158c 100644 --- a/pandas/core/arrays/integer.py +++ b/pandas/core/arrays/integer.py @@ -10,6 +10,7 @@ from pandas.compat.numpy import function as nv from pandas.util._decorators import cache_readonly +from pandas.core.dtypes.base import register_extension_dtype from pandas.core.dtypes.common import ( is_bool_dtype, is_datetime64_dtype, @@ -21,7 +22,6 @@ is_object_dtype, pandas_dtype, ) -from pandas.core.dtypes.dtypes import register_extension_dtype from pandas.core.dtypes.missing import isna from pandas.core import ops diff --git a/pandas/core/arrays/sparse/dtype.py b/pandas/core/arrays/sparse/dtype.py index 
index b3da9cbeb44af..ccf2825162f51 100644
--- a/pandas/core/arrays/sparse/dtype.py
+++ b/pandas/core/arrays/sparse/dtype.py
@@ -9,7 +9,7 @@
 from pandas._typing import Dtype, DtypeObj
 from pandas.errors import PerformanceWarning
 
-from pandas.core.dtypes.base import ExtensionDtype
+from pandas.core.dtypes.base import ExtensionDtype, register_extension_dtype
 from pandas.core.dtypes.cast import astype_nansafe
 from pandas.core.dtypes.common import (
     is_bool_dtype,
@@ -19,7 +19,6 @@
     is_string_dtype,
     pandas_dtype,
 )
-from pandas.core.dtypes.dtypes import register_extension_dtype
 from pandas.core.dtypes.missing import isna, na_value_for_dtype
 
 if TYPE_CHECKING:
diff --git a/pandas/core/arrays/string_.py b/pandas/core/arrays/string_.py
index ac501a8afbe09..5104e3f12f5b4 100644
--- a/pandas/core/arrays/string_.py
+++ b/pandas/core/arrays/string_.py
@@ -5,9 +5,8 @@
 
 from pandas._libs import lib, missing as libmissing
 
-from pandas.core.dtypes.base import ExtensionDtype
+from pandas.core.dtypes.base import ExtensionDtype, register_extension_dtype
 from pandas.core.dtypes.common import pandas_dtype
-from pandas.core.dtypes.dtypes import register_extension_dtype
 from pandas.core.dtypes.generic import ABCDataFrame, ABCIndexClass, ABCSeries
 from pandas.core.dtypes.inference import is_array_like
 
diff --git a/pandas/core/construction.py b/pandas/core/construction.py
index 9ac661f97a56e..6c58698989e96 100644
--- a/pandas/core/construction.py
+++ b/pandas/core/construction.py
@@ -15,6 +15,7 @@
 from pandas._libs.tslibs import IncompatibleFrequency, OutOfBoundsDatetime
 from pandas._typing import AnyArrayLike, ArrayLike, Dtype, DtypeObj
 
+from pandas.core.dtypes.base import ExtensionDtype, registry
 from pandas.core.dtypes.cast import (
     construct_1d_arraylike_from_scalar,
     construct_1d_ndarray_preserving_na,
@@ -36,7 +37,6 @@
     is_object_dtype,
     is_timedelta64_ns_dtype,
 )
-from pandas.core.dtypes.dtypes import ExtensionDtype, registry
 from pandas.core.dtypes.generic import (
     ABCExtensionArray,
     ABCIndexClass,
diff --git a/pandas/core/dtypes/base.py b/pandas/core/dtypes/base.py
index 2d81dd4d884a3..07c73876954d0 100644
--- a/pandas/core/dtypes/base.py
+++ b/pandas/core/dtypes/base.py
@@ -2,7 +2,7 @@
 Extend pandas with custom array types.
 """
-from typing import TYPE_CHECKING, Any, List, Optional, Tuple, Type
+from typing import TYPE_CHECKING, Any, List, Optional, Tuple, Type, Union
 
 import numpy as np
 
@@ -352,3 +352,92 @@ def _get_common_dtype(self, dtypes: List[DtypeObj]) -> Optional[DtypeObj]:
             return self
         else:
             return None
+
+
+def register_extension_dtype(cls: Type[ExtensionDtype]) -> Type[ExtensionDtype]:
+    """
+    Register an ExtensionType with pandas as a class decorator.
+
+    .. versionadded:: 0.24.0
+
+    This enables operations like ``.astype(name)`` for the name
+    of the ExtensionDtype.
+
+    Returns
+    -------
+    callable
+        A class decorator.
+
+    Examples
+    --------
+    >>> from pandas.api.extensions import register_extension_dtype
+    >>> from pandas.api.extensions import ExtensionDtype
+    >>> @register_extension_dtype
+    ... class MyExtensionDtype(ExtensionDtype):
+    ...     name = "myextension"
+    """
+    registry.register(cls)
+    return cls
+
+
+class Registry:
+    """
+    Registry for dtype inference.
+
+    The registry allows one to map a string repr of an extension
+    dtype to an extension dtype. The string alias can be used in several
+    places, including
+
+    * Series and Index constructors
+    * :meth:`pandas.array`
+    * :meth:`pandas.Series.astype`
+
+    Multiple extension types can be registered.
+    These are tried in order.
+ """ + + def __init__(self): + self.dtypes: List[Type[ExtensionDtype]] = [] + + def register(self, dtype: Type[ExtensionDtype]) -> None: + """ + Parameters + ---------- + dtype : ExtensionDtype class + """ + if not issubclass(dtype, ExtensionDtype): + raise ValueError("can only register pandas extension dtypes") + + self.dtypes.append(dtype) + + def find( + self, dtype: Union[Type[ExtensionDtype], str] + ) -> Optional[Type[ExtensionDtype]]: + """ + Parameters + ---------- + dtype : Type[ExtensionDtype] or str + + Returns + ------- + return the first matching dtype, otherwise return None + """ + if not isinstance(dtype, str): + dtype_type = dtype + if not isinstance(dtype, type): + dtype_type = type(dtype) + if issubclass(dtype_type, ExtensionDtype): + return dtype + + return None + + for dtype_type in self.dtypes: + try: + return dtype_type.construct_from_string(dtype) + except TypeError: + pass + + return None + + +registry = Registry() diff --git a/pandas/core/dtypes/common.py b/pandas/core/dtypes/common.py index 9e960375e9bf4..a2ca4d84b2bf6 100644 --- a/pandas/core/dtypes/common.py +++ b/pandas/core/dtypes/common.py @@ -11,13 +11,13 @@ from pandas._libs.tslibs import conversion from pandas._typing import ArrayLike, DtypeObj +from pandas.core.dtypes.base import registry from pandas.core.dtypes.dtypes import ( CategoricalDtype, DatetimeTZDtype, ExtensionDtype, IntervalDtype, PeriodDtype, - registry, ) from pandas.core.dtypes.generic import ABCCategorical, ABCIndexClass from pandas.core.dtypes.inference import ( # noqa:F401 diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py index a9d2430717e4f..22480fbc47508 100644 --- a/pandas/core/dtypes/dtypes.py +++ b/pandas/core/dtypes/dtypes.py @@ -24,7 +24,7 @@ from pandas._libs.tslibs.offsets import BaseOffset from pandas._typing import DtypeObj, Ordered -from pandas.core.dtypes.base import ExtensionDtype +from pandas.core.dtypes.base import ExtensionDtype, register_extension_dtype from pandas.core.dtypes.generic import ABCCategoricalIndex, ABCIndexClass from pandas.core.dtypes.inference import is_bool, is_list_like @@ -40,95 +40,6 @@ str_type = str -def register_extension_dtype(cls: Type[ExtensionDtype]) -> Type[ExtensionDtype]: - """ - Register an ExtensionType with pandas as class decorator. - - .. versionadded:: 0.24.0 - - This enables operations like ``.astype(name)`` for the name - of the ExtensionDtype. - - Returns - ------- - callable - A class decorator. - - Examples - -------- - >>> from pandas.api.extensions import register_extension_dtype - >>> from pandas.api.extensions import ExtensionDtype - >>> @register_extension_dtype - ... class MyExtensionDtype(ExtensionDtype): - ... pass - """ - registry.register(cls) - return cls - - -class Registry: - """ - Registry for dtype inference. - - The registry allows one to map a string repr of a extension - dtype to an extension dtype. The string alias can be used in several - places, including - - * Series and Index constructors - * :meth:`pandas.array` - * :meth:`pandas.Series.astype` - - Multiple extension types can be registered. - These are tried in order. 
- """ - - def __init__(self): - self.dtypes: List[Type[ExtensionDtype]] = [] - - def register(self, dtype: Type[ExtensionDtype]) -> None: - """ - Parameters - ---------- - dtype : ExtensionDtype class - """ - if not issubclass(dtype, ExtensionDtype): - raise ValueError("can only register pandas extension dtypes") - - self.dtypes.append(dtype) - - def find( - self, dtype: Union[Type[ExtensionDtype], str] - ) -> Optional[Type[ExtensionDtype]]: - """ - Parameters - ---------- - dtype : Type[ExtensionDtype] or str - - Returns - ------- - return the first matching dtype, otherwise return None - """ - if not isinstance(dtype, str): - dtype_type = dtype - if not isinstance(dtype, type): - dtype_type = type(dtype) - if issubclass(dtype_type, ExtensionDtype): - return dtype - - return None - - for dtype_type in self.dtypes: - try: - return dtype_type.construct_from_string(dtype) - except TypeError: - pass - - return None - - -registry = Registry() - - class PandasExtensionDtype(ExtensionDtype): """ A np.dtype duck-typed class, suitable for holding a custom dtype. diff --git a/pandas/tests/arrays/test_array.py b/pandas/tests/arrays/test_array.py index ad6e6e4a98057..a0525aa511ee2 100644 --- a/pandas/tests/arrays/test_array.py +++ b/pandas/tests/arrays/test_array.py @@ -5,7 +5,7 @@ import pytest import pytz -from pandas.core.dtypes.dtypes import registry +from pandas.core.dtypes.base import registry import pandas as pd import pandas._testing as tm diff --git a/pandas/tests/arrays/test_period.py b/pandas/tests/arrays/test_period.py index 27e6334788284..8887dd0278afe 100644 --- a/pandas/tests/arrays/test_period.py +++ b/pandas/tests/arrays/test_period.py @@ -5,7 +5,8 @@ from pandas._libs.tslibs.period import IncompatibleFrequency import pandas.util._test_decorators as td -from pandas.core.dtypes.dtypes import PeriodDtype, registry +from pandas.core.dtypes.base import registry +from pandas.core.dtypes.dtypes import PeriodDtype import pandas as pd import pandas._testing as tm diff --git a/pandas/tests/dtypes/test_dtypes.py b/pandas/tests/dtypes/test_dtypes.py index b1fe673e9e2f1..a58dc5e5ec74a 100644 --- a/pandas/tests/dtypes/test_dtypes.py +++ b/pandas/tests/dtypes/test_dtypes.py @@ -4,6 +4,7 @@ import pytest import pytz +from pandas.core.dtypes.base import registry from pandas.core.dtypes.common import ( is_bool_dtype, is_categorical, @@ -22,7 +23,6 @@ DatetimeTZDtype, IntervalDtype, PeriodDtype, - registry, ) import pandas as pd From 331093e204d7ae80f88cabd52de69be1ca007e9a Mon Sep 17 00:00:00 2001 From: Justin Essert Date: Fri, 10 Jul 2020 21:02:54 -0500 Subject: [PATCH 106/632] DF.__setitem__ creates extension column when given extension scalar (#34875) * Bugfix to make DF.__setitem__ create extension column instead of object column when given an extension scalar * removed bad whitespace * Apply suggestions from code review Checking if extension dtype via built in function instead of manually Co-authored-by: Tom Augspurger * added missing : * modified cast_extension_scalar_to_array test to include an Interval type * added user-facing test for extension type bug * fixed pep8 issues * added note about bug in setting series to scalar extension type * corrected order of imports * corrected order of imports * fixed black formatting errors * removed extra comma * updated cast_scalar_to_arr to support tuple shape for extension dtype * removed unneeded code * added coverage for datetime with timezone in extension_array test * added TODO * correct line that was too long * fixed dtype issue with tz test * 
creating distinct arrays for each column * resolving mypy error * added docstring info and test * removed unneeded import * flattened else case in init * refactored extension type column fix * reverted docstring changes * reverted docstring changes * removed unneeded imports * reverted test changes * fixed construct_1d_arraylike bug * reorganized if statements * moved what's new statement to correct file * created new test for period df construction * added assert_frame_equal to period_data test * Using pandas array instead of df constructor for better test Co-authored-by: Joris Van den Bossche * changed wording * pylint fixes * parameterized test and added comment * removed extra comma * parameterized test * renamed test Co-authored-by: Tom Augspurger Co-authored-by: Joris Van den Bossche --- doc/source/whatsnew/v1.1.0.rst | 1 + pandas/core/frame.py | 48 ++++++++++++++----- pandas/tests/frame/indexing/test_setitem.py | 33 ++++++++++++- .../tests/frame/methods/test_combine_first.py | 8 +++- pandas/tests/frame/test_constructors.py | 28 ++++++++++- 5 files changed, 103 insertions(+), 15 deletions(-) diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst index a4c107ddefd7b..eb4075927b6aa 100644 --- a/doc/source/whatsnew/v1.1.0.rst +++ b/doc/source/whatsnew/v1.1.0.rst @@ -1146,6 +1146,7 @@ ExtensionArray - Fixed bug where :meth:`StringArray.memory_usage` was not implemented (:issue:`33963`) - Fixed bug where :meth:`DataFrameGroupBy` would ignore the ``min_count`` argument for aggregations on nullable boolean dtypes (:issue:`34051`) - Fixed bug that `DataFrame(columns=.., dtype='string')` would fail (:issue:`27953`, :issue:`33623`) +- Bug where :class:`DataFrame` column set to scalar extension type was considered an object type rather than the extension type (:issue:`34832`) - Fixed bug in ``IntegerArray.astype`` to correctly copy the mask as well (:issue:`34931`). 
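For illustration, a minimal sketch of the behavior change the :issue:`34832` entry above describes (the frame and column names are illustrative):

    import pandas as pd

    df = pd.DataFrame({"idx": [1, 2, 3]})
    df["per"] = pd.Period("2020-01", freq="M")

    # With the fix, the scalar broadcasts as a period[M] extension column
    # instead of being coerced to object dtype.
    assert str(df["per"].dtype) == "period[M]"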
Other diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 10539ab74b4aa..cfe5621fec14e 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -520,25 +520,43 @@ def __init__( mgr = init_ndarray(data, index, columns, dtype=dtype, copy=copy) else: mgr = init_dict({}, index, columns, dtype=dtype) + # For data is scalar else: - try: - arr = np.array(data, dtype=dtype, copy=copy) - except (ValueError, TypeError) as err: - exc = TypeError( - "DataFrame constructor called with " - f"incompatible data and dtype: {err}" - ) - raise exc from err + if index is None or columns is None: + raise ValueError("DataFrame constructor not properly called!") + + if not dtype: + dtype, _ = infer_dtype_from_scalar(data, pandas_dtype=True) + + # For data is a scalar extension dtype + if is_extension_array_dtype(dtype): + + values = [ + construct_1d_arraylike_from_scalar(data, len(index), dtype) + for _ in range(len(columns)) + ] + mgr = arrays_to_mgr(values, columns, index, columns, dtype=None) + else: + # Attempt to coerce to a numpy array + try: + arr = np.array(data, dtype=dtype, copy=copy) + except (ValueError, TypeError) as err: + exc = TypeError( + "DataFrame constructor called with " + f"incompatible data and dtype: {err}" + ) + raise exc from err + + if arr.ndim != 0: + raise ValueError("DataFrame constructor not properly called!") - if arr.ndim == 0 and index is not None and columns is not None: values = cast_scalar_to_array( (len(index), len(columns)), data, dtype=dtype ) + mgr = init_ndarray( values, index, columns, dtype=values.dtype, copy=False ) - else: - raise ValueError("DataFrame constructor not properly called!") NDFrame.__init__(self, mgr) @@ -3740,7 +3758,13 @@ def reindexer(value): infer_dtype, _ = infer_dtype_from_scalar(value, pandas_dtype=True) # upcast - value = cast_scalar_to_array(len(self.index), value) + if is_extension_array_dtype(infer_dtype): + value = construct_1d_arraylike_from_scalar( + value, len(self.index), infer_dtype + ) + else: + value = cast_scalar_to_array(len(self.index), value) + value = maybe_cast_to_datetime(value, infer_dtype) # return internal types directly diff --git a/pandas/tests/frame/indexing/test_setitem.py b/pandas/tests/frame/indexing/test_setitem.py index 8fcdae95fbab5..9bb5338f1e07f 100644 --- a/pandas/tests/frame/indexing/test_setitem.py +++ b/pandas/tests/frame/indexing/test_setitem.py @@ -1,7 +1,18 @@ import numpy as np import pytest -from pandas import Categorical, DataFrame, Index, Series, Timestamp, date_range +from pandas.core.dtypes.dtypes import DatetimeTZDtype, IntervalDtype, PeriodDtype + +from pandas import ( + Categorical, + DataFrame, + Index, + Interval, + Period, + Series, + Timestamp, + date_range, +) import pandas._testing as tm from pandas.core.arrays import SparseArray @@ -150,3 +161,23 @@ def test_setitem_dict_preserves_dtypes(self): "c": float(b), } tm.assert_frame_equal(df, expected) + + @pytest.mark.parametrize( + "obj,dtype", + [ + (Period("2020-01"), PeriodDtype("M")), + (Interval(left=0, right=5), IntervalDtype("int64")), + ( + Timestamp("2011-01-01", tz="US/Eastern"), + DatetimeTZDtype(tz="US/Eastern"), + ), + ], + ) + def test_setitem_extension_types(self, obj, dtype): + # GH: 34832 + expected = DataFrame({"idx": [1, 2, 3], "obj": Series([obj] * 3, dtype=dtype)}) + + df = DataFrame({"idx": [1, 2, 3]}) + df["obj"] = obj + + tm.assert_frame_equal(df, expected) diff --git a/pandas/tests/frame/methods/test_combine_first.py b/pandas/tests/frame/methods/test_combine_first.py index 7715cb1cb6eec..78f265d32f8df 
100644
--- a/pandas/tests/frame/methods/test_combine_first.py
+++ b/pandas/tests/frame/methods/test_combine_first.py
@@ -199,12 +199,14 @@ def test_combine_first_timezone(self):
             columns=["UTCdatetime", "abc"],
             data=data1,
             index=pd.date_range("20140627", periods=1),
+            dtype="object",
         )
         data2 = pd.to_datetime("20121212 12:12").tz_localize("UTC")
         df2 = pd.DataFrame(
             columns=["UTCdatetime", "xyz"],
             data=data2,
             index=pd.date_range("20140628", periods=1),
+            dtype="object",
         )
         res = df2[["UTCdatetime"]].combine_first(df1)
         exp = pd.DataFrame(
@@ -217,10 +219,14 @@ def test_combine_first_timezone(self):
             },
             columns=["UTCdatetime", "abc"],
             index=pd.date_range("20140627", periods=2, freq="D"),
+            dtype="object",
         )
-        tm.assert_frame_equal(res, exp)
         assert res["UTCdatetime"].dtype == "datetime64[ns, UTC]"
         assert res["abc"].dtype == "datetime64[ns, UTC]"
+        # Need to cast all to "object" because combine_first does not retain dtypes:
+        # GH Issue 7509
+        res = res.astype("object")
+        tm.assert_frame_equal(res, exp)
 
         # see gh-10567
         dts1 = pd.date_range("2015-01-01", "2015-01-05", tz="UTC")
diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py
index ab4f7781467e7..64ae29e6de63c 100644
--- a/pandas/tests/frame/test_constructors.py
+++ b/pandas/tests/frame/test_constructors.py
@@ -14,13 +14,16 @@
 from pandas.compat.numpy import _np_version_under1p19
 
 from pandas.core.dtypes.common import is_integer_dtype
+from pandas.core.dtypes.dtypes import DatetimeTZDtype, IntervalDtype, PeriodDtype
 
 import pandas as pd
 from pandas import (
     Categorical,
     DataFrame,
     Index,
+    Interval,
     MultiIndex,
+    Period,
     RangeIndex,
     Series,
     Timedelta,
@@ -700,7 +703,7 @@ def create_data(constructor):
         tm.assert_frame_equal(result_timedelta, expected)
         tm.assert_frame_equal(result_Timedelta, expected)
 
-    def test_constructor_period(self):
+    def test_constructor_period_dict(self):
         # PeriodIndex
         a = pd.PeriodIndex(["2012-01", "NaT", "2012-04"], freq="M")
         b = pd.PeriodIndex(["2012-02-01", "2012-03-01", "NaT"], freq="D")
@@ -713,6 +716,29 @@ def test_constructor_period(self):
         assert df["a"].dtype == a.dtype
         assert df["b"].dtype == b.dtype
 
+    @pytest.mark.parametrize(
+        "data,dtype",
+        [
+            (Period("2020-01"), PeriodDtype("M")),
+            (Interval(left=0, right=5), IntervalDtype("int64")),
+            (
+                Timestamp("2011-01-01", tz="US/Eastern"),
+                DatetimeTZDtype(tz="US/Eastern"),
+            ),
+        ],
+    )
+    def test_constructor_extension_scalar_data(self, data, dtype):
+        # GH 34832
+        df = DataFrame(index=[0, 1], columns=["a", "b"], data=data)
+
+        assert df["a"].dtype == dtype
+        assert df["b"].dtype == dtype
+
+        arr = pd.array([data] * 2, dtype=dtype)
+        expected = DataFrame({"a": arr, "b": arr})
+
+        tm.assert_frame_equal(df, expected)
+
     def test_nested_dict_frame_constructor(self):
         rng = pd.period_range("1/1/2000", periods=5)
         df = DataFrame(np.random.randn(10, 5), columns=rng)

From 12086b321460942f6cecf0f3f46924650869e331 Mon Sep 17 00:00:00 2001
From: Kaiqi Dong
Date: Sat, 11 Jul 2020 04:10:19 +0200
Subject: [PATCH 107/632] TYPING/DOC: Move custom type to _typing and add whatsnew (#35220)

* remove \n from docstring
* fix issue 17038
* revert change
* revert change
* move defined typing and add whatsnew
---
 doc/source/whatsnew/v1.1.0.rst | 1 +
 pandas/_typing.py | 8 ++++++++
 pandas/core/aggregation.py | 10 +---------
 3 files changed, 10 insertions(+), 9 deletions(-)

diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst
index eb4075927b6aa..5f93e08d51baa 100644
--- a/doc/source/whatsnew/v1.1.0.rst
+++
b/doc/source/whatsnew/v1.1.0.rst @@ -332,6 +332,7 @@ Other enhancements - :meth:`~Series.explode` now accepts ``ignore_index`` to reset the index, similarly to :meth:`pd.concat` or :meth:`DataFrame.sort_values` (:issue:`34932`). - :meth:`read_csv` now accepts string values like "0", "0.0", "1", "1.0" as convertible to the nullable boolean dtype (:issue:`34859`) - :class:`pandas.core.window.ExponentialMovingWindow` now supports a ``times`` argument that allows ``mean`` to be calculated with observations spaced by the timestamps in ``times`` (:issue:`34839`) +- :meth:`DataFrame.agg` and :meth:`Series.agg` now accept named aggregation for renaming the output columns/indexes. (:issue:`26513`) .. --------------------------------------------------------------------------- diff --git a/pandas/_typing.py b/pandas/_typing.py index 4892abc5f6f51..8e98833ad37f7 100644 --- a/pandas/_typing.py +++ b/pandas/_typing.py @@ -96,3 +96,11 @@ # DataFrame::sort_index, among others ValueKeyFunc = Optional[Callable[["Series"], Union["Series", AnyArrayLike]]] IndexKeyFunc = Optional[Callable[["Index"], Union["Index", AnyArrayLike]]] + +# types of `func` kwarg for DataFrame.aggregate and Series.aggregate +AggFuncTypeBase = Union[Callable, str] +AggFuncType = Union[ + AggFuncTypeBase, + List[AggFuncTypeBase], + Dict[Label, Union[AggFuncTypeBase, List[AggFuncTypeBase]]], +] diff --git a/pandas/core/aggregation.py b/pandas/core/aggregation.py index 16c4a9f862d79..891048ae82dfd 100644 --- a/pandas/core/aggregation.py +++ b/pandas/core/aggregation.py @@ -17,7 +17,7 @@ Union, ) -from pandas._typing import Label +from pandas._typing import AggFuncType, Label from pandas.core.dtypes.common import is_dict_like, is_list_like @@ -26,14 +26,6 @@ from pandas.core.indexes.api import Index from pandas.core.series import FrameOrSeriesUnion, Series -# types of `func` kwarg for DataFrame.aggregate and Series.aggregate -AggFuncTypeBase = Union[Callable, str] -AggFuncType = Union[ - AggFuncTypeBase, - List[AggFuncTypeBase], - Dict[Label, Union[AggFuncTypeBase, List[AggFuncTypeBase]]], -] - def reconstruct_func( func: Optional[AggFuncType], **kwargs, From 054b31b069a237600f597aad90d3453fa90d5ec9 Mon Sep 17 00:00:00 2001 From: Paul Sanders Date: Sat, 11 Jul 2020 04:51:36 +0000 Subject: [PATCH 108/632] TST: added test for groupby/apply timezone-aware with copy (#35225) --- pandas/tests/groupby/test_apply.py | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/pandas/tests/groupby/test_apply.py b/pandas/tests/groupby/test_apply.py index 1945647ced08f..aa10f44670361 100644 --- a/pandas/tests/groupby/test_apply.py +++ b/pandas/tests/groupby/test_apply.py @@ -995,3 +995,18 @@ def test_apply_function_with_indexing_return_column(): result = df.groupby("foo1", as_index=False).apply(lambda x: x.mean()) expected = DataFrame({"foo1": ["one", "three", "two"], "foo2": [3.0, 4.0, 4.0]}) tm.assert_frame_equal(result, expected) + + +def test_apply_with_timezones_aware(): + # GH: 27212 + + dates = ["2001-01-01"] * 2 + ["2001-01-02"] * 2 + ["2001-01-03"] * 2 + index_no_tz = pd.DatetimeIndex(dates) + index_tz = pd.DatetimeIndex(dates, tz="UTC") + df1 = pd.DataFrame({"x": list(range(2)) * 3, "y": range(6), "t": index_no_tz}) + df2 = pd.DataFrame({"x": list(range(2)) * 3, "y": range(6), "t": index_tz}) + + result1 = df1.groupby("x", group_keys=False).apply(lambda df: df[["x", "y"]].copy()) + result2 = df2.groupby("x", group_keys=False).apply(lambda df: df[["x", "y"]].copy()) + + tm.assert_frame_equal(result1, result2) From 
37662ea387e17a4aca1f83445daeea977e145a9b Mon Sep 17 00:00:00 2001 From: Robin to Roxel <35864265+r-toroxel@users.noreply.github.com> Date: Sat, 11 Jul 2020 12:44:56 +0200 Subject: [PATCH 109/632] TST add corner cases in test_constructors (#35216) * TST add test case to drop_duplicates for inplace=True * CLN PEP-8 * TST move to existing test * CLN remove parenthesis * TST test from_tuple corner cases * add comment * CLN run black formatting * CLN refactor case as separate test --- pandas/tests/frame/test_constructors.py | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py index 64ae29e6de63c..bfff58d05007f 100644 --- a/pandas/tests/frame/test_constructors.py +++ b/pandas/tests/frame/test_constructors.py @@ -1204,6 +1204,13 @@ def test_constructor_list_of_odicts(self): expected = DataFrame(index=[0]) tm.assert_frame_equal(result, expected) + def test_constructor_single_row(self): + data = [OrderedDict([["a", 1.5], ["b", 3], ["c", 4], ["d", 6]])] + + result = DataFrame(data) + expected = DataFrame.from_dict(dict(zip([0], data)), orient="index") + tm.assert_frame_equal(result, expected.reindex(result.index)) + def test_constructor_ordered_dict_preserve_order(self): # see gh-13304 expected = DataFrame([[2, 1]], columns=["b", "a"]) @@ -1519,16 +1526,17 @@ def test_from_dict_columns_parameter(self): ) @pytest.mark.parametrize( - "data_dict, keys", + "data_dict, keys, orient", [ - ([{("a",): 1}, {("a",): 2}], [("a",)]), - ([OrderedDict([(("a",), 1), (("b",), 2)])], [("a",), ("b",)]), - ([{("a", "b"): 1}], [("a", "b")]), + ({}, [], "index"), + ([{("a",): 1}, {("a",): 2}], [("a",)], "columns"), + ([OrderedDict([(("a",), 1), (("b",), 2)])], [("a",), ("b",)], "columns"), + ([{("a", "b"): 1}], [("a", "b")], "columns"), ], ) - def test_constructor_from_dict_tuples(self, data_dict, keys): + def test_constructor_from_dict_tuples(self, data_dict, keys, orient): # GH 16769 - df = DataFrame.from_dict(data_dict) + df = DataFrame.from_dict(data_dict, orient) result = df.columns expected = Index(keys, dtype="object", tupleize_cols=False) From 9827ce00f95927ddf6a0565450bc175879454d4e Mon Sep 17 00:00:00 2001 From: Robin to Roxel <35864265+r-toroxel@users.noreply.github.com> Date: Sun, 12 Jul 2020 14:05:02 +0200 Subject: [PATCH 110/632] Tst verify return none in tests/frame (#35232) --- .../tests/frame/indexing/test_categorical.py | 5 +- pandas/tests/frame/indexing/test_mask.py | 6 +- pandas/tests/frame/indexing/test_where.py | 42 +++-- pandas/tests/frame/methods/test_clip.py | 3 +- pandas/tests/frame/methods/test_drop.py | 9 +- .../tests/frame/methods/test_interpolate.py | 12 +- pandas/tests/frame/methods/test_rename.py | 3 +- .../tests/frame/methods/test_rename_axis.py | 6 +- pandas/tests/frame/methods/test_replace.py | 163 +++++++++++++----- .../tests/frame/methods/test_reset_index.py | 6 +- pandas/tests/frame/methods/test_set_index.py | 3 +- pandas/tests/frame/methods/test_sort_index.py | 15 +- .../tests/frame/methods/test_sort_values.py | 27 ++- pandas/tests/frame/test_block_internals.py | 3 +- pandas/tests/frame/test_query_eval.py | 18 +- pandas/tests/frame/test_reshape.py | 3 +- pandas/tests/frame/test_to_csv.py | 3 +- 17 files changed, 230 insertions(+), 97 deletions(-) diff --git a/pandas/tests/frame/indexing/test_categorical.py b/pandas/tests/frame/indexing/test_categorical.py index d94dc8d2ffe00..cfc22b9b18729 100644 --- a/pandas/tests/frame/indexing/test_categorical.py +++ 
b/pandas/tests/frame/indexing/test_categorical.py @@ -326,7 +326,10 @@ def test_assigning_ops(self): df = DataFrame({"cats": catsf, "values": valuesf}, index=idxf) exp_fancy = exp_multi_row.copy() - exp_fancy["cats"].cat.set_categories(["a", "b", "c"], inplace=True) + return_value = exp_fancy["cats"].cat.set_categories( + ["a", "b", "c"], inplace=True + ) + assert return_value is None df[df["cats"] == "c"] = ["b", 2] # category c is kept in .categories diff --git a/pandas/tests/frame/indexing/test_mask.py b/pandas/tests/frame/indexing/test_mask.py index 30db6110efc80..23f3a18881782 100644 --- a/pandas/tests/frame/indexing/test_mask.py +++ b/pandas/tests/frame/indexing/test_mask.py @@ -36,12 +36,14 @@ def test_mask_inplace(self): rdf = df.copy() - rdf.where(cond, inplace=True) + return_value = rdf.where(cond, inplace=True) + assert return_value is None tm.assert_frame_equal(rdf, df.where(cond)) tm.assert_frame_equal(rdf, df.mask(~cond)) rdf = df.copy() - rdf.where(cond, -df, inplace=True) + return_value = rdf.where(cond, -df, inplace=True) + assert return_value is None tm.assert_frame_equal(rdf, df.where(cond, -df)) tm.assert_frame_equal(rdf, df.mask(~cond, -df)) diff --git a/pandas/tests/frame/indexing/test_where.py b/pandas/tests/frame/indexing/test_where.py index 24eb424bd5735..d114a3178b686 100644 --- a/pandas/tests/frame/indexing/test_where.py +++ b/pandas/tests/frame/indexing/test_where.py @@ -162,7 +162,8 @@ def _check_set(df, cond, check_dtypes=True): econd = cond.reindex_like(df).fillna(True) expected = dfi.mask(~econd) - dfi.where(cond, np.nan, inplace=True) + return_value = dfi.where(cond, np.nan, inplace=True) + assert return_value is None tm.assert_frame_equal(dfi, expected) # dtypes (and confirm upcasts)x @@ -303,7 +304,8 @@ def test_where_bug(self): tm.assert_frame_equal(result, expected) result = df.copy() - result.where(result > 2, np.nan, inplace=True) + return_value = result.where(result > 2, np.nan, inplace=True) + assert return_value is None tm.assert_frame_equal(result, expected) def test_where_bug_mixed(self, sint_dtype): @@ -324,7 +326,8 @@ def test_where_bug_mixed(self, sint_dtype): tm.assert_frame_equal(result, expected) result = df.copy() - result.where(result > 2, np.nan, inplace=True) + return_value = result.where(result > 2, np.nan, inplace=True) + assert return_value is None tm.assert_frame_equal(result, expected) def test_where_bug_transposition(self): @@ -417,7 +420,8 @@ def create(): result = df.where(pd.notna(df), df.mean(), axis="columns") tm.assert_frame_equal(result, expected) - df.where(pd.notna(df), df.mean(), inplace=True, axis="columns") + return_value = df.where(pd.notna(df), df.mean(), inplace=True, axis="columns") + assert return_value is None tm.assert_frame_equal(df, expected) df = create().fillna(0) @@ -453,7 +457,8 @@ def test_where_axis(self): tm.assert_frame_equal(result, expected) result = df.copy() - result.where(mask, s, axis="index", inplace=True) + return_value = result.where(mask, s, axis="index", inplace=True) + assert return_value is None tm.assert_frame_equal(result, expected) expected = DataFrame([[0, 1], [0, 1]], dtype="float64") @@ -461,7 +466,8 @@ def test_where_axis(self): tm.assert_frame_equal(result, expected) result = df.copy() - result.where(mask, s, axis="columns", inplace=True) + return_value = result.where(mask, s, axis="columns", inplace=True) + assert return_value is None tm.assert_frame_equal(result, expected) # Upcast needed @@ -474,7 +480,8 @@ def test_where_axis(self): tm.assert_frame_equal(result, expected) 
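# The pattern applied throughout this patch -- binding the result of an
# ``inplace=True`` call and asserting it is None -- guards against inplace
# methods silently returning ``self``. A standalone sketch of the convention
# (the frame contents are illustrative):
import pandas as pd
import pandas._testing as tm

df = pd.DataFrame({"a": [3, 1, 2]})
expected = df.sort_values("a")

return_value = df.sort_values("a", inplace=True)
assert return_value is None  # inplace methods return None by contract
tm.assert_frame_equal(df, expected)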
result = df.copy() - result.where(mask, s, axis="index", inplace=True) + return_value = result.where(mask, s, axis="index", inplace=True) + assert return_value is None tm.assert_frame_equal(result, expected) expected = DataFrame([[0, np.nan], [0, np.nan]]) @@ -488,7 +495,8 @@ def test_where_axis(self): } ) result = df.copy() - result.where(mask, s, axis="columns", inplace=True) + return_value = result.where(mask, s, axis="columns", inplace=True) + assert return_value is None tm.assert_frame_equal(result, expected) # Multiple dtypes (=> multiple Blocks) @@ -511,7 +519,8 @@ def test_where_axis(self): tm.assert_frame_equal(result, expected) result = df.copy() - result.where(mask, s1, axis="columns", inplace=True) + return_value = result.where(mask, s1, axis="columns", inplace=True) + assert return_value is None tm.assert_frame_equal(result, expected) result = df.where(mask, s2, axis="index") @@ -521,7 +530,8 @@ def test_where_axis(self): tm.assert_frame_equal(result, expected) result = df.copy() - result.where(mask, s2, axis="index", inplace=True) + return_value = result.where(mask, s2, axis="index", inplace=True) + assert return_value is None tm.assert_frame_equal(result, expected) # DataFrame vs DataFrame @@ -534,10 +544,12 @@ def test_where_axis(self): result = df.where(mask, d1, axis="index") tm.assert_frame_equal(result, expected) result = df.copy() - result.where(mask, d1, inplace=True) + return_value = result.where(mask, d1, inplace=True) + assert return_value is None tm.assert_frame_equal(result, expected) result = df.copy() - result.where(mask, d1, inplace=True, axis="index") + return_value = result.where(mask, d1, inplace=True, axis="index") + assert return_value is None tm.assert_frame_equal(result, expected) d2 = df.copy().drop(1, axis=1) @@ -549,10 +561,12 @@ def test_where_axis(self): result = df.where(mask, d2, axis="columns") tm.assert_frame_equal(result, expected) result = df.copy() - result.where(mask, d2, inplace=True) + return_value = result.where(mask, d2, inplace=True) + assert return_value is None tm.assert_frame_equal(result, expected) result = df.copy() - result.where(mask, d2, inplace=True, axis="columns") + return_value = result.where(mask, d2, inplace=True, axis="columns") + assert return_value is None tm.assert_frame_equal(result, expected) def test_where_callable(self): diff --git a/pandas/tests/frame/methods/test_clip.py b/pandas/tests/frame/methods/test_clip.py index 34727da3b95ae..ca62b56664518 100644 --- a/pandas/tests/frame/methods/test_clip.py +++ b/pandas/tests/frame/methods/test_clip.py @@ -22,7 +22,8 @@ def test_inplace_clip(self, float_frame): median = float_frame.median().median() frame_copy = float_frame.copy() - frame_copy.clip(upper=median, lower=median, inplace=True) + return_value = frame_copy.clip(upper=median, lower=median, inplace=True) + assert return_value is None assert not (frame_copy.values != median).any() def test_dataframe_clip(self): diff --git a/pandas/tests/frame/methods/test_drop.py b/pandas/tests/frame/methods/test_drop.py index 177d10cdbf615..aa44a2427dc8f 100644 --- a/pandas/tests/frame/methods/test_drop.py +++ b/pandas/tests/frame/methods/test_drop.py @@ -70,8 +70,10 @@ def test_drop_names(self): df_dropped_b = df.drop("b") df_dropped_e = df.drop("e", axis=1) df_inplace_b, df_inplace_e = df.copy(), df.copy() - df_inplace_b.drop("b", inplace=True) - df_inplace_e.drop("e", axis=1, inplace=True) + return_value = df_inplace_b.drop("b", inplace=True) + assert return_value is None + return_value = df_inplace_e.drop("e", axis=1, 
inplace=True) + assert return_value is None for obj in (df_dropped_b, df_dropped_e, df_inplace_b, df_inplace_e): assert obj.index.name == "first" assert obj.columns.name == "second" @@ -148,7 +150,8 @@ def test_drop(self): # GH#5628 df = pd.DataFrame(np.random.randn(10, 3), columns=list("abc")) expected = df[~(df.b > 0)] - df.drop(labels=df[df.b > 0].index, inplace=True) + return_value = df.drop(labels=df[df.b > 0].index, inplace=True) + assert return_value is None tm.assert_frame_equal(df, expected) def test_drop_multiindex_not_lexsorted(self): diff --git a/pandas/tests/frame/methods/test_interpolate.py b/pandas/tests/frame/methods/test_interpolate.py index facb116646573..ddb5723e7bd3e 100644 --- a/pandas/tests/frame/methods/test_interpolate.py +++ b/pandas/tests/frame/methods/test_interpolate.py @@ -246,11 +246,13 @@ def test_interp_inplace(self): df = DataFrame({"a": [1.0, 2.0, np.nan, 4.0]}) expected = DataFrame({"a": [1.0, 2.0, 3.0, 4.0]}) result = df.copy() - result["a"].interpolate(inplace=True) + return_value = result["a"].interpolate(inplace=True) + assert return_value is None tm.assert_frame_equal(result, expected) result = df.copy() - result["a"].interpolate(inplace=True, downcast="infer") + return_value = result["a"].interpolate(inplace=True, downcast="infer") + assert return_value is None tm.assert_frame_equal(result, expected.astype("int64")) def test_interp_inplace_row(self): @@ -259,7 +261,8 @@ def test_interp_inplace_row(self): {"a": [1.0, 2.0, 3.0, 4.0], "b": [np.nan, 2.0, 3.0, 4.0], "c": [3, 2, 2, 2]} ) expected = result.interpolate(method="linear", axis=1, inplace=False) - result.interpolate(method="linear", axis=1, inplace=True) + return_value = result.interpolate(method="linear", axis=1, inplace=True) + assert return_value is None tm.assert_frame_equal(result, expected) def test_interp_ignore_all_good(self): @@ -297,7 +300,8 @@ def test_interp_time_inplace_axis(self, axis): expected = DataFrame(index=idx, columns=idx, data=data) result = expected.interpolate(axis=0, method="time") - expected.interpolate(axis=0, method="time", inplace=True) + return_value = expected.interpolate(axis=0, method="time", inplace=True) + assert return_value is None tm.assert_frame_equal(result, expected) @pytest.mark.parametrize("axis_name, axis_number", [("index", 0), ("columns", 1)]) diff --git a/pandas/tests/frame/methods/test_rename.py b/pandas/tests/frame/methods/test_rename.py index ffad526d3f4d1..eb908e9472fe2 100644 --- a/pandas/tests/frame/methods/test_rename.py +++ b/pandas/tests/frame/methods/test_rename.py @@ -150,7 +150,8 @@ def test_rename_inplace(self, float_frame): c_id = id(float_frame["C"]) float_frame = float_frame.copy() - float_frame.rename(columns={"C": "foo"}, inplace=True) + return_value = float_frame.rename(columns={"C": "foo"}, inplace=True) + assert return_value is None assert "C" not in float_frame assert "foo" in float_frame diff --git a/pandas/tests/frame/methods/test_rename_axis.py b/pandas/tests/frame/methods/test_rename_axis.py index 9b964d842526c..3339119841813 100644 --- a/pandas/tests/frame/methods/test_rename_axis.py +++ b/pandas/tests/frame/methods/test_rename_axis.py @@ -10,14 +10,16 @@ def test_rename_axis_inplace(self, float_frame): # GH#15704 expected = float_frame.rename_axis("foo") result = float_frame.copy() - no_return = result.rename_axis("foo", inplace=True) + return_value = no_return = result.rename_axis("foo", inplace=True) + assert return_value is None assert no_return is None tm.assert_frame_equal(result, expected) expected = 
float_frame.rename_axis("bar", axis=1) result = float_frame.copy() - no_return = result.rename_axis("bar", axis=1, inplace=True) + return_value = no_return = result.rename_axis("bar", axis=1, inplace=True) + assert return_value is None assert no_return is None tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/frame/methods/test_replace.py b/pandas/tests/frame/methods/test_replace.py index 498f7f7790514..ea72a3d8fef4d 100644 --- a/pandas/tests/frame/methods/test_replace.py +++ b/pandas/tests/frame/methods/test_replace.py @@ -27,7 +27,8 @@ def test_replace_inplace(self, datetime_frame, float_string_frame): datetime_frame["A"][-5:] = np.nan tsframe = datetime_frame.copy() - tsframe.replace(np.nan, 0, inplace=True) + return_value = tsframe.replace(np.nan, 0, inplace=True) + assert return_value is None tm.assert_frame_equal(tsframe, datetime_frame.fillna(0)) # mixed type @@ -40,7 +41,8 @@ def test_replace_inplace(self, datetime_frame, float_string_frame): tm.assert_frame_equal(result, expected) tsframe = datetime_frame.copy() - tsframe.replace([np.nan], [0], inplace=True) + return_value = tsframe.replace([np.nan], [0], inplace=True) + assert return_value is None tm.assert_frame_equal(tsframe, datetime_frame.fillna(0)) def test_regex_replace_scalar(self, mix_ab): @@ -117,18 +119,21 @@ def test_regex_replace_scalar_inplace(self, mix_ab): # regex -> value # obj frame res = dfobj.copy() - res.replace(r"\s*\.\s*", np.nan, regex=True, inplace=True) + return_value = res.replace(r"\s*\.\s*", np.nan, regex=True, inplace=True) + assert return_value is None tm.assert_frame_equal(dfobj, res.fillna(".")) # mixed res = dfmix.copy() - res.replace(r"\s*\.\s*", np.nan, regex=True, inplace=True) + return_value = res.replace(r"\s*\.\s*", np.nan, regex=True, inplace=True) + assert return_value is None tm.assert_frame_equal(dfmix, res.fillna(".")) # regex -> regex # obj frame res = dfobj.copy() - res.replace(r"\s*(\.)\s*", r"\1\1\1", regex=True, inplace=True) + return_value = res.replace(r"\s*(\.)\s*", r"\1\1\1", regex=True, inplace=True) + assert return_value is None objc = obj.copy() objc["a"] = ["a", "b", "...", "..."] expec = DataFrame(objc) @@ -136,7 +141,8 @@ def test_regex_replace_scalar_inplace(self, mix_ab): # with mixed res = dfmix.copy() - res.replace(r"\s*(\.)\s*", r"\1\1\1", regex=True, inplace=True) + return_value = res.replace(r"\s*(\.)\s*", r"\1\1\1", regex=True, inplace=True) + assert return_value is None mixc = mix_ab.copy() mixc["b"] = ["a", "b", "...", "..."] expec = DataFrame(mixc) @@ -144,18 +150,27 @@ def test_regex_replace_scalar_inplace(self, mix_ab): # everything with compiled regexs as well res = dfobj.copy() - res.replace(re.compile(r"\s*\.\s*"), np.nan, regex=True, inplace=True) + return_value = res.replace( + re.compile(r"\s*\.\s*"), np.nan, regex=True, inplace=True + ) + assert return_value is None tm.assert_frame_equal(dfobj, res.fillna(".")) # mixed res = dfmix.copy() - res.replace(re.compile(r"\s*\.\s*"), np.nan, regex=True, inplace=True) + return_value = res.replace( + re.compile(r"\s*\.\s*"), np.nan, regex=True, inplace=True + ) + assert return_value is None tm.assert_frame_equal(dfmix, res.fillna(".")) # regex -> regex # obj frame res = dfobj.copy() - res.replace(re.compile(r"\s*(\.)\s*"), r"\1\1\1", regex=True, inplace=True) + return_value = res.replace( + re.compile(r"\s*(\.)\s*"), r"\1\1\1", regex=True, inplace=True + ) + assert return_value is None objc = obj.copy() objc["a"] = ["a", "b", "...", "..."] expec = DataFrame(objc) @@ -163,25 +178,31 @@ def 
test_regex_replace_scalar_inplace(self, mix_ab): # with mixed res = dfmix.copy() - res.replace(re.compile(r"\s*(\.)\s*"), r"\1\1\1", regex=True, inplace=True) + return_value = res.replace( + re.compile(r"\s*(\.)\s*"), r"\1\1\1", regex=True, inplace=True + ) + assert return_value is None mixc = mix_ab.copy() mixc["b"] = ["a", "b", "...", "..."] expec = DataFrame(mixc) tm.assert_frame_equal(res, expec) res = dfobj.copy() - res.replace(regex=r"\s*\.\s*", value=np.nan, inplace=True) + return_value = res.replace(regex=r"\s*\.\s*", value=np.nan, inplace=True) + assert return_value is None tm.assert_frame_equal(dfobj, res.fillna(".")) # mixed res = dfmix.copy() - res.replace(regex=r"\s*\.\s*", value=np.nan, inplace=True) + return_value = res.replace(regex=r"\s*\.\s*", value=np.nan, inplace=True) + assert return_value is None tm.assert_frame_equal(dfmix, res.fillna(".")) # regex -> regex # obj frame res = dfobj.copy() - res.replace(regex=r"\s*(\.)\s*", value=r"\1\1\1", inplace=True) + return_value = res.replace(regex=r"\s*(\.)\s*", value=r"\1\1\1", inplace=True) + assert return_value is None objc = obj.copy() objc["a"] = ["a", "b", "...", "..."] expec = DataFrame(objc) @@ -189,7 +210,8 @@ def test_regex_replace_scalar_inplace(self, mix_ab): # with mixed res = dfmix.copy() - res.replace(regex=r"\s*(\.)\s*", value=r"\1\1\1", inplace=True) + return_value = res.replace(regex=r"\s*(\.)\s*", value=r"\1\1\1", inplace=True) + assert return_value is None mixc = mix_ab.copy() mixc["b"] = ["a", "b", "...", "..."] expec = DataFrame(mixc) @@ -197,18 +219,27 @@ def test_regex_replace_scalar_inplace(self, mix_ab): # everything with compiled regexs as well res = dfobj.copy() - res.replace(regex=re.compile(r"\s*\.\s*"), value=np.nan, inplace=True) + return_value = res.replace( + regex=re.compile(r"\s*\.\s*"), value=np.nan, inplace=True + ) + assert return_value is None tm.assert_frame_equal(dfobj, res.fillna(".")) # mixed res = dfmix.copy() - res.replace(regex=re.compile(r"\s*\.\s*"), value=np.nan, inplace=True) + return_value = res.replace( + regex=re.compile(r"\s*\.\s*"), value=np.nan, inplace=True + ) + assert return_value is None tm.assert_frame_equal(dfmix, res.fillna(".")) # regex -> regex # obj frame res = dfobj.copy() - res.replace(regex=re.compile(r"\s*(\.)\s*"), value=r"\1\1\1", inplace=True) + return_value = res.replace( + regex=re.compile(r"\s*(\.)\s*"), value=r"\1\1\1", inplace=True + ) + assert return_value is None objc = obj.copy() objc["a"] = ["a", "b", "...", "..."] expec = DataFrame(objc) @@ -216,7 +247,10 @@ def test_regex_replace_scalar_inplace(self, mix_ab): # with mixed res = dfmix.copy() - res.replace(regex=re.compile(r"\s*(\.)\s*"), value=r"\1\1\1", inplace=True) + return_value = res.replace( + regex=re.compile(r"\s*(\.)\s*"), value=r"\1\1\1", inplace=True + ) + assert return_value is None mixc = mix_ab.copy() mixc["b"] = ["a", "b", "...", "..."] expec = DataFrame(mixc) @@ -290,7 +324,8 @@ def test_regex_replace_list_obj_inplace(self): to_replace_res = [r"\s*\.\s*", r"e|f|g"] values = [np.nan, "crap"] res = dfobj.copy() - res.replace(to_replace_res, values, inplace=True, regex=True) + return_value = res.replace(to_replace_res, values, inplace=True, regex=True) + assert return_value is None expec = DataFrame( { "a": ["a", "b", np.nan, np.nan], @@ -304,7 +339,8 @@ def test_regex_replace_list_obj_inplace(self): to_replace_res = [r"\s*(\.)\s*", r"(e|f|g)"] values = [r"\1\1", r"\1_crap"] res = dfobj.copy() - res.replace(to_replace_res, values, inplace=True, regex=True) + return_value = 
res.replace(to_replace_res, values, inplace=True, regex=True) + assert return_value is None expec = DataFrame( { "a": ["a", "b", "..", ".."], @@ -319,7 +355,8 @@ def test_regex_replace_list_obj_inplace(self): to_replace_res = [r"\s*(\.)\s*", r"e"] values = [r"\1\1", r"crap"] res = dfobj.copy() - res.replace(to_replace_res, values, inplace=True, regex=True) + return_value = res.replace(to_replace_res, values, inplace=True, regex=True) + assert return_value is None expec = DataFrame( { "a": ["a", "b", "..", ".."], @@ -332,7 +369,8 @@ def test_regex_replace_list_obj_inplace(self): to_replace_res = [r"\s*(\.)\s*", r"e"] values = [r"\1\1", r"crap"] res = dfobj.copy() - res.replace(value=values, regex=to_replace_res, inplace=True) + return_value = res.replace(value=values, regex=to_replace_res, inplace=True) + assert return_value is None expec = DataFrame( { "a": ["a", "b", "..", ".."], @@ -391,7 +429,8 @@ def test_regex_replace_list_mixed_inplace(self, mix_ab): to_replace_res = [r"\s*\.\s*", r"a"] values = [np.nan, "crap"] res = dfmix.copy() - res.replace(to_replace_res, values, inplace=True, regex=True) + return_value = res.replace(to_replace_res, values, inplace=True, regex=True) + assert return_value is None expec = DataFrame({"a": mix_ab["a"], "b": ["crap", "b", np.nan, np.nan]}) tm.assert_frame_equal(res, expec) @@ -399,7 +438,8 @@ def test_regex_replace_list_mixed_inplace(self, mix_ab): to_replace_res = [r"\s*(\.)\s*", r"(a|b)"] values = [r"\1\1", r"\1_crap"] res = dfmix.copy() - res.replace(to_replace_res, values, inplace=True, regex=True) + return_value = res.replace(to_replace_res, values, inplace=True, regex=True) + assert return_value is None expec = DataFrame({"a": mix_ab["a"], "b": ["a_crap", "b_crap", "..", ".."]}) tm.assert_frame_equal(res, expec) @@ -408,14 +448,16 @@ def test_regex_replace_list_mixed_inplace(self, mix_ab): to_replace_res = [r"\s*(\.)\s*", r"a", r"(b)"] values = [r"\1\1", r"crap", r"\1_crap"] res = dfmix.copy() - res.replace(to_replace_res, values, inplace=True, regex=True) + return_value = res.replace(to_replace_res, values, inplace=True, regex=True) + assert return_value is None expec = DataFrame({"a": mix_ab["a"], "b": ["crap", "b_crap", "..", ".."]}) tm.assert_frame_equal(res, expec) to_replace_res = [r"\s*(\.)\s*", r"a", r"(b)"] values = [r"\1\1", r"crap", r"\1_crap"] res = dfmix.copy() - res.replace(regex=to_replace_res, value=values, inplace=True) + return_value = res.replace(regex=to_replace_res, value=values, inplace=True) + assert return_value is None expec = DataFrame({"a": mix_ab["a"], "b": ["crap", "b_crap", "..", ".."]}) tm.assert_frame_equal(res, expec) @@ -430,7 +472,10 @@ def test_regex_replace_dict_mixed(self, mix_abc): # frame res = dfmix.replace({"b": r"\s*\.\s*"}, {"b": np.nan}, regex=True) res2 = dfmix.copy() - res2.replace({"b": r"\s*\.\s*"}, {"b": np.nan}, inplace=True, regex=True) + return_value = res2.replace( + {"b": r"\s*\.\s*"}, {"b": np.nan}, inplace=True, regex=True + ) + assert return_value is None expec = DataFrame( {"a": mix_abc["a"], "b": ["a", "b", np.nan, np.nan], "c": mix_abc["c"]} ) @@ -441,7 +486,10 @@ def test_regex_replace_dict_mixed(self, mix_abc): # whole frame res = dfmix.replace({"b": r"\s*(\.)\s*"}, {"b": r"\1ty"}, regex=True) res2 = dfmix.copy() - res2.replace({"b": r"\s*(\.)\s*"}, {"b": r"\1ty"}, inplace=True, regex=True) + return_value = res2.replace( + {"b": r"\s*(\.)\s*"}, {"b": r"\1ty"}, inplace=True, regex=True + ) + assert return_value is None expec = DataFrame( {"a": mix_abc["a"], "b": ["a", "b", ".ty", 
".ty"], "c": mix_abc["c"]} ) @@ -450,7 +498,10 @@ def test_regex_replace_dict_mixed(self, mix_abc): res = dfmix.replace(regex={"b": r"\s*(\.)\s*"}, value={"b": r"\1ty"}) res2 = dfmix.copy() - res2.replace(regex={"b": r"\s*(\.)\s*"}, value={"b": r"\1ty"}, inplace=True) + return_value = res2.replace( + regex={"b": r"\s*(\.)\s*"}, value={"b": r"\1ty"}, inplace=True + ) + assert return_value is None expec = DataFrame( {"a": mix_abc["a"], "b": ["a", "b", ".ty", ".ty"], "c": mix_abc["c"]} ) @@ -464,13 +515,15 @@ def test_regex_replace_dict_mixed(self, mix_abc): ) res = dfmix.replace("a", {"b": np.nan}, regex=True) res2 = dfmix.copy() - res2.replace("a", {"b": np.nan}, regex=True, inplace=True) + return_value = res2.replace("a", {"b": np.nan}, regex=True, inplace=True) + assert return_value is None tm.assert_frame_equal(res, expec) tm.assert_frame_equal(res2, expec) res = dfmix.replace("a", {"b": np.nan}, regex=True) res2 = dfmix.copy() - res2.replace(regex="a", value={"b": np.nan}, inplace=True) + return_value = res2.replace(regex="a", value={"b": np.nan}, inplace=True) + assert return_value is None expec = DataFrame( {"a": mix_abc["a"], "b": [np.nan, "b", ".", "."], "c": mix_abc["c"]} ) @@ -483,9 +536,13 @@ def test_regex_replace_dict_nested(self, mix_abc): res = dfmix.replace({"b": {r"\s*\.\s*": np.nan}}, regex=True) res2 = dfmix.copy() res4 = dfmix.copy() - res2.replace({"b": {r"\s*\.\s*": np.nan}}, inplace=True, regex=True) + return_value = res2.replace( + {"b": {r"\s*\.\s*": np.nan}}, inplace=True, regex=True + ) + assert return_value is None res3 = dfmix.replace(regex={"b": {r"\s*\.\s*": np.nan}}) - res4.replace(regex={"b": {r"\s*\.\s*": np.nan}}, inplace=True) + return_value = res4.replace(regex={"b": {r"\s*\.\s*": np.nan}}, inplace=True) + assert return_value is None expec = DataFrame( {"a": mix_abc["a"], "b": ["a", "b", np.nan, np.nan], "c": mix_abc["c"]} ) @@ -519,8 +576,14 @@ def test_regex_replace_list_to_scalar(self, mix_abc): res = df.replace([r"\s*\.\s*", "a|b"], np.nan, regex=True) res2 = df.copy() res3 = df.copy() - res2.replace([r"\s*\.\s*", "a|b"], np.nan, regex=True, inplace=True) - res3.replace(regex=[r"\s*\.\s*", "a|b"], value=np.nan, inplace=True) + return_value = res2.replace( + [r"\s*\.\s*", "a|b"], np.nan, regex=True, inplace=True + ) + assert return_value is None + return_value = res3.replace( + regex=[r"\s*\.\s*", "a|b"], value=np.nan, inplace=True + ) + assert return_value is None tm.assert_frame_equal(res, expec) tm.assert_frame_equal(res2, expec) tm.assert_frame_equal(res3, expec) @@ -530,9 +593,11 @@ def test_regex_replace_str_to_numeric(self, mix_abc): df = DataFrame(mix_abc) res = df.replace(r"\s*\.\s*", 0, regex=True) res2 = df.copy() - res2.replace(r"\s*\.\s*", 0, inplace=True, regex=True) + return_value = res2.replace(r"\s*\.\s*", 0, inplace=True, regex=True) + assert return_value is None res3 = df.copy() - res3.replace(regex=r"\s*\.\s*", value=0, inplace=True) + return_value = res3.replace(regex=r"\s*\.\s*", value=0, inplace=True) + assert return_value is None expec = DataFrame({"a": mix_abc["a"], "b": ["a", "b", 0, 0], "c": mix_abc["c"]}) tm.assert_frame_equal(res, expec) tm.assert_frame_equal(res2, expec) @@ -542,9 +607,11 @@ def test_regex_replace_regex_list_to_numeric(self, mix_abc): df = DataFrame(mix_abc) res = df.replace([r"\s*\.\s*", "b"], 0, regex=True) res2 = df.copy() - res2.replace([r"\s*\.\s*", "b"], 0, regex=True, inplace=True) + return_value = res2.replace([r"\s*\.\s*", "b"], 0, regex=True, inplace=True) + assert return_value is None res3 = 
df.copy() - res3.replace(regex=[r"\s*\.\s*", "b"], value=0, inplace=True) + return_value = res3.replace(regex=[r"\s*\.\s*", "b"], value=0, inplace=True) + assert return_value is None expec = DataFrame( {"a": mix_abc["a"], "b": ["a", 0, 0, 0], "c": ["a", 0, np.nan, "d"]} ) @@ -558,9 +625,11 @@ def test_regex_replace_series_of_regexes(self, mix_abc): s2 = Series({"b": np.nan}) res = df.replace(s1, s2, regex=True) res2 = df.copy() - res2.replace(s1, s2, inplace=True, regex=True) + return_value = res2.replace(s1, s2, inplace=True, regex=True) + assert return_value is None res3 = df.copy() - res3.replace(regex=s1, value=s2, inplace=True) + return_value = res3.replace(regex=s1, value=s2, inplace=True) + assert return_value is None expec = DataFrame( {"a": mix_abc["a"], "b": ["a", "b", np.nan, np.nan], "c": mix_abc["c"]} ) @@ -714,7 +783,8 @@ def test_replace_mixed(self, float_string_frame): result = df.replace(0, 0.5) tm.assert_frame_equal(result, expected) - df.replace(0, 0.5, inplace=True) + return_value = df.replace(0, 0.5, inplace=True) + assert return_value is None tm.assert_frame_equal(df, expected) # int block splitting @@ -942,7 +1012,8 @@ def test_replace_input_formats_listlike(self): result = df.replace(to_rep, values) expected = df.copy() for i in range(len(to_rep)): - expected.replace(to_rep[i], values[i], inplace=True) + return_value = expected.replace(to_rep[i], values[i], inplace=True) + assert return_value is None tm.assert_frame_equal(result, expected) msg = r"Replacement lists must match in length\. Expecting 3 got 2" @@ -969,7 +1040,8 @@ def test_replace_input_formats_scalar(self): result = df.replace(to_rep, -1) expected = df.copy() for i in range(len(to_rep)): - expected.replace(to_rep[i], -1, inplace=True) + return_value = expected.replace(to_rep[i], -1, inplace=True) + assert return_value is None tm.assert_frame_equal(result, expected) def test_replace_limit(self): @@ -1321,7 +1393,8 @@ def test_categorical_replace_with_dict(self, replace_dict, final_data): with pytest.raises(AssertionError, match=msg): # ensure non-inplace call does not affect original tm.assert_frame_equal(df, expected) - df.replace(replace_dict, 3, inplace=True) + return_value = df.replace(replace_dict, 3, inplace=True) + assert return_value is None tm.assert_frame_equal(df, expected) @pytest.mark.parametrize( diff --git a/pandas/tests/frame/methods/test_reset_index.py b/pandas/tests/frame/methods/test_reset_index.py index cf0bbe144caa5..da4bfa9be4881 100644 --- a/pandas/tests/frame/methods/test_reset_index.py +++ b/pandas/tests/frame/methods/test_reset_index.py @@ -119,7 +119,8 @@ def test_reset_index(self, float_frame): # test resetting in place df = float_frame.copy() resetted = float_frame.reset_index() - df.reset_index(inplace=True) + return_value = df.reset_index(inplace=True) + assert return_value is None tm.assert_frame_equal(df, resetted, check_names=False) df = float_frame.reset_index().set_index(["index", "A", "B"]) @@ -137,7 +138,8 @@ def test_reset_index_name(self): ) assert df.reset_index().index.name is None assert df.reset_index(drop=True).index.name is None - df.reset_index(inplace=True) + return_value = df.reset_index(inplace=True) + assert return_value is None assert df.index.name is None def test_reset_index_level(self): diff --git a/pandas/tests/frame/methods/test_set_index.py b/pandas/tests/frame/methods/test_set_index.py index 5f62697cc3e43..ebe7eabd53b46 100644 --- a/pandas/tests/frame/methods/test_set_index.py +++ b/pandas/tests/frame/methods/test_set_index.py @@ -137,7 +137,8 
@@ def test_set_index_drop_inplace(self, frame_of_index_cols, drop, inplace, keys): if inplace: result = df.copy() - result.set_index(keys, drop=drop, inplace=True) + return_value = result.set_index(keys, drop=drop, inplace=True) + assert return_value is None else: result = df.set_index(keys, drop=drop) diff --git a/pandas/tests/frame/methods/test_sort_index.py b/pandas/tests/frame/methods/test_sort_index.py index 543d87485d3c4..5216c3be116e0 100644 --- a/pandas/tests/frame/methods/test_sort_index.py +++ b/pandas/tests/frame/methods/test_sort_index.py @@ -218,25 +218,29 @@ def test_sort_index_inplace(self): unordered = frame.loc[[3, 2, 4, 1]] a_id = id(unordered["A"]) df = unordered.copy() - df.sort_index(inplace=True) + return_value = df.sort_index(inplace=True) + assert return_value is None expected = frame tm.assert_frame_equal(df, expected) assert a_id != id(df["A"]) df = unordered.copy() - df.sort_index(ascending=False, inplace=True) + return_value = df.sort_index(ascending=False, inplace=True) + assert return_value is None expected = frame[::-1] tm.assert_frame_equal(df, expected) # axis=1 unordered = frame.loc[:, ["D", "B", "C", "A"]] df = unordered.copy() - df.sort_index(axis=1, inplace=True) + return_value = df.sort_index(axis=1, inplace=True) + assert return_value is None expected = frame tm.assert_frame_equal(df, expected) df = unordered.copy() - df.sort_index(axis=1, ascending=False, inplace=True) + return_value = df.sort_index(axis=1, ascending=False, inplace=True) + assert return_value is None expected = frame.iloc[:, ::-1] tm.assert_frame_equal(df, expected) @@ -589,7 +593,8 @@ def test_sort_index_level2(self): # inplace rs = frame.copy() - rs.sort_index(level=0, inplace=True) + return_value = rs.sort_index(level=0, inplace=True) + assert return_value is None tm.assert_frame_equal(rs, frame.sort_index(level=0)) def test_sort_index_level_large_cardinality(self): diff --git a/pandas/tests/frame/methods/test_sort_values.py b/pandas/tests/frame/methods/test_sort_values.py index 1275da01eace9..c60e7e3b1bdb6 100644 --- a/pandas/tests/frame/methods/test_sort_values.py +++ b/pandas/tests/frame/methods/test_sort_values.py @@ -77,22 +77,28 @@ def test_sort_values_inplace(self): ) sorted_df = frame.copy() - sorted_df.sort_values(by="A", inplace=True) + return_value = sorted_df.sort_values(by="A", inplace=True) + assert return_value is None expected = frame.sort_values(by="A") tm.assert_frame_equal(sorted_df, expected) sorted_df = frame.copy() - sorted_df.sort_values(by=1, axis=1, inplace=True) + return_value = sorted_df.sort_values(by=1, axis=1, inplace=True) + assert return_value is None expected = frame.sort_values(by=1, axis=1) tm.assert_frame_equal(sorted_df, expected) sorted_df = frame.copy() - sorted_df.sort_values(by="A", ascending=False, inplace=True) + return_value = sorted_df.sort_values(by="A", ascending=False, inplace=True) + assert return_value is None expected = frame.sort_values(by="A", ascending=False) tm.assert_frame_equal(sorted_df, expected) sorted_df = frame.copy() - sorted_df.sort_values(by=["A", "B"], ascending=False, inplace=True) + return_value = sorted_df.sort_values( + by=["A", "B"], ascending=False, inplace=True + ) + assert return_value is None expected = frame.sort_values(by=["A", "B"], ascending=False) tm.assert_frame_equal(sorted_df, expected) @@ -544,17 +550,24 @@ def test_sort_values_inplace_key(self, sort_by_key): ) sorted_df = frame.copy() - sorted_df.sort_values(by="A", inplace=True, key=sort_by_key) + return_value = sorted_df.sort_values(by="A", 
inplace=True, key=sort_by_key) + assert return_value is None expected = frame.sort_values(by="A", key=sort_by_key) tm.assert_frame_equal(sorted_df, expected) sorted_df = frame.copy() - sorted_df.sort_values(by=1, axis=1, inplace=True, key=sort_by_key) + return_value = sorted_df.sort_values( + by=1, axis=1, inplace=True, key=sort_by_key + ) + assert return_value is None expected = frame.sort_values(by=1, axis=1, key=sort_by_key) tm.assert_frame_equal(sorted_df, expected) sorted_df = frame.copy() - sorted_df.sort_values(by="A", ascending=False, inplace=True, key=sort_by_key) + return_value = sorted_df.sort_values( + by="A", ascending=False, inplace=True, key=sort_by_key + ) + assert return_value is None expected = frame.sort_values(by="A", ascending=False, key=sort_by_key) tm.assert_frame_equal(sorted_df, expected) diff --git a/pandas/tests/frame/test_block_internals.py b/pandas/tests/frame/test_block_internals.py index d5554860c034d..c9fec3215d57f 100644 --- a/pandas/tests/frame/test_block_internals.py +++ b/pandas/tests/frame/test_block_internals.py @@ -64,7 +64,8 @@ def test_consolidate(self, float_frame): float_frame["F"] = 8.0 assert len(float_frame._mgr.blocks) == 3 - float_frame._consolidate(inplace=True) + return_value = float_frame._consolidate(inplace=True) + assert return_value is None assert len(float_frame._mgr.blocks) == 1 def test_consolidate_inplace(self, float_frame): diff --git a/pandas/tests/frame/test_query_eval.py b/pandas/tests/frame/test_query_eval.py index 98a2a33822e3b..628b955a1de92 100644 --- a/pandas/tests/frame/test_query_eval.py +++ b/pandas/tests/frame/test_query_eval.py @@ -413,7 +413,8 @@ def test_date_index_query(self): df = DataFrame(np.random.randn(n, 3)) df["dates1"] = date_range("1/1/2012", periods=n) df["dates3"] = date_range("1/1/2014", periods=n) - df.set_index("dates1", inplace=True, drop=True) + return_value = df.set_index("dates1", inplace=True, drop=True) + assert return_value is None res = df.query("index < 20130101 < dates3", engine=engine, parser=parser) expec = df[(df.index < "20130101") & ("20130101" < df.dates3)] tm.assert_frame_equal(res, expec) @@ -425,7 +426,8 @@ def test_date_index_query_with_NaT(self): df["dates1"] = date_range("1/1/2012", periods=n) df["dates3"] = date_range("1/1/2014", periods=n) df.iloc[0, 0] = pd.NaT - df.set_index("dates1", inplace=True, drop=True) + return_value = df.set_index("dates1", inplace=True, drop=True) + assert return_value is None res = df.query("index < 20130101 < dates3", engine=engine, parser=parser) expec = df[(df.index < "20130101") & ("20130101" < df.dates3)] tm.assert_frame_equal(res, expec) @@ -438,7 +440,8 @@ def test_date_index_query_with_NaT_duplicates(self): d["dates3"] = date_range("1/1/2014", periods=n) df = DataFrame(d) df.loc[np.random.rand(n) > 0.5, "dates1"] = pd.NaT - df.set_index("dates1", inplace=True, drop=True) + return_value = df.set_index("dates1", inplace=True, drop=True) + assert return_value is None res = df.query("dates1 < 20130101 < dates3", engine=engine, parser=parser) expec = df[(df.index.to_series() < "20130101") & ("20130101" < df.dates3)] tm.assert_frame_equal(res, expec) @@ -759,7 +762,8 @@ def test_date_index_query(self): df = DataFrame(np.random.randn(n, 3)) df["dates1"] = date_range("1/1/2012", periods=n) df["dates3"] = date_range("1/1/2014", periods=n) - df.set_index("dates1", inplace=True, drop=True) + return_value = df.set_index("dates1", inplace=True, drop=True) + assert return_value is None res = df.query( "(index < 20130101) & (20130101 < dates3)", 
engine=engine, parser=parser ) @@ -773,7 +777,8 @@ def test_date_index_query_with_NaT(self): df["dates1"] = date_range("1/1/2012", periods=n) df["dates3"] = date_range("1/1/2014", periods=n) df.iloc[0, 0] = pd.NaT - df.set_index("dates1", inplace=True, drop=True) + return_value = df.set_index("dates1", inplace=True, drop=True) + assert return_value is None res = df.query( "(index < 20130101) & (20130101 < dates3)", engine=engine, parser=parser ) @@ -787,7 +792,8 @@ def test_date_index_query_with_NaT_duplicates(self): df["dates1"] = date_range("1/1/2012", periods=n) df["dates3"] = date_range("1/1/2014", periods=n) df.loc[np.random.rand(n) > 0.5, "dates1"] = pd.NaT - df.set_index("dates1", inplace=True, drop=True) + return_value = df.set_index("dates1", inplace=True, drop=True) + assert return_value is None msg = r"'BoolOp' nodes are not implemented" with pytest.raises(NotImplementedError, match=msg): df.query("index < 20130101 < dates3", engine=engine, parser=parser) diff --git a/pandas/tests/frame/test_reshape.py b/pandas/tests/frame/test_reshape.py index 1634baacf6d6e..6a8f1e7c1aca2 100644 --- a/pandas/tests/frame/test_reshape.py +++ b/pandas/tests/frame/test_reshape.py @@ -473,7 +473,8 @@ def test_stack_ints(self): ) df_named = df.copy() - df_named.columns.set_names(range(3), inplace=True) + return_value = df_named.columns.set_names(range(3), inplace=True) + assert return_value is None tm.assert_frame_equal( df_named.stack(level=[1, 2]), df_named.stack(level=1).stack(level=1) diff --git a/pandas/tests/frame/test_to_csv.py b/pandas/tests/frame/test_to_csv.py index 2b7b3af8f4705..db7347bb863a5 100644 --- a/pandas/tests/frame/test_to_csv.py +++ b/pandas/tests/frame/test_to_csv.py @@ -570,7 +570,8 @@ def test_to_csv_headers(self): from_df.to_csv(path, index=False, header=["X", "Y"]) recons = self.read_csv(path) - recons.reset_index(inplace=True) + return_value = recons.reset_index(inplace=True) + assert return_value is None tm.assert_frame_equal(to_df, recons) def test_to_csv_multiindex(self, float_frame, datetime_frame): From f7249109bab9331bd5e262c048c813a48e0101d1 Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Mon, 13 Jul 2020 05:35:00 -0700 Subject: [PATCH 111/632] CLN: annotate (#35242) --- pandas/_libs/hashing.pyx | 2 +- pandas/_libs/tslib.pyx | 21 +++++++++++---------- pandas/_libs/tslibs/strptime.pyx | 24 ++++++++++++------------ 3 files changed, 24 insertions(+), 23 deletions(-) diff --git a/pandas/_libs/hashing.pyx b/pandas/_libs/hashing.pyx index 2d859db22ea23..a98820ca57895 100644 --- a/pandas/_libs/hashing.pyx +++ b/pandas/_libs/hashing.pyx @@ -15,7 +15,7 @@ DEF dROUNDS = 4 @cython.boundscheck(False) -def hash_object_array(ndarray[object] arr, object key, object encoding='utf8'): +def hash_object_array(ndarray[object] arr, str key, str encoding="utf8"): """ Parameters ---------- diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx index d70d0378a2621..35d5cd8f1e275 100644 --- a/pandas/_libs/tslib.pyx +++ b/pandas/_libs/tslib.pyx @@ -5,6 +5,7 @@ from cpython.datetime cimport ( PyDateTime_Check, PyDateTime_IMPORT, datetime, + tzinfo, ) # import datetime C API PyDateTime_IMPORT @@ -93,8 +94,8 @@ def _test_parse_iso8601(ts: str): @cython.boundscheck(False) def format_array_from_datetime( ndarray[int64_t] values, - object tz=None, - object format=None, + tzinfo tz=None, + str format=None, object na_rep=None ): """ @@ -103,8 +104,8 @@ def format_array_from_datetime( Parameters ---------- values : a 1-d i8 array - tz : the timezone (or None) - format : optional, default is 
None + tz : tzinfo or None, default None + format : str or None, default None a strftime capable string na_rep : optional, default is None a nat format @@ -360,7 +361,7 @@ cpdef array_to_datetime( str errors='raise', bint dayfirst=False, bint yearfirst=False, - object utc=None, + bint utc=False, bint require_iso8601=False ): """ @@ -386,7 +387,7 @@ cpdef array_to_datetime( dayfirst parsing behavior when encountering datetime strings yearfirst : bool, default False yearfirst parsing behavior when encountering datetime strings - utc : bool, default None + utc : bool, default False indicator whether the dates should be UTC require_iso8601 : bool, default False indicator whether the datetime string should be iso8601 @@ -412,7 +413,7 @@ cpdef array_to_datetime( bint is_same_offsets _TSObject _ts int64_t value - int out_local=0, out_tzoffset=0 + int out_local = 0, out_tzoffset = 0 float offset_seconds, tz_offset set out_tzoffset_vals = set() bint string_to_dts_failed @@ -659,7 +660,7 @@ cdef array_to_datetime_object( ndarray[object] values, str errors, bint dayfirst=False, - bint yearfirst=False + bint yearfirst=False, ): """ Fall back function for array_to_datetime @@ -671,7 +672,7 @@ cdef array_to_datetime_object( ---------- values : ndarray of object date-like objects to convert - errors : str, default 'raise' + errors : str error behavior when parsing dayfirst : bool, default False dayfirst parsing behavior when encountering datetime strings @@ -684,7 +685,7 @@ cdef array_to_datetime_object( """ cdef: Py_ssize_t i, n = len(values) - object val, + object val bint is_ignore = errors == 'ignore' bint is_coerce = errors == 'coerce' bint is_raise = errors == 'raise' diff --git a/pandas/_libs/tslibs/strptime.pyx b/pandas/_libs/tslibs/strptime.pyx index 884578df3e00b..660b582f73e6e 100644 --- a/pandas/_libs/tslibs/strptime.pyx +++ b/pandas/_libs/tslibs/strptime.pyx @@ -5,7 +5,7 @@ import locale import calendar import re -from cpython cimport datetime +from cpython.datetime cimport date, tzinfo from _thread import allocate_lock as _thread_allocate_lock @@ -291,20 +291,20 @@ def array_strptime(object[:] values, object fmt, bint exact=True, errors='raise' elif iso_year != -1 and iso_week != -1: year, julian = _calc_julian_from_V(iso_year, iso_week, weekday + 1) - # Cannot pre-calculate datetime.date() since can change in Julian + # Cannot pre-calculate date() since can change in Julian # calculation and thus could have different value for the day of the wk # calculation. try: if julian == -1: # Need to add 1 to result since first day of the year is 1, not # 0. - ordinal = datetime.date(year, month, day).toordinal() - julian = ordinal - datetime.date(year, 1, 1).toordinal() + 1 + ordinal = date(year, month, day).toordinal() + julian = ordinal - date(year, 1, 1).toordinal() + 1 else: # Assume that if they bothered to include Julian day it will # be accurate. 
- datetime_result = datetime.date.fromordinal( - (julian - 1) + datetime.date(year, 1, 1).toordinal()) + datetime_result = date.fromordinal( + (julian - 1) + date(year, 1, 1).toordinal()) year = datetime_result.year month = datetime_result.month day = datetime_result.day @@ -314,7 +314,7 @@ def array_strptime(object[:] values, object fmt, bint exact=True, errors='raise' continue raise if weekday == -1: - weekday = datetime.date(year, month, day).weekday() + weekday = date(year, month, day).weekday() dts.year = year dts.month = month @@ -652,7 +652,7 @@ cdef int _calc_julian_from_U_or_W(int year, int week_of_year, cdef: int first_weekday, week_0_length, days_to_week - first_weekday = datetime.date(year, 1, 1).weekday() + first_weekday = date(year, 1, 1).weekday() # If we are dealing with the %U directive (week starts on Sunday), it's # easier to just shift the view to Sunday being the first day of the # week. @@ -695,18 +695,18 @@ cdef (int, int) _calc_julian_from_V(int iso_year, int iso_week, int iso_weekday) cdef: int correction, ordinal - correction = datetime.date(iso_year, 1, 4).isoweekday() + 3 + correction = date(iso_year, 1, 4).isoweekday() + 3 ordinal = (iso_week * 7) + iso_weekday - correction # ordinal may be negative or 0 now, which means the date is in the previous # calendar year if ordinal < 1: - ordinal += datetime.date(iso_year, 1, 1).toordinal() + ordinal += date(iso_year, 1, 1).toordinal() iso_year -= 1 - ordinal -= datetime.date(iso_year, 1, 1).toordinal() + ordinal -= date(iso_year, 1, 1).toordinal() return iso_year, ordinal -cdef parse_timezone_directive(str z): +cdef tzinfo parse_timezone_directive(str z): """ Parse the '%z' directive and return a pytz.FixedOffset From f477a0e8146e3f249d2a5c08031020c15c1ecd95 Mon Sep 17 00:00:00 2001 From: SurajH1 <53505344+SurajH1@users.noreply.github.com> Date: Mon, 13 Jul 2020 07:55:11 -0500 Subject: [PATCH 112/632] TST: concat(..., copy=False) with datetime tz-aware data raises ValueError (#33458) Co-authored-by: Simon Hawkins --- pandas/tests/dtypes/test_concat.py | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/pandas/tests/dtypes/test_concat.py b/pandas/tests/dtypes/test_concat.py index 1fbbd3356ae13..5a9ad732792ea 100644 --- a/pandas/tests/dtypes/test_concat.py +++ b/pandas/tests/dtypes/test_concat.py @@ -88,3 +88,14 @@ def test_concat_mismatched_categoricals_with_empty(): result = _concat.concat_compat([ser1._values, ser2._values]) expected = pd.concat([ser1, ser2])._values tm.assert_categorical_equal(result, expected) + + +@pytest.mark.parametrize("copy", [True, False]) +def test_concat_single_dataframe_tz_aware(copy): + # https://github.com/pandas-dev/pandas/issues/25257 + df = pd.DataFrame( + {"timestamp": [pd.Timestamp("2020-04-08 09:00:00.709949+0000", tz="UTC")]} + ) + expected = df.copy() + result = pd.concat([df], copy=copy) + tm.assert_frame_equal(result, expected) From b600f8d0b5e6765f6a0baed6d5c1794b15d7d258 Mon Sep 17 00:00:00 2001 From: Tom Augspurger Date: Mon, 13 Jul 2020 10:26:59 -0500 Subject: [PATCH 113/632] CI: Ignore setuptools distutils warning (#35260) --- ci/deps/azure-36-32bit.yaml | 2 +- ci/deps/azure-36-locale.yaml | 2 +- ci/deps/azure-36-locale_slow.yaml | 2 +- ci/deps/azure-36-minimum_versions.yaml | 2 +- ci/deps/azure-36-slow.yaml | 2 +- ci/deps/azure-37-locale.yaml | 2 +- ci/deps/azure-37-numpydev.yaml | 2 +- ci/deps/azure-macos-36.yaml | 2 +- ci/deps/azure-windows-36.yaml | 2 +- ci/deps/azure-windows-37.yaml | 2 +- ci/deps/travis-36-cov.yaml | 2 +- 
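The regression test added in patch 112 pins down a call that previously raised; a sketch of the now-passing behavior (same toy timestamp as the test):

import pandas as pd

df = pd.DataFrame(
    {"timestamp": [pd.Timestamp("2020-04-08 09:00:00.709949+0000", tz="UTC")]}
)
result = pd.concat([df], copy=False)  # formerly raised ValueError (GH 25257)
pd.testing.assert_frame_equal(result, df)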
ci/deps/travis-36-locale.yaml | 2 +- ci/deps/travis-37-arm64.yaml | 2 +- ci/deps/travis-37.yaml | 2 +- ci/deps/travis-38.yaml | 2 +- environment.yml | 2 +- pandas/tests/util/test_show_versions.py | 4 ++++ requirements-dev.txt | 2 +- 18 files changed, 21 insertions(+), 17 deletions(-) diff --git a/ci/deps/azure-36-32bit.yaml b/ci/deps/azure-36-32bit.yaml index 15704cf0d5427..2dc53f8181ac4 100644 --- a/ci/deps/azure-36-32bit.yaml +++ b/ci/deps/azure-36-32bit.yaml @@ -23,4 +23,4 @@ dependencies: - pip - pip: - cython>=0.29.16 - - pytest>=5.0.1 + - pytest>=5.0.1,<6.0.0rc0 diff --git a/ci/deps/azure-36-locale.yaml b/ci/deps/azure-36-locale.yaml index a9b9a5a47ccf5..d31015fde4741 100644 --- a/ci/deps/azure-36-locale.yaml +++ b/ci/deps/azure-36-locale.yaml @@ -7,7 +7,7 @@ dependencies: # tools - cython>=0.29.16 - - pytest>=5.0.1 + - pytest>=5.0.1,<6.0.0rc0 - pytest-xdist>=1.21 - pytest-asyncio - hypothesis>=3.58.0 diff --git a/ci/deps/azure-36-locale_slow.yaml b/ci/deps/azure-36-locale_slow.yaml index c086b3651afc3..23121b985492e 100644 --- a/ci/deps/azure-36-locale_slow.yaml +++ b/ci/deps/azure-36-locale_slow.yaml @@ -7,7 +7,7 @@ dependencies: # tools - cython>=0.29.16 - - pytest>=5.0.1 + - pytest>=5.0.1,<6.0.0rc0 - pytest-xdist>=1.21 - hypothesis>=3.58.0 - pytest-azurepipelines diff --git a/ci/deps/azure-36-minimum_versions.yaml b/ci/deps/azure-36-minimum_versions.yaml index f5af7bcf36189..9f66f82720b5b 100644 --- a/ci/deps/azure-36-minimum_versions.yaml +++ b/ci/deps/azure-36-minimum_versions.yaml @@ -6,7 +6,7 @@ dependencies: # tools - cython=0.29.16 - - pytest=5.0.1 + - pytest>=5.0.1, <6.0.0rc0 - pytest-xdist>=1.21 - hypothesis>=3.58.0 - pytest-azurepipelines diff --git a/ci/deps/azure-36-slow.yaml b/ci/deps/azure-36-slow.yaml index 87bad59fa4873..0a6d1d13c8549 100644 --- a/ci/deps/azure-36-slow.yaml +++ b/ci/deps/azure-36-slow.yaml @@ -7,7 +7,7 @@ dependencies: # tools - cython>=0.29.16 - - pytest>=5.0.1 + - pytest>=5.0.1,<6.0.0rc0 - pytest-xdist>=1.21 - hypothesis>=3.58.0 diff --git a/ci/deps/azure-37-locale.yaml b/ci/deps/azure-37-locale.yaml index 81e336cf1ed7f..714e1100b1e1a 100644 --- a/ci/deps/azure-37-locale.yaml +++ b/ci/deps/azure-37-locale.yaml @@ -6,7 +6,7 @@ dependencies: # tools - cython>=0.29.16 - - pytest>=5.0.1 + - pytest>=5.0.1,<6.0.0rc0 - pytest-xdist>=1.21 - pytest-asyncio - hypothesis>=3.58.0 diff --git a/ci/deps/azure-37-numpydev.yaml b/ci/deps/azure-37-numpydev.yaml index 5cb58756a6ac1..451fb5884a4af 100644 --- a/ci/deps/azure-37-numpydev.yaml +++ b/ci/deps/azure-37-numpydev.yaml @@ -5,7 +5,7 @@ dependencies: - python=3.7.* # tools - - pytest>=5.0.1 + - pytest>=5.0.1,<6.0.0rc0 - pytest-xdist>=1.21 - hypothesis>=3.58.0 - pytest-azurepipelines diff --git a/ci/deps/azure-macos-36.yaml b/ci/deps/azure-macos-36.yaml index eeea249a19ca1..81a27465f9e61 100644 --- a/ci/deps/azure-macos-36.yaml +++ b/ci/deps/azure-macos-36.yaml @@ -5,7 +5,7 @@ dependencies: - python=3.6.* # tools - - pytest>=5.0.1 + - pytest>=5.0.1,<6.0.0rc0 - pytest-xdist>=1.21 - hypothesis>=3.58.0 - pytest-azurepipelines diff --git a/ci/deps/azure-windows-36.yaml b/ci/deps/azure-windows-36.yaml index 548660cabaa67..4d7e1d821037b 100644 --- a/ci/deps/azure-windows-36.yaml +++ b/ci/deps/azure-windows-36.yaml @@ -7,7 +7,7 @@ dependencies: # tools - cython>=0.29.16 - - pytest>=5.0.1 + - pytest>=5.0.1,<6.0.0rc0 - pytest-xdist>=1.21 - hypothesis>=3.58.0 - pytest-azurepipelines diff --git a/ci/deps/azure-windows-37.yaml b/ci/deps/azure-windows-37.yaml index 5bbd0e2795d7e..34fca631df6c1 100644 --- 
a/ci/deps/azure-windows-37.yaml +++ b/ci/deps/azure-windows-37.yaml @@ -7,7 +7,7 @@ dependencies: # tools - cython>=0.29.16 - - pytest>=5.0.1 + - pytest>=5.0.1,<6.0.0rc0 - pytest-xdist>=1.21 - hypothesis>=3.58.0 - pytest-azurepipelines diff --git a/ci/deps/travis-36-cov.yaml b/ci/deps/travis-36-cov.yaml index 177e0d3f4c0af..5f5ea8034cddf 100644 --- a/ci/deps/travis-36-cov.yaml +++ b/ci/deps/travis-36-cov.yaml @@ -7,7 +7,7 @@ dependencies: # tools - cython>=0.29.16 - - pytest>=5.0.1 + - pytest>=5.0.1,<6.0.0rc0 - pytest-xdist>=1.21 - hypothesis>=3.58.0 - pytest-cov # this is only needed in the coverage build diff --git a/ci/deps/travis-36-locale.yaml b/ci/deps/travis-36-locale.yaml index 03a1e751b6a86..6bc4aba733ee5 100644 --- a/ci/deps/travis-36-locale.yaml +++ b/ci/deps/travis-36-locale.yaml @@ -7,7 +7,7 @@ dependencies: # tools - cython>=0.29.16 - - pytest>=5.0.1 + - pytest>=5.0.1,<6.0.0rc0 - pytest-xdist>=1.21 - hypothesis>=3.58.0 diff --git a/ci/deps/travis-37-arm64.yaml b/ci/deps/travis-37-arm64.yaml index 5cb53489be225..f434a03609b26 100644 --- a/ci/deps/travis-37-arm64.yaml +++ b/ci/deps/travis-37-arm64.yaml @@ -7,7 +7,7 @@ dependencies: # tools - cython>=0.29.13 - - pytest>=5.0.1 + - pytest>=5.0.1,<6.0.0rc0 - pytest-xdist>=1.21 - hypothesis>=3.58.0 diff --git a/ci/deps/travis-37.yaml b/ci/deps/travis-37.yaml index e896233aac63c..aaf706d61fe5c 100644 --- a/ci/deps/travis-37.yaml +++ b/ci/deps/travis-37.yaml @@ -7,7 +7,7 @@ dependencies: # tools - cython>=0.29.16 - - pytest>=5.0.1 + - pytest>=5.0.1,<6.0.0rc0 - pytest-xdist>=1.21 - hypothesis>=3.58.0 diff --git a/ci/deps/travis-38.yaml b/ci/deps/travis-38.yaml index b879c0f81dab2..ac39a223cd086 100644 --- a/ci/deps/travis-38.yaml +++ b/ci/deps/travis-38.yaml @@ -7,7 +7,7 @@ dependencies: # tools - cython>=0.29.16 - - pytest>=5.0.1 + - pytest>=5.0.1,<6.0.0rc0 - pytest-xdist>=1.21 - hypothesis>=3.58.0 diff --git a/environment.yml b/environment.yml index 80dbffebf6b9d..32ff8c91cb69c 100644 --- a/environment.yml +++ b/environment.yml @@ -51,7 +51,7 @@ dependencies: - botocore>=1.11 - hypothesis>=3.82 - moto # mock S3 - - pytest>=5.0.1 + - pytest>=5.0.1,<6.0.0rc0 - pytest-cov - pytest-xdist>=1.21 - pytest-asyncio diff --git a/pandas/tests/util/test_show_versions.py b/pandas/tests/util/test_show_versions.py index e36ea662fac8b..04e841c05e44a 100644 --- a/pandas/tests/util/test_show_versions.py +++ b/pandas/tests/util/test_show_versions.py @@ -21,6 +21,10 @@ # pandas_datareader "ignore:pandas.util.testing is deprecated:FutureWarning" ) +@pytest.mark.filterwarnings( + # https://github.com/pandas-dev/pandas/issues/35252 + "ignore:Distutils:UserWarning" +) def test_show_versions(capsys): # gh-32041 pd.show_versions() diff --git a/requirements-dev.txt b/requirements-dev.txt index 886f400caf44f..3cda38d4b72f5 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -32,7 +32,7 @@ boto3 botocore>=1.11 hypothesis>=3.82 moto -pytest>=5.0.1 +pytest>=5.0.1,<6.0.0rc0 pytest-cov pytest-xdist>=1.21 pytest-asyncio From 859eb6ad6f666d35cd919f428dedb7c8bbf51c8f Mon Sep 17 00:00:00 2001 From: avinashpancham <44933366+avinashpancham@users.noreply.github.com> Date: Mon, 13 Jul 2020 18:36:11 +0200 Subject: [PATCH 114/632] TST: GH20676 Verify equals operator for list of Numpy arrays (#35237) --- pandas/tests/generic/test_generic.py | 37 +-------------- pandas/tests/series/methods/test_equals.py | 55 ++++++++++++++++++++++ 2 files changed, 56 insertions(+), 36 deletions(-) create mode 100644 pandas/tests/series/methods/test_equals.py diff --git 
a/pandas/tests/generic/test_generic.py b/pandas/tests/generic/test_generic.py
index 94747a52136c4..5e66925a38ec6 100644
--- a/pandas/tests/generic/test_generic.py
+++ b/pandas/tests/generic/test_generic.py
@@ -8,7 +8,7 @@
 from pandas.core.dtypes.common import is_scalar

 import pandas as pd
-from pandas import DataFrame, MultiIndex, Series, date_range
+from pandas import DataFrame, Series, date_range
 import pandas._testing as tm
 import pandas.core.common as com

@@ -785,26 +785,6 @@ def test_depr_take_kwarg_is_copy(self, is_copy):
         s.take([0, 1], is_copy=is_copy)

     def test_equals(self):
-        s1 = pd.Series([1, 2, 3], index=[0, 2, 1])
-        s2 = s1.copy()
-        assert s1.equals(s2)
-
-        s1[1] = 99
-        assert not s1.equals(s2)
-
-        # NaNs compare as equal
-        s1 = pd.Series([1, np.nan, 3, np.nan], index=[0, 2, 1, 3])
-        s2 = s1.copy()
-        assert s1.equals(s2)
-
-        s2[0] = 9.9
-        assert not s1.equals(s2)
-
-        idx = MultiIndex.from_tuples([(0, "a"), (1, "b"), (2, "c")])
-        s1 = Series([1, 2, np.nan], index=idx)
-        s2 = s1.copy()
-        assert s1.equals(s2)
-
         # Add object dtype column with nans
         index = np.random.random(10)
         df1 = DataFrame(np.random.random(10), index=index, columns=["floats"])
@@ -857,21 +837,6 @@ def test_equals(self):
         df2 = df1.set_index(["floats"], append=True)
         assert df3.equals(df2)

-        # GH 8437
-        a = pd.Series([False, np.nan])
-        b = pd.Series([False, np.nan])
-        c = pd.Series(index=range(2), dtype=object)
-        d = c.copy()
-        e = c.copy()
-        f = c.copy()
-        c[:-1] = d[:-1] = e[0] = f[0] = False
-        assert a.equals(a)
-        assert a.equals(b)
-        assert a.equals(c)
-        assert a.equals(d)
-        assert a.equals(e)
-        assert e.equals(f)
-
     def test_pipe(self):
         df = DataFrame({"A": [1, 2, 3]})
         f = lambda x, y: x ** y
diff --git a/pandas/tests/series/methods/test_equals.py b/pandas/tests/series/methods/test_equals.py
new file mode 100644
index 0000000000000..600154adfcda3
--- /dev/null
+++ b/pandas/tests/series/methods/test_equals.py
@@ -0,0 +1,55 @@
+import numpy as np
+import pytest
+
+from pandas import MultiIndex, Series
+
+
+@pytest.mark.parametrize(
+    "arr, idx",
+    [
+        ([1, 2, 3, 4], [0, 2, 1, 3]),
+        ([1, np.nan, 3, np.nan], [0, 2, 1, 3]),
+        (
+            [1, np.nan, 3, np.nan],
+            MultiIndex.from_tuples([(0, "a"), (1, "b"), (2, "c"), (3, "c")]),
+        ),
+    ],
+)
+def test_equals(arr, idx):
+    s1 = Series(arr, index=idx)
+    s2 = s1.copy()
+    assert s1.equals(s2)
+
+    s1[1] = 9
+    assert not s1.equals(s2)
+
+
+def test_equals_list_array():
+    # GH20676 Verify equals operator for list of NumPy arrays
+    arr = np.array([1, 2])
+    s1 = Series([arr, arr])
+    s2 = s1.copy()
+    assert s1.equals(s2)
+
+    # TODO: Series equals should also work between single value and list
+    # s1[1] = 9
+    # assert not s1.equals(s2)
+
+
+def test_equals_false_negative():
+    # GH8437 Verify false negative behavior of equals function for dtype object
+    arr = [False, np.nan]
+    s1 = Series(arr)
+    s2 = s1.copy()
+    s3 = Series(index=range(2), dtype=object)
+    s4 = s3.copy()
+    s5 = s3.copy()
+    s6 = s3.copy()
+
+    s3[:-1] = s4[:-1] = s5[0] = s6[0] = False
+    assert s1.equals(s1)
+    assert s1.equals(s2)
+    assert s1.equals(s3)
+    assert s1.equals(s4)
+    assert s1.equals(s5)
+    assert s5.equals(s6)

From 591da6b50b512837d670a0aaf58c1efb9dae2a61 Mon Sep 17 00:00:00 2001
From: jbrockmendel
Date: Mon, 13 Jul 2020 09:42:46 -0700
Subject: [PATCH 115/632] REF: make tz_convert match pattern elsewhere (#35255)

---
 pandas/_libs/tslibs/tzconversion.pyx | 115 +++++++++++----------------
 1 file changed, 45 insertions(+), 70 deletions(-)

diff --git a/pandas/_libs/tslibs/tzconversion.pyx
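The new test module above documents two subtleties of Series.equals; a condensed sketch (toy data, mirroring the tests):

import numpy as np
import pandas as pd

arr = np.array([1, 2])
s1 = pd.Series([arr, arr])   # object dtype holding ndarrays
assert s1.equals(s1.copy())  # identical array elements compare equal

s2 = pd.Series([1.0, np.nan])
assert s2.equals(s2.copy())  # NaNs in matching positions count as equal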
b/pandas/_libs/tslibs/tzconversion.pyx index a6afd47d93479..606639af16a18 100644 --- a/pandas/_libs/tslibs/tzconversion.pyx +++ b/pandas/_libs/tslibs/tzconversion.pyx @@ -382,7 +382,10 @@ cpdef int64_t tz_convert_from_utc_single(int64_t val, tzinfo tz): converted: int64 """ cdef: - int64_t arr[1] + int64_t delta + int64_t[:] deltas + ndarray[int64_t, ndim=1] trans + intp_t pos if val == NPY_NAT: return val @@ -391,9 +394,14 @@ cpdef int64_t tz_convert_from_utc_single(int64_t val, tzinfo tz): return val elif is_tzlocal(tz): return _tz_convert_tzlocal_utc(val, tz, to_utc=False) + elif is_fixed_offset(tz): + _, deltas, _ = get_dst_info(tz) + delta = deltas[0] + return val + delta else: - arr[0] = val - return _tz_convert_dst(arr, tz)[0] + trans, deltas, _ = get_dst_info(tz) + pos = trans.searchsorted(val, side="right") - 1 + return val + deltas[pos] def tz_convert_from_utc(int64_t[:] vals, tzinfo tz): @@ -435,9 +443,12 @@ cdef int64_t[:] _tz_convert_from_utc(int64_t[:] vals, tzinfo tz): converted : ndarray[int64_t] """ cdef: - int64_t[:] converted + int64_t[:] converted, deltas Py_ssize_t i, n = len(vals) - int64_t val + int64_t val, delta + intp_t[:] pos + ndarray[int64_t] trans + str typ if is_utc(tz): converted = vals @@ -450,7 +461,35 @@ cdef int64_t[:] _tz_convert_from_utc(int64_t[:] vals, tzinfo tz): else: converted[i] = _tz_convert_tzlocal_utc(val, tz, to_utc=False) else: - converted = _tz_convert_dst(vals, tz) + converted = np.empty(n, dtype=np.int64) + + trans, deltas, typ = get_dst_info(tz) + + if typ not in ["pytz", "dateutil"]: + # FixedOffset, we know len(deltas) == 1 + delta = deltas[0] + + for i in range(n): + val = vals[i] + if val == NPY_NAT: + converted[i] = val + else: + converted[i] = val + delta + + else: + pos = trans.searchsorted(vals, side="right") - 1 + + for i in range(n): + val = vals[i] + if val == NPY_NAT: + converted[i] = val + else: + if pos[i] < 0: + # TODO: How is this reached? Should we be checking for + # it elsewhere? + raise ValueError("First time before start of DST info") + + converted[i] = val + deltas[pos[i]] return converted @@ -537,67 +576,3 @@ cdef int64_t _tz_convert_tzlocal_utc(int64_t val, tzinfo tz, bint to_utc=True, return val - delta else: return val + delta - - -@cython.boundscheck(False) -@cython.wraparound(False) -cdef int64_t[:] _tz_convert_dst(const int64_t[:] values, tzinfo tz): - """ - tz_convert for non-UTC non-tzlocal cases where we have to check - DST transitions pointwise. - - Parameters - ---------- - values : ndarray[int64_t] - tz : tzinfo - - Returns - ------- - result : ndarray[int64_t] - """ - cdef: - Py_ssize_t n = len(values) - Py_ssize_t i - intp_t[:] pos - int64_t[:] result = np.empty(n, dtype=np.int64) - ndarray[int64_t] trans - int64_t[:] deltas - int64_t v, delta - str typ - - # tz is assumed _not_ to be tzlocal; that should go - # through _tz_convert_tzlocal_utc - - trans, deltas, typ = get_dst_info(tz) - - if typ not in ["pytz", "dateutil"]: - # FixedOffset, we know len(deltas) == 1 - delta = deltas[0] - - for i in range(n): - v = values[i] - if v == NPY_NAT: - result[i] = v - else: - result[i] = v + delta - - else: - # Previously, this search was done pointwise to try and benefit - # from getting to skip searches for iNaTs. 
However, it seems call - # overhead dominates the search time so doing it once in bulk - # is substantially faster (GH#24603) - pos = trans.searchsorted(values, side="right") - 1 - - for i in range(n): - v = values[i] - if v == NPY_NAT: - result[i] = v - else: - if pos[i] < 0: - # TODO: How is this reached? Should we be checking for - # it elsewhere? - raise ValueError("First time before start of DST info") - - result[i] = v + deltas[pos[i]] - - return result From af964cac27304de08729b79eac12e3b8ef943632 Mon Sep 17 00:00:00 2001 From: Robin to Roxel <35864265+r-toroxel@users.noreply.github.com> Date: Mon, 13 Jul 2020 22:36:04 +0200 Subject: [PATCH 116/632] TST verify return none inplace in tests/indexing (#35230) --- pandas/tests/frame/test_constructors.py | 6 ++++-- pandas/tests/indexing/multiindex/test_indexing_slow.py | 9 ++++++--- pandas/tests/indexing/multiindex/test_ix.py | 3 ++- pandas/tests/indexing/multiindex/test_sorted.py | 6 ++++-- pandas/tests/indexing/multiindex/test_xs.py | 6 ++++-- 5 files changed, 20 insertions(+), 10 deletions(-) diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py index bfff58d05007f..17ac2307b9da6 100644 --- a/pandas/tests/frame/test_constructors.py +++ b/pandas/tests/frame/test_constructors.py @@ -1208,8 +1208,10 @@ def test_constructor_single_row(self): data = [OrderedDict([["a", 1.5], ["b", 3], ["c", 4], ["d", 6]])] result = DataFrame(data) - expected = DataFrame.from_dict(dict(zip([0], data)), orient="index") - tm.assert_frame_equal(result, expected.reindex(result.index)) + expected = DataFrame.from_dict(dict(zip([0], data)), orient="index").reindex( + result.index + ) + tm.assert_frame_equal(result, expected) def test_constructor_ordered_dict_preserve_order(self): # see gh-13304 diff --git a/pandas/tests/indexing/multiindex/test_indexing_slow.py b/pandas/tests/indexing/multiindex/test_indexing_slow.py index ea4453b8dd6eb..be193e0854d8d 100644 --- a/pandas/tests/indexing/multiindex/test_indexing_slow.py +++ b/pandas/tests/indexing/multiindex/test_indexing_slow.py @@ -34,12 +34,15 @@ def validate(mi, df, key): right = df[mask].copy() if i + 1 != len(key): # partial key - right.drop(cols[: i + 1], axis=1, inplace=True) - right.set_index(cols[i + 1 : -1], inplace=True) + return_value = right.drop(cols[: i + 1], axis=1, inplace=True) + assert return_value is None + return_value = right.set_index(cols[i + 1 : -1], inplace=True) + assert return_value is None tm.assert_frame_equal(mi.loc[key[: i + 1]], right) else: # full key - right.set_index(cols[:-1], inplace=True) + return_value = right.set_index(cols[:-1], inplace=True) + assert return_value is None if len(right) == 1: # single hit right = Series( right["jolia"].values, name=right.index[0], index=["jolia"] diff --git a/pandas/tests/indexing/multiindex/test_ix.py b/pandas/tests/indexing/multiindex/test_ix.py index 01b0b392d52a3..abf989324e4a5 100644 --- a/pandas/tests/indexing/multiindex/test_ix.py +++ b/pandas/tests/indexing/multiindex/test_ix.py @@ -35,7 +35,8 @@ def test_loc_general(self): tm.assert_frame_equal(df.loc[key], df.iloc[2:]) # this is ok - df.sort_index(inplace=True) + return_value = df.sort_index(inplace=True) + assert return_value is None res = df.loc[key] # col has float dtype, result should be Float64Index diff --git a/pandas/tests/indexing/multiindex/test_sorted.py b/pandas/tests/indexing/multiindex/test_sorted.py index fdeb3ce95b0bb..572cb9da405d1 100644 --- a/pandas/tests/indexing/multiindex/test_sorted.py +++ 
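The tzconversion refactor in patch 115 above replaces a per-element DST helper with one bulk lookup: a single searchsorted over the transition instants picks the offset for every value at once. A toy analogue of the pattern (synthetic transition table, not real timezone data):

import numpy as np

trans = np.array([0, 100, 200])   # instants at which the offset changes
deltas = np.array([-5, -4, -5])   # offset in force from each transition on
vals = np.array([50, 150, 250])
pos = trans.searchsorted(vals, side="right") - 1
converted = vals + deltas[pos]
assert converted.tolist() == [45, 146, 245]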
b/pandas/tests/indexing/multiindex/test_sorted.py
@@ -43,8 +43,10 @@ def test_frame_getitem_not_sorted2(self, key):
         df2 = df.set_index(["col1", "col2"])
         df2_original = df2.copy()
-        df2.index.set_levels(["b", "d", "a"], level="col1", inplace=True)
-        df2.index.set_codes([0, 1, 0, 2], level="col1", inplace=True)
+        return_value = df2.index.set_levels(["b", "d", "a"], level="col1", inplace=True)
+        assert return_value is None
+        return_value = df2.index.set_codes([0, 1, 0, 2], level="col1", inplace=True)
+        assert return_value is None

         assert not df2.index.is_lexsorted()
         assert not df2.index.is_monotonic
diff --git a/pandas/tests/indexing/multiindex/test_xs.py b/pandas/tests/indexing/multiindex/test_xs.py
index ff748d755c063..b807795b9c309 100644
--- a/pandas/tests/indexing/multiindex/test_xs.py
+++ b/pandas/tests/indexing/multiindex/test_xs.py
@@ -237,9 +237,11 @@ def test_series_getitem_multiindex_xs_by_label():
         [("a", "one"), ("a", "two"), ("b", "one"), ("b", "two")]
     )
     s = Series([1, 2, 3, 4], index=idx)
-    s.index.set_names(["L1", "L2"], inplace=True)
+    return_value = s.index.set_names(["L1", "L2"], inplace=True)
+    assert return_value is None
    expected = Series([1, 3], index=["a", "b"])
-    expected.index.set_names(["L1"], inplace=True)
+    return_value = expected.index.set_names(["L1"], inplace=True)
+    assert return_value is None
    result = s.xs("one", level="L2")
    tm.assert_series_equal(result, expected)

From 0ed1dcd5395a44198c41fd7693ed53133c8a0d78 Mon Sep 17 00:00:00 2001
From: Thomas Smith
Date: Mon, 13 Jul 2020 23:22:45 +0100
Subject: [PATCH 117/632] BUG: ValueError on groupby with categoricals (#35253)

---
 doc/source/whatsnew/v1.1.0.rst           |  1 +
 pandas/core/groupby/generic.py           |  6 ++-
 pandas/tests/groupby/test_categorical.py | 50 ++++++++++++++++++++++++
 3 files changed, 56 insertions(+), 1 deletion(-)

diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst
index 5f93e08d51baa..85b29a58a1f15 100644
--- a/doc/source/whatsnew/v1.1.0.rst
+++ b/doc/source/whatsnew/v1.1.0.rst
@@ -1091,6 +1091,7 @@ Groupby/resample/rolling
 - Bug in :meth:`Rolling.apply` where ``center=True`` was ignored when ``engine='numba'`` was specified (:issue:`34784`)
 - Bug in :meth:`DataFrame.ewm.cov` was throwing ``AssertionError`` for :class:`MultiIndex` inputs (:issue:`34440`)
 - Bug in :meth:`core.groupby.DataFrameGroupBy.transform` when ``func='nunique'`` and columns are of type ``datetime64``, the result would also be of type ``datetime64`` instead of ``int64`` (:issue:`35109`)
+- Bug in :meth:`DataFrameGroupBy.first` and :meth:`DataFrameGroupBy.last` that would raise an unnecessary ``ValueError`` when grouping on multiple ``Categoricals`` (:issue:`34951`)

 Reshaping
 ^^^^^^^^^
diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py
index 1f49ee2b0b665..093e1d4ab3942 100644
--- a/pandas/core/groupby/generic.py
+++ b/pandas/core/groupby/generic.py
@@ -1058,7 +1058,11 @@ def _cython_agg_blocks(
             # reductions; see GH#28949
             obj = obj.iloc[:, 0]

-        s = get_groupby(obj, self.grouper)
+        # Create SeriesGroupBy with observed=True so that it does
+        # not try to add missing categories if grouping over multiple
+        # Categoricals. This will be done later by self._reindex_output().
+        # Doing it here creates an error. See GH#34951
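A sketch of the user-facing failure patch 117 removes (toy data mirroring the new tests; with the fix, unobserved category combinations come back as NaN rows instead of raising):

import pandas as pd

cat = pd.Categorical([0, 0, 1, 1])
df = pd.DataFrame({"a": cat, "b": cat, "c": [0, 1, 1, 0]})
result = df.groupby(["a", "b"]).first()  # formerly raised ValueError (GH 34951)
assert result["c"].isna().sum() == 2     # (0, 1) and (1, 0) are unobserved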
+        s = get_groupby(obj, self.grouper, observed=True)
         try:
             result = s.aggregate(lambda x: alt(x, axis=self.axis))
         except TypeError:
diff --git a/pandas/tests/groupby/test_categorical.py b/pandas/tests/groupby/test_categorical.py
index 118d928ac02f4..7e4513da37dc9 100644
--- a/pandas/tests/groupby/test_categorical.py
+++ b/pandas/tests/groupby/test_categorical.py
@@ -1669,3 +1669,53 @@ def test_categorical_transform():
     expected["status"] = expected["status"].astype(delivery_status_type)

     tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize("func", ["first", "last"])
+def test_series_groupby_first_on_categorical_col_grouped_on_2_categoricals(
+    func: str, observed: bool
+):
+    # GH 34951
+    cat = pd.Categorical([0, 0, 1, 1])
+    val = [0, 1, 1, 0]
+    df = pd.DataFrame({"a": cat, "b": cat, "c": val})
+
+    idx = pd.Categorical([0, 1])
+    idx = pd.MultiIndex.from_product([idx, idx], names=["a", "b"])
+    expected_dict = {
+        "first": pd.Series([0, np.NaN, np.NaN, 1], idx, name="c"),
+        "last": pd.Series([1, np.NaN, np.NaN, 0], idx, name="c"),
+    }
+
+    expected = expected_dict[func]
+    if observed:
+        expected = expected.dropna().astype(np.int64)
+
+    srs_grp = df.groupby(["a", "b"], observed=observed)["c"]
+    result = getattr(srs_grp, func)()
+    tm.assert_series_equal(result, expected)
+
+
+@pytest.mark.parametrize("func", ["first", "last"])
+def test_df_groupby_first_on_categorical_col_grouped_on_2_categoricals(
+    func: str, observed: bool
+):
+    # GH 34951
+    cat = pd.Categorical([0, 0, 1, 1])
+    val = [0, 1, 1, 0]
+    df = pd.DataFrame({"a": cat, "b": cat, "c": val})
+
+    idx = pd.Categorical([0, 1])
+    idx = pd.MultiIndex.from_product([idx, idx], names=["a", "b"])
+    expected_dict = {
+        "first": pd.Series([0, np.NaN, np.NaN, 1], idx, name="c"),
+        "last": pd.Series([1, np.NaN, np.NaN, 0], idx, name="c"),
+    }
+
+    expected = expected_dict[func].to_frame()
+    if observed:
+        expected = expected.dropna().astype(np.int64)
+
+    df_grp = df.groupby(["a", "b"], observed=observed)
+    result = getattr(df_grp, func)()
+    tm.assert_frame_equal(result, expected)

From 76bdaeaebf58bbfed25b66469f1883962f926427 Mon Sep 17 00:00:00 2001
From: Simon Hawkins
Date: Tue, 14 Jul 2020 14:56:49 +0100
Subject: [PATCH 118/632] CI: pin pytest in minimum versions (#35274)

---
 ci/deps/azure-36-minimum_versions.yaml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/ci/deps/azure-36-minimum_versions.yaml b/ci/deps/azure-36-minimum_versions.yaml
index 9f66f82720b5b..f5af7bcf36189 100644
--- a/ci/deps/azure-36-minimum_versions.yaml
+++ b/ci/deps/azure-36-minimum_versions.yaml
@@ -6,7 +6,7 @@ dependencies:

   # tools
   - cython=0.29.16
-  - pytest>=5.0.1, <6.0.0rc0
+  - pytest=5.0.1
   - pytest-xdist>=1.21
   - hypothesis>=3.58.0
   - pytest-azurepipelines

From 84e83b1d917236ef76abc314258e1a37504a2259 Mon Sep 17 00:00:00 2001
From: Lewis Cowles
Date: Tue, 14 Jul 2020 18:05:58 +0100
Subject: [PATCH 119/632] Improved error message for invalid construction (#35190)

---
 doc/source/whatsnew/v1.1.0.rst               | 1 +
 pandas/core/internals/construction.py        | 7 ++++++-
 pandas/tests/extension/base/setitem.py       | 5 ++++-
 pandas/tests/frame/indexing/test_indexing.py | 9 ++++++---
 pandas/tests/frame/indexing/test_setitem.py  | 5 ++++-
 5 files changed, 21 insertions(+), 6 deletions(-)

diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst
index 85b29a58a1f15..a460ad247e152 100644
--- a/doc/source/whatsnew/v1.1.0.rst
+++ b/doc/source/whatsnew/v1.1.0.rst
@@ -1170,6 +1170,7 @@ Other
 - Bug in :class:`Tick` comparisons raising
``TypeError`` when comparing against timedelta-like objects (:issue:`34088`) - Bug in :class:`Tick` multiplication raising ``TypeError`` when multiplying by a float (:issue:`34486`) - Passing a `set` as `names` argument to :func:`pandas.read_csv`, :func:`pandas.read_table`, or :func:`pandas.read_fwf` will raise ``ValueError: Names should be an ordered collection.`` (:issue:`34946`) +- Improved error message for invalid construction of list when creating a new index (:issue:`35190`) .. --------------------------------------------------------------------------- diff --git a/pandas/core/internals/construction.py b/pandas/core/internals/construction.py index 4b9db810dead0..2d4163e0dee89 100644 --- a/pandas/core/internals/construction.py +++ b/pandas/core/internals/construction.py @@ -744,7 +744,12 @@ def sanitize_index(data, index: Index): through a non-Index. """ if len(data) != len(index): - raise ValueError("Length of values does not match length of index") + raise ValueError( + "Length of values " + f"({len(data)}) " + "does not match length of index " + f"({len(index)})" + ) if isinstance(data, np.ndarray): diff --git a/pandas/tests/extension/base/setitem.py b/pandas/tests/extension/base/setitem.py index bfa53ad02525b..a4e6fc0f78cbb 100644 --- a/pandas/tests/extension/base/setitem.py +++ b/pandas/tests/extension/base/setitem.py @@ -244,7 +244,10 @@ def test_setitem_expand_with_extension(self, data): def test_setitem_frame_invalid_length(self, data): df = pd.DataFrame({"A": [1] * len(data)}) - xpr = "Length of values does not match length of index" + xpr = ( + rf"Length of values \({len(data[:5])}\) " + rf"does not match length of index \({len(df)}\)" + ) with pytest.raises(ValueError, match=xpr): df["B"] = data[:5] diff --git a/pandas/tests/frame/indexing/test_indexing.py b/pandas/tests/frame/indexing/test_indexing.py index 3fa3c9303806f..d27487dfb8aaa 100644 --- a/pandas/tests/frame/indexing/test_indexing.py +++ b/pandas/tests/frame/indexing/test_indexing.py @@ -160,10 +160,13 @@ def test_setitem_list(self, float_frame): msg = "Columns must be same length as key" with pytest.raises(ValueError, match=msg): data[["A"]] = float_frame[["A", "B"]] - - msg = "Length of values does not match length of index" + newcolumndata = range(len(data.index) - 1) + msg = ( + rf"Length of values \({len(newcolumndata)}\) " + rf"does not match length of index \({len(data)}\)" + ) with pytest.raises(ValueError, match=msg): - data["A"] = range(len(data.index) - 1) + data["A"] = newcolumndata df = DataFrame(0, index=range(3), columns=["tt1", "tt2"], dtype=np.int_) df.loc[1, ["tt1", "tt2"]] = [1, 2] diff --git a/pandas/tests/frame/indexing/test_setitem.py b/pandas/tests/frame/indexing/test_setitem.py index 9bb5338f1e07f..c5945edfd3127 100644 --- a/pandas/tests/frame/indexing/test_setitem.py +++ b/pandas/tests/frame/indexing/test_setitem.py @@ -117,7 +117,10 @@ def test_setitem_wrong_length_categorical_dtype_raises(self): cat = Categorical.from_codes([0, 1, 1, 0, 1, 2], ["a", "b", "c"]) df = DataFrame(range(10), columns=["bar"]) - msg = "Length of values does not match length of index" + msg = ( + rf"Length of values \({len(cat)}\) " + rf"does not match length of index \({len(df)}\)" + ) with pytest.raises(ValueError, match=msg): df["foo"] = cat From 1418ce81e03f3b17311e19158f62672fab55f904 Mon Sep 17 00:00:00 2001 From: MBrouns Date: Tue, 14 Jul 2020 19:08:44 +0200 Subject: [PATCH 120/632] Deprecate `center` on `df.expanding` (#34887) --- doc/source/whatsnew/v1.1.0.rst | 3 +++ pandas/core/generic.py | 12 
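The construction.py change above makes the length mismatch explicit in the message; a sketch of what users now see (toy frame):

import pandas as pd

df = pd.DataFrame({"A": [1, 2, 3]})
try:
    df["B"] = [1, 2]
except ValueError as err:
    # "Length of values (2) does not match length of index (3)"
    assert "(2)" in str(err) and "(3)" in str(err)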
+++++++++++- pandas/core/window/expanding.py | 2 +- .../test_moments_consistency_expanding.py | 10 +++++----- pandas/tests/window/test_api.py | 11 ++--------- pandas/tests/window/test_expanding.py | 16 ++++++++++++++++ 6 files changed, 38 insertions(+), 16 deletions(-) diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst index a460ad247e152..56c816eecb5ca 100644 --- a/doc/source/whatsnew/v1.1.0.rst +++ b/doc/source/whatsnew/v1.1.0.rst @@ -825,6 +825,9 @@ Deprecations precision through the ``rtol``, and ``atol`` parameters, thus deprecating the ``check_less_precise`` parameter. (:issue:`13357`). - :func:`DataFrame.melt` accepting a value_name that already exists is deprecated, and will be removed in a future version (:issue:`34731`) +- the ``center`` keyword in the :meth:`DataFrame.expanding` function is deprecated and will be removed in a future version (:issue:`20647`) + + .. --------------------------------------------------------------------------- diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 571fcc67f3bb5..ece4281af3208 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -10500,8 +10500,18 @@ def rolling( cls.rolling = rolling @doc(Expanding) - def expanding(self, min_periods=1, center=False, axis=0): + def expanding(self, min_periods=1, center=None, axis=0): axis = self._get_axis_number(axis) + if center is not None: + warnings.warn( + "The `center` argument on `expanding` " + "will be removed in the future", + FutureWarning, + stacklevel=2, + ) + else: + center = False + return Expanding(self, min_periods=min_periods, center=center, axis=axis) cls.expanding = expanding diff --git a/pandas/core/window/expanding.py b/pandas/core/window/expanding.py index bbc19fad8b799..8267cd4f0971e 100644 --- a/pandas/core/window/expanding.py +++ b/pandas/core/window/expanding.py @@ -57,7 +57,7 @@ class Expanding(_Rolling_and_Expanding): _attributes = ["min_periods", "center", "axis"] - def __init__(self, obj, min_periods=1, center=False, axis=0, **kwargs): + def __init__(self, obj, min_periods=1, center=None, axis=0, **kwargs): super().__init__(obj=obj, min_periods=min_periods, center=center, axis=axis) @property diff --git a/pandas/tests/window/moments/test_moments_consistency_expanding.py b/pandas/tests/window/moments/test_moments_consistency_expanding.py index ee3579d76d1db..3ec91dcb60610 100644 --- a/pandas/tests/window/moments/test_moments_consistency_expanding.py +++ b/pandas/tests/window/moments/test_moments_consistency_expanding.py @@ -119,8 +119,8 @@ def test_expanding_corr_pairwise(frame): ids=["sum", "mean", "max", "min"], ) def test_expanding_func(func, static_comp, has_min_periods, series, frame, nan_locs): - def expanding_func(x, min_periods=1, center=False, axis=0): - exp = x.expanding(min_periods=min_periods, center=center, axis=axis) + def expanding_func(x, min_periods=1, axis=0): + exp = x.expanding(min_periods=min_periods, axis=axis) return getattr(exp, func)() _check_expanding( @@ -166,7 +166,7 @@ def test_expanding_apply_consistency( with warnings.catch_warnings(): warnings.filterwarnings( - "ignore", message=".*(empty slice|0 for slice).*", category=RuntimeWarning, + "ignore", message=".*(empty slice|0 for slice).*", category=RuntimeWarning ) # test consistency between expanding_xyz() and either (a) # expanding_apply of Series.xyz(), or (b) expanding_apply of @@ -267,7 +267,7 @@ def test_expanding_consistency(consistency_data, min_periods): # with empty/0-length Series/DataFrames with warnings.catch_warnings(): 
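The generic.py hunk above turns any explicit ``center`` argument into a FutureWarning while leaving the no-argument call silent; a sketch (mirrors the new test below):

import pandas as pd
import pandas._testing as tm

df = pd.DataFrame({"A": range(5)})
with tm.assert_produces_warning(FutureWarning):
    df.expanding(center=False)  # passing center at all now warns
with tm.assert_produces_warning(None):
    df.expanding()              # omitting it stays quiet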
warnings.filterwarnings( - "ignore", message=".*(empty slice|0 for slice).*", category=RuntimeWarning, + "ignore", message=".*(empty slice|0 for slice).*", category=RuntimeWarning ) # test consistency between different expanding_* moments @@ -454,7 +454,7 @@ def test_expanding_cov_pairwise_diff_length(): def test_expanding_corr_pairwise_diff_length(): # GH 7512 df1 = DataFrame( - [[1, 2], [3, 2], [3, 4]], columns=["A", "B"], index=Index(range(3), name="bar"), + [[1, 2], [3, 2], [3, 4]], columns=["A", "B"], index=Index(range(3), name="bar") ) df1a = DataFrame( [[1, 2], [3, 4]], index=Index([0, 2], name="bar"), columns=["A", "B"] diff --git a/pandas/tests/window/test_api.py b/pandas/tests/window/test_api.py index 33fb79d98a324..2c3d8b4608806 100644 --- a/pandas/tests/window/test_api.py +++ b/pandas/tests/window/test_api.py @@ -107,10 +107,7 @@ def test_agg(): with pytest.raises(SpecificationError, match=msg): r.aggregate( - { - "A": {"mean": "mean", "sum": "sum"}, - "B": {"mean2": "mean", "sum2": "sum"}, - } + {"A": {"mean": "mean", "sum": "sum"}, "B": {"mean2": "mean", "sum2": "sum"}} ) result = r.aggregate({"A": ["mean", "std"], "B": ["mean", "std"]}) @@ -191,11 +188,7 @@ def test_count_nonnumeric_types(): "dt_nat", "periods_nat", ] - dt_nat_col = [ - Timestamp("20170101"), - Timestamp("20170203"), - Timestamp(None), - ] + dt_nat_col = [Timestamp("20170101"), Timestamp("20170203"), Timestamp(None)] df = DataFrame( { diff --git a/pandas/tests/window/test_expanding.py b/pandas/tests/window/test_expanding.py index 30d65ebe84a1f..146eca07c523e 100644 --- a/pandas/tests/window/test_expanding.py +++ b/pandas/tests/window/test_expanding.py @@ -16,6 +16,9 @@ def test_doc_string(): df.expanding(2).sum() +@pytest.mark.filterwarnings( + "ignore:The `center` argument on `expanding` will be removed in the future" +) def test_constructor(which): # GH 12669 @@ -213,3 +216,16 @@ def test_iter_expanding_series(ser, expected, min_periods): for (expected, actual) in zip(expected, ser.expanding(min_periods)): tm.assert_series_equal(actual, expected) + + +def test_center_deprecate_warning(): + # GH 20647 + df = pd.DataFrame() + with tm.assert_produces_warning(FutureWarning): + df.expanding(center=True) + + with tm.assert_produces_warning(FutureWarning): + df.expanding(center=False) + + with tm.assert_produces_warning(None): + df.expanding() From bfb371549a2e3e548f2591f32326a8951bad592c Mon Sep 17 00:00:00 2001 From: Tom Augspurger Date: Tue, 14 Jul 2020 12:09:41 -0500 Subject: [PATCH 121/632] Move API changes to appropriate sections (#35273) --- doc/source/whatsnew/v1.1.0.rst | 71 ++++++++++++++++------------------ 1 file changed, 34 insertions(+), 37 deletions(-) diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst index 56c816eecb5ca..088f1d1946fa9 100644 --- a/doc/source/whatsnew/v1.1.0.rst +++ b/doc/source/whatsnew/v1.1.0.rst @@ -276,6 +276,10 @@ change, as ``fsspec`` will still bring in the same packages as before. Other enhancements ^^^^^^^^^^^^^^^^^^ +- Added :class:`pandas.errors.InvalidIndexError` (:issue:`34570`). +- Added :meth:`DataFrame.value_counts` (:issue:`5377`) +- Added a :func:`pandas.api.indexers.FixedForwardWindowIndexer` class to support forward-looking windows during ``rolling`` operations. 
+- Added a :func:`pandas.api.indexers.VariableOffsetWindowIndexer` class to support ``rolling`` operations with non-fixed offsets (:issue:`34994`) - :class:`Styler` may now render CSS more efficiently where multiple cells have the same styling (:issue:`30876`) - :meth:`Styler.highlight_null` now accepts ``subset`` argument (:issue:`31345`) - When writing directly to a sqlite connection :func:`to_sql` now supports the ``multi`` method (:issue:`29921`) @@ -336,10 +340,12 @@ Other enhancements .. --------------------------------------------------------------------------- -.. _whatsnew_110.api: +.. _whatsnew_110.notable_bug_fixes: + +Notable bug fixes +~~~~~~~~~~~~~~~~~ -Backwards incompatible API changes -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +These are bug fixes that might have notable behavior changes. ``MultiIndex.get_indexer`` interprets `method` argument differently ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -403,7 +409,7 @@ And the differences in reindexing ``df`` with ``mi_2`` and using ``method='pad'` - -.. _whatsnew_110.api_breaking.indexing_raises_key_errors: +.. _whatsnew_110.notable_bug_fixes.indexing_raises_key_errors: Failed Label-Based Lookups Always Raise KeyError ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -473,7 +479,10 @@ key and type of :class:`Index`. These now consistently raise ``KeyError`` (:iss ... KeyError: Timestamp('1970-01-01 00:00:00') -.. _whatsnew_110.api_breaking.indexing_int_multiindex_raises_key_errors: + +Similarly, :meth:`DataFrame.at` and :meth:`Series.at` will raise a ``TypeError`` instead of a ``ValueError`` if an incompatible key is passed, and ``KeyError`` if a missing key is passed, matching the behavior of ``.loc[]`` (:issue:`31722`) + +.. _whatsnew_110.notable_bug_fixes.indexing_int_multiindex_raises_key_errors: Failed Integer Lookups on MultiIndex Raise KeyError ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -531,7 +540,7 @@ those integer keys is not present in the first level of the index (:issue:`33539 .. --------------------------------------------------------------------------- -.. _whatsnew_110.api_breaking.assignment_to_multiple_columns: +.. _whatsnew_110.notable_bug_fixes.assignment_to_multiple_columns: Assignment to multiple columns of a DataFrame when some columns do not exist ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -562,7 +571,7 @@ Assignment to multiple columns of a :class:`DataFrame` when some of the columns df[['a', 'c']] = 1 df -.. _whatsnew_110.api_breaking.groupby_consistency: +.. _whatsnew_110.notable_bug_fixes.groupby_consistency: Consistency across groupby reductions ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -628,7 +637,7 @@ The method :meth:`core.DataFrameGroupBy.size` would previously ignore ``as_index df.groupby("a", as_index=False).size() -.. _whatsnew_110.api_breaking.apply_applymap_first_once: +.. 
_whatsnew_110.notable_bug_fixes.apply_applymap_first_once: apply and applymap on ``DataFrame`` evaluates first row/column only once ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -673,34 +682,6 @@ Other API changes - :meth:`Series.describe` will now show distribution percentiles for ``datetime`` dtypes, statistics ``first`` and ``last`` will now be ``min`` and ``max`` to match with numeric dtypes in :meth:`DataFrame.describe` (:issue:`30164`) -- Added :meth:`DataFrame.value_counts` (:issue:`5377`) -- :meth:`Groupby.groups` now returns an abbreviated representation when called on large dataframes (:issue:`1135`) -- ``loc`` lookups with an object-dtype :class:`Index` and an integer key will now raise ``KeyError`` instead of ``TypeError`` when key is missing (:issue:`31905`) -- Using a :func:`pandas.api.indexers.BaseIndexer` with ``count``, ``min``, ``max``, ``median``, ``skew``, ``cov``, ``corr`` will now return correct results for any monotonic :func:`pandas.api.indexers.BaseIndexer` descendant (:issue:`32865`) -- Added a :func:`pandas.api.indexers.FixedForwardWindowIndexer` class to support forward-looking windows during ``rolling`` operations. -- Added a :func:`pandas.api.indexers.VariableOffsetWindowIndexer` class to support ``rolling`` operations with non-fixed offsets (:issue:`34994`) -- Added :class:`pandas.errors.InvalidIndexError` (:issue:`34570`). -- :meth:`DataFrame.swaplevels` now raises a ``TypeError`` if the axis is not a :class:`MultiIndex`. - Previously an ``AttributeError`` was raised (:issue:`31126`) -- :meth:`DataFrame.xs` now raises a ``TypeError`` if a ``level`` keyword is supplied and the axis is not a :class:`MultiIndex`. - Previously an ``AttributeError`` was raised (:issue:`33610`) -- :meth:`DataFrameGroupby.mean` and :meth:`SeriesGroupby.mean` (and similarly for :meth:`~DataFrameGroupby.median`, :meth:`~DataFrameGroupby.std` and :meth:`~DataFrameGroupby.var`) - now raise a ``TypeError`` if a not-accepted keyword argument is passed into it. - Previously a ``UnsupportedFunctionCall`` was raised (``AssertionError`` if ``min_count`` passed into :meth:`~DataFrameGroupby.median`) (:issue:`31485`) -- :meth:`DataFrame.at` and :meth:`Series.at` will raise a ``TypeError`` instead of a ``ValueError`` if an incompatible key is passed, and ``KeyError`` if a missing key is passed, matching the behavior of ``.loc[]`` (:issue:`31722`) -- Passing an integer dtype other than ``int64`` to ``np.array(period_index, dtype=...)`` will now raise ``TypeError`` instead of incorrectly using ``int64`` (:issue:`32255`) -- Passing an invalid ``fill_value`` to :meth:`Categorical.take` raises a ``ValueError`` instead of ``TypeError`` (:issue:`33660`) -- Combining a ``Categorical`` with integer categories and which contains missing values - with a float dtype column in operations such as :func:`concat` or :meth:`~DataFrame.append` - will now result in a float column instead of an object dtyped column (:issue:`33607`) -- :meth:`Series.to_timestamp` now raises a ``TypeError`` if the axis is not a :class:`PeriodIndex`. Previously an ``AttributeError`` was raised (:issue:`33327`) -- :meth:`Series.to_period` now raises a ``TypeError`` if the axis is not a :class:`DatetimeIndex`. Previously an ``AttributeError`` was raised (:issue:`33327`) -- :func: `pandas.api.dtypes.is_string_dtype` no longer incorrectly identifies categorical series as string. -- :func:`read_excel` no longer takes ``**kwds`` arguments. 
This means that passing in keyword ``chunksize`` now raises a ``TypeError`` - (previously raised a ``NotImplementedError``), while passing in keyword ``encoding`` now raises a ``TypeError`` (:issue:`34464`) -- :class:`Period` no longer accepts tuples for the ``freq`` argument (:issue:`34658`) -- :meth:`Series.interpolate` and :meth:`DataFrame.interpolate` now raises ValueError if ``limit_direction`` is 'forward' or 'both' and ``method`` is 'backfill' or 'bfill' or ``limit_direction`` is 'backward' or 'both' and ``method`` is 'pad' or 'ffill' (:issue:`34746`) -- The :class:`DataFrame` constructor no longer accepts a list of ``DataFrame`` objects. Because of changes to NumPy, ``DataFrame`` objects are now consistently treated as 2D objects, so a list of ``DataFrames`` is considered 3D, and no longer acceptible for the ``DataFrame`` constructor (:issue:`32289`). Increased minimum versions for dependencies @@ -871,6 +852,8 @@ Bug fixes Categorical ^^^^^^^^^^^ +- Passing an invalid ``fill_value`` to :meth:`Categorical.take` raises a ``ValueError`` instead of ``TypeError`` (:issue:`33660`) +- Combining a ``Categorical`` with integer categories and which contains missing values with a float dtype column in operations such as :func:`concat` or :meth:`~DataFrame.append` will now result in a float column instead of an object dtyped column (:issue:`33607`) - Bug where :func:`merge` was unable to join on non-unique categorical indices (:issue:`28189`) - Bug when passing categorical data to :class:`Index` constructor along with ``dtype=object`` incorrectly returning a :class:`CategoricalIndex` instead of object-dtype :class:`Index` (:issue:`32167`) - Bug where :class:`Categorical` comparison operator ``__ne__`` would incorrectly evaluate to ``False`` when either element was missing (:issue:`32276`) @@ -880,6 +863,10 @@ Categorical Datetimelike ^^^^^^^^^^^^ +- Passing an integer dtype other than ``int64`` to ``np.array(period_index, dtype=...)`` will now raise ``TypeError`` instead of incorrectly using ``int64`` (:issue:`32255`) +- :meth:`Series.to_timestamp` now raises a ``TypeError`` if the axis is not a :class:`PeriodIndex`. Previously an ``AttributeError`` was raised (:issue:`33327`) +- :meth:`Series.to_period` now raises a ``TypeError`` if the axis is not a :class:`DatetimeIndex`. Previously an ``AttributeError`` was raised (:issue:`33327`) +- :class:`Period` no longer accepts tuples for the ``freq`` argument (:issue:`34658`) - Bug in :class:`Timestamp` where constructing :class:`Timestamp` from ambiguous epoch time and calling constructor again changed :meth:`Timestamp.value` property (:issue:`24329`) - :meth:`DatetimeArray.searchsorted`, :meth:`TimedeltaArray.searchsorted`, :meth:`PeriodArray.searchsorted` not recognizing non-pandas scalars and incorrectly raising ``ValueError`` instead of ``TypeError`` (:issue:`30950`) - Bug in :class:`Timestamp` where constructing :class:`Timestamp` with dateutil timezone less than 128 nanoseconds before daylight saving time switch from winter to summer would result in nonexistent time (:issue:`31043`) @@ -947,7 +934,7 @@ Strings - Bug in the :meth:`~Series.astype` method when converting "string" dtype data to nullable integer dtype (:issue:`32450`). - Fixed issue where taking ``min`` or ``max`` of a ``StringArray`` or ``Series`` with ``StringDtype`` type would raise. 
(:issue:`31746`)
 - Bug in :meth:`Series.str.cat` returning ``NaN`` output when other had :class:`Index` type (:issue:`33425`)
-
+- :func:`pandas.api.dtypes.is_string_dtype` no longer incorrectly identifies categorical series as string.

 Interval
 ^^^^^^^^
@@ -956,6 +943,8 @@ Interval

 Indexing
 ^^^^^^^^
+
+- :meth:`DataFrame.xs` now raises a ``TypeError`` if a ``level`` keyword is supplied and the axis is not a :class:`MultiIndex`. Previously an ``AttributeError`` was raised (:issue:`33610`)
 - Bug in slicing on a :class:`DatetimeIndex` with a partial-timestamp dropping high-resolution indices near the end of a year, quarter, or month (:issue:`31064`)
 - Bug in :meth:`PeriodIndex.get_loc` treating higher-resolution strings differently from :meth:`PeriodIndex.get_value` (:issue:`31172`)
 - Bug in :meth:`Series.at` and :meth:`DataFrame.at` not matching ``.loc`` behavior when looking up an integer in a :class:`Float64Index` (:issue:`31329`)
@@ -999,6 +988,8 @@ Missing

 MultiIndex
 ^^^^^^^^^^
+
+- :meth:`DataFrame.swaplevels` now raises a ``TypeError`` if the axis is not a :class:`MultiIndex`. Previously an ``AttributeError`` was raised (:issue:`31126`)
 - Bug in :meth:`Dataframe.loc` when used with a :class:`MultiIndex`. The returned values were not in the same order as the given inputs (:issue:`22797`)

 .. ipython:: python
@@ -1060,6 +1051,7 @@ I/O
 - Bug in :meth:`DataFrame.to_sql` when reading DataFrames with ``-np.inf`` entries with MySQL now has a more explicit ``ValueError`` (:issue:`34431`)
 - Bug in :meth:`read_excel` that was raising a ``TypeError`` when ``header=None`` and ``index_col`` given as list (:issue:`31783`)
 - Bug in "meth"`read_excel` where datetime values are used in the header in a `MultiIndex` (:issue:`34748`)
+- :func:`read_excel` no longer takes ``**kwds`` arguments. This means that passing in keyword ``chunksize`` now raises a ``TypeError`` (previously raised a ``NotImplementedError``), while passing in keyword ``encoding`` now raises a ``TypeError`` (:issue:`34464`)

 Plotting
 ^^^^^^^^
@@ -1075,6 +1067,8 @@ Plotting

 Groupby/resample/rolling
 ^^^^^^^^^^^^^^^^^^^^^^^^
+- Using a :func:`pandas.api.indexers.BaseIndexer` with ``count``, ``min``, ``max``, ``median``, ``skew``, ``cov``, ``corr`` will now return correct results for any monotonic :func:`pandas.api.indexers.BaseIndexer` descendant (:issue:`32865`)
+- :meth:`DataFrameGroupby.mean` and :meth:`SeriesGroupby.mean` (and similarly for :meth:`~DataFrameGroupby.median`, :meth:`~DataFrameGroupby.std` and :meth:`~DataFrameGroupby.var`) now raise a ``TypeError`` if a not-accepted keyword argument is passed into it. Previously a ``UnsupportedFunctionCall`` was raised (``AssertionError`` if ``min_count`` passed into :meth:`~DataFrameGroupby.median`) (:issue:`31485`)
 - Bug in :meth:`GroupBy.apply` raises ``ValueError`` when the ``by`` axis is not sorted and has duplicates and the applied ``func`` does not mutate passed in objects (:issue:`30667`)
 - Bug in :meth:`DataFrameGroupby.transform` produces incorrect result with transformation functions (:issue:`30918`)
 - Bug in :meth:`Groupby.transform` was returning the wrong result when grouping by multiple keys of which some were categorical and others not (:issue:`32494`)
@@ -1156,6 +1150,9 @@ ExtensionArray

 Other
 ^^^^^
+
+- The :class:`DataFrame` constructor no longer accepts a list of ``DataFrame`` objects. Because of changes to NumPy, ``DataFrame`` objects are now consistently treated as 2D objects, so a list of ``DataFrames`` is considered 3D, and no longer acceptable for the ``DataFrame`` constructor (:issue:`32289`).
+- :meth:`Series.interpolate` and :meth:`DataFrame.interpolate` now raise a ``ValueError`` if ``limit_direction`` is 'forward' or 'both' and ``method`` is 'backfill' or 'bfill' or ``limit_direction`` is 'backward' or 'both' and ``method`` is 'pad' or 'ffill' (:issue:`34746`)
 - Appending a dictionary to a :class:`DataFrame` without passing ``ignore_index=True`` will raise ``TypeError: Can only append a dict if ignore_index=True`` instead of ``TypeError: Can only append a Series if ignore_index=True or if the Series has a name`` (:issue:`30871`)
 - Set operations on an object-dtype :class:`Index` now always return object-dtype results (:issue:`31401`)

From 93a43838d607029c781b38ef420766d46aef801e Mon Sep 17 00:00:00 2001
From: Terji Petersen
Date: Tue, 14 Jul 2020 18:15:06 +0100
Subject: [PATCH 122/632] TYP: Add type hints to pd.read_html (#34291)

---
 pandas/io/html.py | 45 ++++++++++++++++++++++++---------------------
 1 file changed, 24 insertions(+), 21 deletions(-)

diff --git a/pandas/io/html.py b/pandas/io/html.py
index c4ffe332e3020..3193f52d239f1 100644
--- a/pandas/io/html.py
+++ b/pandas/io/html.py
@@ -8,7 +8,9 @@
 import numbers
 import os
 import re
+from typing import Dict, List, Optional, Pattern, Sequence, Union

+from pandas._typing import FilePathOrBuffer
 from pandas.compat._optional import import_optional_dependency
 from pandas.errors import AbstractMethodError, EmptyDataError
 from pandas.util._decorators import deprecate_nonkeyword_arguments
@@ -16,6 +18,7 @@
 from pandas.core.dtypes.common import is_list_like

 from pandas.core.construction import create_series_with_explicit_dtype
+from pandas.core.frame import DataFrame

 from pandas.io.common import is_url, urlopen, validate_header_arg
 from pandas.io.formats.printing import pprint_thing
@@ -924,22 +927,22 @@ def _parse(flavor, io, match, attrs, encoding, displayed_only, **kwargs):

 @deprecate_nonkeyword_arguments(version="2.0")
 def read_html(
-    io,
-    match=".+",
-    flavor=None,
-    header=None,
-    index_col=None,
-    skiprows=None,
-    attrs=None,
-    parse_dates=False,
-    thousands=",",
-    encoding=None,
-    decimal=".",
-    converters=None,
+    io: FilePathOrBuffer,
+    match: Union[str, Pattern] = ".+",
+    flavor: Optional[str] = None,
+    header: Optional[Union[int, Sequence[int]]] = None,
+    index_col: Optional[Union[int, Sequence[int]]] = None,
+    skiprows: Optional[Union[int, Sequence[int], slice]] = None,
+    attrs: Optional[Dict[str, str]] = None,
+    parse_dates: bool = False,
+    thousands: Optional[str] = ",",
+    encoding: Optional[str] = None,
+    decimal: str = ".",
+    converters: Optional[Dict] = None,
     na_values=None,
-    keep_default_na=True,
-    displayed_only=True,
-):
+    keep_default_na: bool = True,
+    displayed_only: bool = True,
+) -> List[DataFrame]:
     r"""
     Read HTML tables into a ``list`` of ``DataFrame`` objects.

@@ -958,26 +961,26 @@ def read_html(
         This value is converted to a regular expression so that there is
         consistent behavior between Beautiful Soup and lxml.

-    flavor : str or None
+    flavor : str, optional
         The parsing engine to use. 'bs4' and 'html5lib' are synonymous with
         each other, they are both there for backwards compatibility. The
         default of ``None`` tries to use ``lxml`` to parse and if that fails it
         falls back on ``bs4`` + ``html5lib``.
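(Editor's aside, not part of the diff: a minimal sketch of calling the
annotated signature above. The inline HTML table is hypothetical, and
parsing assumes lxml, or bs4 + html5lib, is installed.)

    import pandas as pd

    html = """
    <table>
      <tr><th>name</th><th>score</th></tr>
      <tr><td>a</td><td>1</td></tr>
      <tr><td>b</td><td>2</td></tr>
    </table>
    """

    # read_html returns List[DataFrame]; flavor=None tries lxml first and
    # falls back on bs4 + html5lib, as the docstring describes.
    tables = pd.read_html(html, match=".+", flavor=None)
    print(tables[0])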
- header : int or list-like or None, optional + header : int or list-like, optional The row (or list of rows for a :class:`~pandas.MultiIndex`) to use to make the columns headers. - index_col : int or list-like or None, optional + index_col : int or list-like, optional The column (or list of columns) to use to create the index. - skiprows : int or list-like or slice or None, optional + skiprows : int, list-like or slice, optional Number of rows to skip after parsing the column integer. 0-based. If a sequence of integers or a slice is given, will skip the rows indexed by that sequence. Note that a single element sequence means 'skip the nth row' whereas an integer means 'skip n rows'. - attrs : dict or None, optional + attrs : dict, optional This is a dictionary of attributes that you can pass to use to identify the table in the HTML. These are not checked for validity before being passed to lxml or Beautiful Soup. However, these attributes must be @@ -1005,7 +1008,7 @@ def read_html( thousands : str, optional Separator to use to parse thousands. Defaults to ``','``. - encoding : str or None, optional + encoding : str, optional The encoding used to decode the web page. Defaults to ``None``.``None`` preserves the previous encoding behavior, which depends on the underlying parser library (e.g., the parser library will try to use From e5dcdd16fa55a6028d8a389d258e6ab1fe5da131 Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Tue, 14 Jul 2020 10:16:29 -0700 Subject: [PATCH 123/632] ASV: dt64arr_to_periodarr (#35244) --- asv_bench/benchmarks/tslibs/period.py | 60 +++++++++++++++++++-------- 1 file changed, 42 insertions(+), 18 deletions(-) diff --git a/asv_bench/benchmarks/tslibs/period.py b/asv_bench/benchmarks/tslibs/period.py index 1a2c89b48c665..849e8ec864ac2 100644 --- a/asv_bench/benchmarks/tslibs/period.py +++ b/asv_bench/benchmarks/tslibs/period.py @@ -9,7 +9,12 @@ from pandas.tseries.frequencies import to_offset -from .tslib import _sizes +from .tslib import _sizes, _tzs + +try: + from pandas._libs.tslibs.vectorized import dt64arr_to_periodarr +except ImportError: + from pandas._libs.tslibs.period import dt64arr_to_periodarr class PeriodProperties: @@ -75,26 +80,29 @@ def time_period_constructor(self, freq, is_offset): Period("2012-06-01", freq=freq) +_freq_ints = [ + 1000, + 1011, # Annual - November End + 2000, + 2011, # Quarterly - November End + 3000, + 4000, + 4006, # Weekly - Saturday End + 5000, + 6000, + 7000, + 8000, + 9000, + 10000, + 11000, + 12000, +] + + class TimePeriodArrToDT64Arr: params = [ _sizes, - [ - 1000, - 1011, # Annual - November End - 2000, - 2011, # Quarterly - November End - 3000, - 4000, - 4006, # Weekly - Saturday End - 5000, - 6000, - 7000, - 8000, - 9000, - 10000, - 11000, - 12000, - ], + _freq_ints, ] param_names = ["size", "freq"] @@ -104,3 +112,19 @@ def setup(self, size, freq): def time_periodarray_to_dt64arr(self, size, freq): periodarr_to_dt64arr(self.i8values, freq) + + +class TimeDT64ArrToPeriodArr: + params = [ + _sizes, + _freq_ints, + _tzs, + ] + param_names = ["size", "freq", "tz"] + + def setup(self, size, freq, tz): + arr = np.arange(10, dtype="i8").repeat(size // 10) + self.i8values = arr + + def time_dt64arr_to_periodarr(self, size, freq, tz): + dt64arr_to_periodarr(self.i8values, freq, tz) From b018691fa9df976cc252cfb8403f619d0bdd7060 Mon Sep 17 00:00:00 2001 From: Tom Augspurger Date: Tue, 14 Jul 2020 15:21:56 -0500 Subject: [PATCH 124/632] API: Make describe changes backwards compatible (#34798) --- doc/source/whatsnew/v1.1.0.rst | 10 
+-- pandas/core/generic.py | 54 +++++++++++++++-- pandas/tests/frame/methods/test_describe.py | 64 +++++++++++++++++++- pandas/tests/series/methods/test_describe.py | 42 ++++++++++++- 4 files changed, 153 insertions(+), 17 deletions(-) diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst index 088f1d1946fa9..cfac916157649 100644 --- a/doc/source/whatsnew/v1.1.0.rst +++ b/doc/source/whatsnew/v1.1.0.rst @@ -280,6 +280,7 @@ Other enhancements - Added :meth:`DataFrame.value_counts` (:issue:`5377`) - Added a :func:`pandas.api.indexers.FixedForwardWindowIndexer` class to support forward-looking windows during ``rolling`` operations. - Added a :func:`pandas.api.indexers.VariableOffsetWindowIndexer` class to support ``rolling`` operations with non-fixed offsets (:issue:`34994`) +- :meth:`~DataFrame.describe` now includes a ``datetime_is_numeric`` keyword to control how datetime columns are summarized (:issue:`30164`, :issue:`34798`) - :class:`Styler` may now render CSS more efficiently where multiple cells have the same styling (:issue:`30876`) - :meth:`Styler.highlight_null` now accepts ``subset`` argument (:issue:`31345`) - When writing directly to a sqlite connection :func:`to_sql` now supports the ``multi`` method (:issue:`29921`) @@ -675,15 +676,6 @@ apply and applymap on ``DataFrame`` evaluates first row/column only once df.apply(func, axis=1) -.. _whatsnew_110.api.other: - -Other API changes -^^^^^^^^^^^^^^^^^ - -- :meth:`Series.describe` will now show distribution percentiles for ``datetime`` dtypes, statistics ``first`` and ``last`` - will now be ``min`` and ``max`` to match with numeric dtypes in :meth:`DataFrame.describe` (:issue:`30164`) - - Increased minimum versions for dependencies ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/pandas/core/generic.py b/pandas/core/generic.py index ece4281af3208..eb55369d83593 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -9711,7 +9711,11 @@ def abs(self: FrameOrSeries) -> FrameOrSeries: return np.abs(self) def describe( - self: FrameOrSeries, percentiles=None, include=None, exclude=None + self: FrameOrSeries, + percentiles=None, + include=None, + exclude=None, + datetime_is_numeric=False, ) -> FrameOrSeries: """ Generate descriptive statistics. @@ -9757,6 +9761,12 @@ def describe( ``select_dtypes`` (e.g. ``df.describe(include=['O'])``). To exclude pandas categorical columns, use ``'category'`` - None (default) : The result will exclude nothing. + datetime_is_numeric : bool, default False + Whether to treat datetime dtypes as numeric. This affects statistics + calculated for the column. For DataFrame input, this also + controls whether datetime columns are included by default. + + .. versionadded:: 1.1.0 Returns ------- @@ -9834,7 +9844,7 @@ def describe( ... np.datetime64("2010-01-01"), ... np.datetime64("2010-01-01") ... ]) - >>> s.describe() + >>> s.describe(datetime_is_numeric=True) count 3 mean 2006-09-01 08:00:00 min 2000-01-01 00:00:00 @@ -9992,8 +10002,37 @@ def describe_categorical_1d(data): dtype = None if result[1] > 0: top, freq = objcounts.index[0], objcounts.iloc[0] - names += ["top", "freq"] - result += [top, freq] + if is_datetime64_any_dtype(data.dtype): + if self.ndim == 1: + stacklevel = 4 + else: + stacklevel = 5 + warnings.warn( + "Treating datetime data as categorical rather than numeric in " + "`.describe` is deprecated and will be removed in a future " + "version of pandas. 
Specify `datetime_is_numeric=True` to " + "silence this warning and adopt the future behavior now.", + FutureWarning, + stacklevel=stacklevel, + ) + tz = data.dt.tz + asint = data.dropna().values.view("i8") + top = Timestamp(top) + if top.tzinfo is not None and tz is not None: + # Don't tz_localize(None) if key is already tz-aware + top = top.tz_convert(tz) + else: + top = top.tz_localize(tz) + names += ["top", "freq", "first", "last"] + result += [ + top, + freq, + Timestamp(asint.min(), tz=tz), + Timestamp(asint.max(), tz=tz), + ] + else: + names += ["top", "freq"] + result += [top, freq] # If the DataFrame is empty, set 'top' and 'freq' to None # to maintain output shape consistency @@ -10019,7 +10058,7 @@ def describe_1d(data): return describe_categorical_1d(data) elif is_numeric_dtype(data): return describe_numeric_1d(data) - elif is_datetime64_any_dtype(data.dtype): + elif is_datetime64_any_dtype(data.dtype) and datetime_is_numeric: return describe_timestamp_1d(data) elif is_timedelta64_dtype(data.dtype): return describe_numeric_1d(data) @@ -10030,7 +10069,10 @@ def describe_1d(data): return describe_1d(self) elif (include is None) and (exclude is None): # when some numerics are found, keep only numerics - data = self.select_dtypes(include=[np.number]) + default_include = [np.number] + if datetime_is_numeric: + default_include.append("datetime") + data = self.select_dtypes(include=default_include) if len(data.columns) == 0: data = self elif include == "all": diff --git a/pandas/tests/frame/methods/test_describe.py b/pandas/tests/frame/methods/test_describe.py index b61d0d28e2fba..0b70bead375da 100644 --- a/pandas/tests/frame/methods/test_describe.py +++ b/pandas/tests/frame/methods/test_describe.py @@ -267,7 +267,69 @@ def test_describe_tz_values(self, tz_naive_fixture): }, index=["count", "mean", "min", "25%", "50%", "75%", "max", "std"], ) - result = df.describe(include="all") + result = df.describe(include="all", datetime_is_numeric=True) + tm.assert_frame_equal(result, expected) + + def test_datetime_is_numeric_includes_datetime(self): + df = pd.DataFrame({"a": pd.date_range("2012", periods=3), "b": [1, 2, 3]}) + result = df.describe(datetime_is_numeric=True) + expected = pd.DataFrame( + { + "a": [ + 3, + pd.Timestamp("2012-01-02"), + pd.Timestamp("2012-01-01"), + pd.Timestamp("2012-01-01T12:00:00"), + pd.Timestamp("2012-01-02"), + pd.Timestamp("2012-01-02T12:00:00"), + pd.Timestamp("2012-01-03"), + np.nan, + ], + "b": [3, 2, 1, 1.5, 2, 2.5, 3, 1], + }, + index=["count", "mean", "min", "25%", "50%", "75%", "max", "std"], + ) + tm.assert_frame_equal(result, expected) + + def test_describe_tz_values2(self): + tz = "CET" + s1 = Series(range(5)) + start = Timestamp(2018, 1, 1) + end = Timestamp(2018, 1, 5) + s2 = Series(date_range(start, end, tz=tz)) + df = pd.DataFrame({"s1": s1, "s2": s2}) + + s1_ = s1.describe() + s2_ = pd.Series( + [ + 5, + 5, + s2.value_counts().index[0], + 1, + start.tz_localize(tz), + end.tz_localize(tz), + ], + index=["count", "unique", "top", "freq", "first", "last"], + ) + idx = [ + "count", + "unique", + "top", + "freq", + "first", + "last", + "mean", + "std", + "min", + "25%", + "50%", + "75%", + "max", + ] + expected = pd.concat([s1_, s2_], axis=1, keys=["s1", "s2"]).loc[idx] + + with tm.assert_produces_warning(FutureWarning): + result = df.describe(include="all") tm.assert_frame_equal(result, expected) def test_describe_percentiles_integer_idx(self): diff --git a/pandas/tests/series/methods/test_describe.py b/pandas/tests/series/methods/test_describe.py 
index 4e59c6995f4f2..a15dc0751aa7d 100644 --- a/pandas/tests/series/methods/test_describe.py +++ b/pandas/tests/series/methods/test_describe.py @@ -83,7 +83,7 @@ def test_describe_with_tz(self, tz_naive_fixture): start = Timestamp(2018, 1, 1) end = Timestamp(2018, 1, 5) s = Series(date_range(start, end, tz=tz), name=name) - result = s.describe() + result = s.describe(datetime_is_numeric=True) expected = Series( [ 5, @@ -98,3 +98,43 @@ def test_describe_with_tz(self, tz_naive_fixture): index=["count", "mean", "min", "25%", "50%", "75%", "max"], ) tm.assert_series_equal(result, expected) + + def test_describe_with_tz_warns(self): + name = tz = "CET" + start = Timestamp(2018, 1, 1) + end = Timestamp(2018, 1, 5) + s = Series(date_range(start, end, tz=tz), name=name) + + with tm.assert_produces_warning(FutureWarning): + result = s.describe() + + expected = Series( + [ + 5, + 5, + s.value_counts().index[0], + 1, + start.tz_localize(tz), + end.tz_localize(tz), + ], + name=name, + index=["count", "unique", "top", "freq", "first", "last"], + ) + tm.assert_series_equal(result, expected) + + def test_datetime_is_numeric_includes_datetime(self): + s = Series(date_range("2012", periods=3)) + result = s.describe(datetime_is_numeric=True) + expected = Series( + [ + 3, + Timestamp("2012-01-02"), + Timestamp("2012-01-01"), + Timestamp("2012-01-01T12:00:00"), + Timestamp("2012-01-02"), + Timestamp("2012-01-02T12:00:00"), + Timestamp("2012-01-03"), + ], + index=["count", "mean", "min", "25%", "50%", "75%", "max"], + ) + tm.assert_series_equal(result, expected) From b6222ec976a71b4ba0c643411606e2376e744c4d Mon Sep 17 00:00:00 2001 From: Marco Gorelli Date: Tue, 14 Jul 2020 21:34:55 +0100 Subject: [PATCH 125/632] BUG: aggregations were getting overwritten if they had the same name (#30858) * :bug: aggregations were getting overwritten if they had the same name --- doc/source/whatsnew/v1.1.0.rst | 1 + pandas/core/groupby/generic.py | 15 +++-- .../tests/groupby/aggregate/test_aggregate.py | 58 +++++++++++++++++++ 3 files changed, 68 insertions(+), 6 deletions(-) diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst index cfac916157649..3faca9c8868ca 100644 --- a/doc/source/whatsnew/v1.1.0.rst +++ b/doc/source/whatsnew/v1.1.0.rst @@ -1093,6 +1093,7 @@ Reshaping - Bug in :func:`crosstab` when inputs are two Series and have tuple names, the output will keep dummy MultiIndex as columns. (:issue:`18321`) - :meth:`DataFrame.pivot` can now take lists for ``index`` and ``columns`` arguments (:issue:`21425`) - Bug in :func:`concat` where the resulting indices are not copied when ``copy=True`` (:issue:`29879`) +- Bug in :meth:`SeriesGroupBy.aggregate` was resulting in aggregations being overwritten when they shared the same name (:issue:`30880`) - Bug where :meth:`Index.astype` would lose the name attribute when converting from ``Float64Index`` to ``Int64Index``, or when casting to an ``ExtensionArray`` dtype (:issue:`32013`) - :meth:`Series.append` will now raise a ``TypeError`` when passed a DataFrame or a sequence containing Dataframe (:issue:`31413`) - :meth:`DataFrame.replace` and :meth:`Series.replace` will raise a ``TypeError`` if ``to_replace`` is not an expected type. 
Previously the ``replace`` would fail silently (:issue:`18634`) diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index 093e1d4ab3942..94dc216c82f55 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -278,7 +278,7 @@ def aggregate( if isinstance(ret, dict): from pandas import concat - ret = concat(ret, axis=1) + ret = concat(ret.values(), axis=1, keys=[key.label for key in ret.keys()]) return ret agg = aggregate @@ -307,8 +307,8 @@ def _aggregate_multiple_funcs(self, arg): arg = zip(columns, arg) - results = {} - for name, func in arg: + results: Dict[base.OutputKey, Union[Series, DataFrame]] = {} + for idx, (name, func) in enumerate(arg): obj = self # reset the cache so that we @@ -317,13 +317,14 @@ def _aggregate_multiple_funcs(self, arg): obj = copy.copy(obj) obj._reset_cache() obj._selection = name - results[name] = obj.aggregate(func) + results[base.OutputKey(label=name, position=idx)] = obj.aggregate(func) if any(isinstance(x, DataFrame) for x in results.values()): # let higher level handle return results - return self.obj._constructor_expanddim(results, columns=columns) + output = self._wrap_aggregated_output(results) + return self.obj._constructor_expanddim(output, columns=columns) def _wrap_series_output( self, output: Mapping[base.OutputKey, Union[Series, np.ndarray]], index: Index, @@ -354,10 +355,12 @@ def _wrap_series_output( if len(output) > 1: result = self.obj._constructor_expanddim(indexed_output, index=index) result.columns = columns - else: + elif not columns.empty: result = self.obj._constructor( indexed_output[0], index=index, name=columns[0] ) + else: + result = self.obj._constructor_expanddim() return result diff --git a/pandas/tests/groupby/aggregate/test_aggregate.py b/pandas/tests/groupby/aggregate/test_aggregate.py index dbd713a0af4cf..bf465635c0085 100644 --- a/pandas/tests/groupby/aggregate/test_aggregate.py +++ b/pandas/tests/groupby/aggregate/test_aggregate.py @@ -2,10 +2,13 @@ test .agg behavior / note that .apply is tested generally in test_groupby.py """ import functools +from functools import partial import numpy as np import pytest +from pandas.errors import PerformanceWarning + from pandas.core.dtypes.common import is_integer_dtype import pandas as pd @@ -252,6 +255,61 @@ def test_agg_multiple_functions_maintain_order(df): tm.assert_index_equal(result.columns, exp_cols) +def test_agg_multiple_functions_same_name(): + # GH 30880 + df = pd.DataFrame( + np.random.randn(1000, 3), + index=pd.date_range("1/1/2012", freq="S", periods=1000), + columns=["A", "B", "C"], + ) + result = df.resample("3T").agg( + {"A": [partial(np.quantile, q=0.9999), partial(np.quantile, q=0.1111)]} + ) + expected_index = pd.date_range("1/1/2012", freq="3T", periods=6) + expected_columns = MultiIndex.from_tuples([("A", "quantile"), ("A", "quantile")]) + expected_values = np.array( + [df.resample("3T").A.quantile(q=q).values for q in [0.9999, 0.1111]] + ).T + expected = pd.DataFrame( + expected_values, columns=expected_columns, index=expected_index + ) + tm.assert_frame_equal(result, expected) + + +def test_agg_multiple_functions_same_name_with_ohlc_present(): + # GH 30880 + # ohlc expands dimensions, so different test to the above is required. 
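# Editor's illustration (not part of the patch): the behaviour the fix
# above guarantees -- two aggregations that share a name no longer
# overwrite each other. A minimal, hypothetical sketch:
from functools import partial

import numpy as np
import pandas as pd

frame = pd.DataFrame({"k": ["x", "x", "y"], "v": [1.0, 2.0, 3.0]})
out = frame.groupby("k")["v"].agg(
    [partial(np.quantile, q=0.25), partial(np.quantile, q=0.75)]
)
# `out` keeps both results side by side, each column labelled "quantile".
print(out)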
+ df = pd.DataFrame( + np.random.randn(1000, 3), + index=pd.date_range("1/1/2012", freq="S", periods=1000), + columns=["A", "B", "C"], + ) + result = df.resample("3T").agg( + {"A": ["ohlc", partial(np.quantile, q=0.9999), partial(np.quantile, q=0.1111)]} + ) + expected_index = pd.date_range("1/1/2012", freq="3T", periods=6) + expected_columns = pd.MultiIndex.from_tuples( + [ + ("A", "ohlc", "open"), + ("A", "ohlc", "high"), + ("A", "ohlc", "low"), + ("A", "ohlc", "close"), + ("A", "quantile", "A"), + ("A", "quantile", "A"), + ] + ) + non_ohlc_expected_values = np.array( + [df.resample("3T").A.quantile(q=q).values for q in [0.9999, 0.1111]] + ).T + expected_values = np.hstack([df.resample("3T").A.ohlc(), non_ohlc_expected_values]) + expected = pd.DataFrame( + expected_values, columns=expected_columns, index=expected_index + ) + # PerformanceWarning is thrown by `assert col in right` in assert_frame_equal + with tm.assert_produces_warning(PerformanceWarning): + tm.assert_frame_equal(result, expected) + + def test_multiple_functions_tuples_and_non_tuples(df): # #1359 funcs = [("foo", "mean"), "std"] From b0468aa45f3912d6f8823d1cd418af34ffdcd2b1 Mon Sep 17 00:00:00 2001 From: Terji Petersen Date: Tue, 14 Jul 2020 23:32:48 +0100 Subject: [PATCH 126/632] CLN: remove kwargs in Index.format (#35122) --- pandas/core/indexes/base.py | 9 +++++++-- pandas/core/indexes/datetimelike.py | 22 ++++++++++++++++++++- pandas/core/indexes/multi.py | 30 ++++++++++++++++------------- pandas/core/indexes/range.py | 2 +- pandas/io/formats/format.py | 3 +-- pandas/io/formats/html.py | 1 + 6 files changed, 48 insertions(+), 19 deletions(-) diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 2f12a2e4c27ea..3dbee7d0929cb 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -902,7 +902,12 @@ def _mpl_repr(self): # how to represent ourselves to matplotlib return self.values - def format(self, name: bool = False, formatter=None, **kwargs): + def format( + self, + name: bool = False, + formatter: Optional[Callable] = None, + na_rep: str_t = "NaN", + ) -> List[str_t]: """ Render a string representation of the Index. 
""" @@ -917,7 +922,7 @@ def format(self, name: bool = False, formatter=None, **kwargs): if formatter is not None: return header + list(self.map(formatter)) - return self._format_with_header(header, **kwargs) + return self._format_with_header(header, na_rep=na_rep) def _format_with_header(self, header, na_rep="NaN") -> List[str_t]: from pandas.io.formats.format import format_array diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index 7be6aa50fa16b..15a7e25238983 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -9,7 +9,7 @@ from pandas._libs import NaT, Timedelta, iNaT, join as libjoin, lib from pandas._libs.tslibs import BaseOffset, Resolution, Tick, timezones from pandas._libs.tslibs.parsing import DateParseError -from pandas._typing import Label +from pandas._typing import Callable, Label from pandas.compat.numpy import function as nv from pandas.errors import AbstractMethodError from pandas.util._decorators import Appender, cache_readonly, doc @@ -338,6 +338,26 @@ def argmax(self, axis=None, skipna=True, *args, **kwargs): # -------------------------------------------------------------------- # Rendering Methods + def format( + self, + name: bool = False, + formatter: Optional[Callable] = None, + na_rep: str = "NaT", + date_format: Optional[str] = None, + ) -> List[str]: + """ + Render a string representation of the Index. + """ + header = [] + if name: + fmt_name = ibase.pprint_thing(self.name, escape_chars=("\t", "\r", "\n")) + header.append(fmt_name) + + if formatter is not None: + return header + list(self.map(formatter)) + + return self._format_with_header(header, na_rep=na_rep, date_format=date_format) + def _format_with_header(self, header, na_rep="NaT", date_format=None) -> List[str]: return header + list( self._format_native_types(na_rep=na_rep, date_format=date_format) diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index 15db6c51a1f2f..235da89083d0a 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -2,6 +2,7 @@ from typing import ( TYPE_CHECKING, Any, + Callable, Hashable, Iterable, List, @@ -1231,13 +1232,17 @@ def _format_native_types(self, na_rep="nan", **kwargs): def format( self, - space=2, + name: Optional[bool] = None, + formatter: Optional[Callable] = None, + na_rep: Optional[str] = None, + names: bool = False, + space: int = 2, sparsify=None, - adjoin=True, - names=False, - na_rep=None, - formatter=None, - ): + adjoin: bool = True, + ) -> List: + if name is not None: + names = name + if len(self) == 0: return [] @@ -1265,13 +1270,13 @@ def format( stringified_levels.append(formatted) result_levels = [] - for lev, name in zip(stringified_levels, self.names): + for lev, lev_name in zip(stringified_levels, self.names): level = [] if names: level.append( - pprint_thing(name, escape_chars=("\t", "\r", "\n")) - if name is not None + pprint_thing(lev_name, escape_chars=("\t", "\r", "\n")) + if lev_name is not None else "" ) @@ -1283,10 +1288,9 @@ def format( if sparsify: sentinel = "" - # GH3547 - # use value of sparsify as sentinel, unless it's an obvious - # "Truthy" value - if sparsify not in [True, 1]: + # GH3547 use value of sparsify as sentinel if it's "Falsey" + assert isinstance(sparsify, bool) or sparsify is lib.no_default + if sparsify in [False, lib.no_default]: sentinel = sparsify # little bit of a kludge job for #1217 result_levels = _sparsify( diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py index 
6d9fd6efe54a3..e5e98039ff77b 100644 --- a/pandas/core/indexes/range.py +++ b/pandas/core/indexes/range.py @@ -198,7 +198,7 @@ def _format_data(self, name=None): return None def _format_with_header(self, header, na_rep="NaN") -> List[str]: - return header + list(map(pprint_thing, self._range)) + return header + [pprint_thing(x) for x in self._range] # -------------------------------------------------------------------- _deprecation_message = ( diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py index 27df014620f56..fe85eab4bfbf5 100644 --- a/pandas/io/formats/format.py +++ b/pandas/io/formats/format.py @@ -330,9 +330,8 @@ def _get_footer(self) -> str: def _get_formatted_index(self) -> Tuple[List[str], bool]: index = self.tr_series.index - is_multi = isinstance(index, MultiIndex) - if is_multi: + if isinstance(index, MultiIndex): have_header = any(name for name in index.names) fmt_index = index.format(names=True) else: diff --git a/pandas/io/formats/html.py b/pandas/io/formats/html.py index 7ea2417ceb24b..13f0ab1e8a52c 100644 --- a/pandas/io/formats/html.py +++ b/pandas/io/formats/html.py @@ -442,6 +442,7 @@ def _write_hierarchical_rows( frame = self.fmt.tr_frame nrows = len(frame) + assert isinstance(frame.index, MultiIndex) idx_values = frame.index.format(sparsify=False, adjoin=False, names=False) idx_values = list(zip(*idx_values)) From d99d44a29194ba9487aa741297d41363d1d6f2e3 Mon Sep 17 00:00:00 2001 From: Tom Augspurger Date: Wed, 15 Jul 2020 07:24:39 -0500 Subject: [PATCH 127/632] Revert "BUG: fix union_indexes not supporting sort=False for Index subclasses (#35098)" (#35277) This reverts commit c21be0562a33d149b62735fc82aff80e4d5942f5. --- doc/source/whatsnew/v1.1.0.rst | 1 - pandas/core/indexes/api.py | 8 +------- pandas/tests/frame/test_constructors.py | 6 ++---- pandas/tests/indexes/test_common.py | 18 +---------------- pandas/tests/reshape/test_concat.py | 14 ------------- pandas/tests/reshape/test_melt.py | 26 ++++++++++++------------- pandas/tests/test_strings.py | 9 +-------- 7 files changed, 18 insertions(+), 64 deletions(-) diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst index 3faca9c8868ca..dc1e7523046d5 100644 --- a/doc/source/whatsnew/v1.1.0.rst +++ b/doc/source/whatsnew/v1.1.0.rst @@ -1114,7 +1114,6 @@ Reshaping - Fixed bug in :func:`melt` where melting MultiIndex columns with ``col_level`` > 0 would raise a ``KeyError`` on ``id_vars`` (:issue:`34129`) - Bug in :meth:`Series.where` with an empty Series and empty ``cond`` having non-bool dtype (:issue:`34592`) - Fixed regression where :meth:`DataFrame.apply` would raise ``ValueError`` for elements whth ``S`` dtype (:issue:`34529`) -- Bug in :meth:`DataFrame.append` leading to sorting columns even when ``sort=False`` is specified (:issue:`35092`) Sparse ^^^^^^ diff --git a/pandas/core/indexes/api.py b/pandas/core/indexes/api.py index 9849742abcfca..4c5a70f4088ee 100644 --- a/pandas/core/indexes/api.py +++ b/pandas/core/indexes/api.py @@ -214,13 +214,7 @@ def conv(i): return result.union_many(indexes[1:]) else: for other in indexes[1:]: - # GH 35092. 
Index.union expects sort=None instead of sort=True - # to signify that sort=True isn't fully implemented and - # legacy implementation sometimes might not sort (see GH 24959) - # In this case we currently sort in _get_combined_index - if sort: - sort = None - result = result.union(other, sort=sort) + result = result.union(other) return result elif kind == "array": index = indexes[0] diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py index 17ac2307b9da6..1631342c359c1 100644 --- a/pandas/tests/frame/test_constructors.py +++ b/pandas/tests/frame/test_constructors.py @@ -2578,13 +2578,11 @@ def test_construct_with_two_categoricalindex_series(self): index=pd.CategoricalIndex(["f", "female", "m", "male", "unknown"]), ) result = DataFrame([s1, s2]) - # GH 35092. Extra s2 columns are now appended to s1 columns - # in original order expected = DataFrame( np.array( - [[39.0, 6.0, 4.0, np.nan, np.nan], [152.0, 242.0, 150.0, 2.0, 2.0]] + [[np.nan, 39.0, np.nan, 6.0, 4.0], [2.0, 152.0, 2.0, 242.0, 150.0]] ), - columns=["female", "male", "unknown", "f", "m"], + columns=["f", "female", "m", "male", "unknown"], ) tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/indexes/test_common.py b/pandas/tests/indexes/test_common.py index c85696e02ad39..02a173eb4958d 100644 --- a/pandas/tests/indexes/test_common.py +++ b/pandas/tests/indexes/test_common.py @@ -13,9 +13,8 @@ from pandas.core.dtypes.common import is_period_dtype, needs_i8_conversion import pandas as pd -from pandas import CategoricalIndex, Index, MultiIndex, RangeIndex +from pandas import CategoricalIndex, MultiIndex, RangeIndex import pandas._testing as tm -from pandas.core.indexes.api import union_indexes class TestCommon: @@ -396,18 +395,3 @@ def test_astype_preserves_name(self, index, dtype, copy): assert result.names == index.names else: assert result.name == index.name - - -@pytest.mark.parametrize("arr", [[0, 1, 4, 3]]) -@pytest.mark.parametrize("dtype", ["int8", "int16", "int32", "int64"]) -def test_union_index_no_sort(arr, sort, dtype): - # GH 35092. Check that we don't sort with sort=False - ind1 = Index(arr[:2], dtype=dtype) - ind2 = Index(arr[2:], dtype=dtype) - - # sort is None indicates that we sort the combined index - if sort is None: - arr.sort() - expected = Index(arr, dtype=dtype) - result = union_indexes([ind1, ind2], sort=sort) - tm.assert_index_equal(result, expected) diff --git a/pandas/tests/reshape/test_concat.py b/pandas/tests/reshape/test_concat.py index ff95d8ad997a4..ffeb5ff0f8aaa 100644 --- a/pandas/tests/reshape/test_concat.py +++ b/pandas/tests/reshape/test_concat.py @@ -2857,17 +2857,3 @@ def test_concat_frame_axis0_extension_dtypes(): result = pd.concat([df2, df1], ignore_index=True) expected = pd.DataFrame({"a": [4, 5, 6, 1, 2, 3]}, dtype="Int64") tm.assert_frame_equal(result, expected) - - -@pytest.mark.parametrize("sort", [True, False]) -def test_append_sort(sort): - # GH 35092. Check that DataFrame.append respects the sort argument. 
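# Editor's illustration (not part of the diff): the revert restores the
# earlier behaviour in which the combined columns always come from
# Index.union with default sorting, which is why this test is removed.
# A rough sketch of the restored behaviour:
import pandas as pd

left = pd.DataFrame({0: [1, 2], 1: [3, 4]})
right = pd.DataFrame({3: [1, 2], 2: [3, 4]})
# After the revert, the columns come back sorted even with sort=False.
print(left.append(right, sort=False).columns)  # Int64Index([0, 1, 2, 3], ...)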
- df1 = pd.DataFrame(data={0: [1, 2], 1: [3, 4]}) - df2 = pd.DataFrame(data={3: [1, 2], 2: [3, 4]}) - cols = list(df1.columns) + list(df2.columns) - if sort: - cols.sort() - - result = df1.append(df2, sort=sort).columns - expected = type(result)(cols) - tm.assert_index_equal(result, expected) diff --git a/pandas/tests/reshape/test_melt.py b/pandas/tests/reshape/test_melt.py index 241721432bbf9..2b75a1ec6ca6e 100644 --- a/pandas/tests/reshape/test_melt.py +++ b/pandas/tests/reshape/test_melt.py @@ -732,11 +732,11 @@ def test_unbalanced(self): ) df["id"] = df.index exp_data = { - "X": ["X1", "X2", "X1", "X2"], - "A": [1.0, 2.0, 3.0, 4.0], - "B": [5.0, 6.0, np.nan, np.nan], - "id": [0, 1, 0, 1], - "year": [2010, 2010, 2011, 2011], + "X": ["X1", "X1", "X2", "X2"], + "A": [1.0, 3.0, 2.0, 4.0], + "B": [5.0, np.nan, 6.0, np.nan], + "id": [0, 0, 1, 1], + "year": [2010, 2011, 2010, 2011], } expected = pd.DataFrame(exp_data) expected = expected.set_index(["id", "year"])[["X", "A", "B"]] @@ -979,10 +979,10 @@ def test_nonnumeric_suffix(self): ) expected = pd.DataFrame( { - "A": ["X1", "X2", "X1", "X2"], - "colname": ["placebo", "placebo", "test", "test"], - "result": [5.0, 6.0, np.nan, np.nan], - "treatment": [1.0, 2.0, 3.0, 4.0], + "A": ["X1", "X1", "X2", "X2"], + "colname": ["placebo", "test", "placebo", "test"], + "result": [5.0, np.nan, 6.0, np.nan], + "treatment": [1.0, 3.0, 2.0, 4.0], } ) expected = expected.set_index(["A", "colname"]) @@ -1026,10 +1026,10 @@ def test_float_suffix(self): ) expected = pd.DataFrame( { - "A": ["X1", "X2", "X1", "X2", "X1", "X2", "X1", "X2"], - "colname": [1.2, 1.2, 1.0, 1.0, 1.1, 1.1, 2.1, 2.1], - "result": [5.0, 6.0, 0.0, 9.0, np.nan, np.nan, np.nan, np.nan], - "treatment": [np.nan, np.nan, np.nan, np.nan, 1.0, 2.0, 3.0, 4.0], + "A": ["X1", "X1", "X1", "X1", "X2", "X2", "X2", "X2"], + "colname": [1, 1.1, 1.2, 2.1, 1, 1.1, 1.2, 2.1], + "result": [0.0, np.nan, 5.0, np.nan, 9.0, np.nan, 6.0, np.nan], + "treatment": [np.nan, 1.0, np.nan, 3.0, np.nan, 2.0, np.nan, 4.0], } ) expected = expected.set_index(["A", "colname"]) diff --git a/pandas/tests/test_strings.py b/pandas/tests/test_strings.py index 3a4e54052305e..d9396d70f9112 100644 --- a/pandas/tests/test_strings.py +++ b/pandas/tests/test_strings.py @@ -636,15 +636,8 @@ def test_str_cat_align_mixed_inputs(self, join): # mixed list of indexed/unindexed u = np.array(["A", "B", "C", "D"]) expected_outer = Series(["aaA", "bbB", "c-C", "ddD", "-e-"]) - # joint index of rhs [t, u]; u will be forced have index of s - # GH 35092. 
If right join, maintain order of t.index - if join == "inner": - rhs_idx = t.index & s.index - elif join == "right": - rhs_idx = t.index.union(s.index, sort=False) - else: - rhs_idx = t.index | s.index + rhs_idx = t.index & s.index if join == "inner" else t.index | s.index expected = expected_outer.loc[s.index.join(rhs_idx, how=join)] result = s.str.cat([t, u], join=join, na_rep="-") From 66003e4d3a3d76d4074b5e9a8db10ac82cba5906 Mon Sep 17 00:00:00 2001 From: Tom Augspurger Date: Wed, 15 Jul 2020 07:25:33 -0500 Subject: [PATCH 128/632] Fixed apply_index (#35165) --- doc/source/reference/offset_frequency.rst | 18 ++++ doc/source/whatsnew/v1.1.0.rst | 1 + pandas/_libs/tslibs/offsets.pyx | 99 +++++++++++++++++--- pandas/core/arrays/datetimes.py | 2 +- pandas/tests/tseries/offsets/test_offsets.py | 7 +- 5 files changed, 110 insertions(+), 17 deletions(-) diff --git a/doc/source/reference/offset_frequency.rst b/doc/source/reference/offset_frequency.rst index 1b63253cde2c5..e6271a7806706 100644 --- a/doc/source/reference/offset_frequency.rst +++ b/doc/source/reference/offset_frequency.rst @@ -33,6 +33,7 @@ Methods :toctree: api/ DateOffset.apply + DateOffset.apply_index DateOffset.copy DateOffset.isAnchored DateOffset.onOffset @@ -117,6 +118,7 @@ Methods :toctree: api/ BusinessHour.apply + BusinessHour.apply_index BusinessHour.copy BusinessHour.isAnchored BusinessHour.onOffset @@ -201,6 +203,7 @@ Methods :toctree: api/ CustomBusinessHour.apply + CustomBusinessHour.apply_index CustomBusinessHour.copy CustomBusinessHour.isAnchored CustomBusinessHour.onOffset @@ -401,6 +404,7 @@ Methods :toctree: api/ CustomBusinessMonthEnd.apply + CustomBusinessMonthEnd.apply_index CustomBusinessMonthEnd.copy CustomBusinessMonthEnd.isAnchored CustomBusinessMonthEnd.onOffset @@ -447,6 +451,7 @@ Methods :toctree: api/ CustomBusinessMonthBegin.apply + CustomBusinessMonthBegin.apply_index CustomBusinessMonthBegin.copy CustomBusinessMonthBegin.isAnchored CustomBusinessMonthBegin.onOffset @@ -586,6 +591,7 @@ Methods :toctree: api/ WeekOfMonth.apply + WeekOfMonth.apply_index WeekOfMonth.copy WeekOfMonth.isAnchored WeekOfMonth.onOffset @@ -622,6 +628,7 @@ Methods :toctree: api/ LastWeekOfMonth.apply + LastWeekOfMonth.apply_index LastWeekOfMonth.copy LastWeekOfMonth.isAnchored LastWeekOfMonth.onOffset @@ -938,6 +945,7 @@ Methods :toctree: api/ FY5253.apply + FY5253.apply_index FY5253.copy FY5253.get_rule_code_suffix FY5253.get_year_end @@ -977,6 +985,7 @@ Methods :toctree: api/ FY5253Quarter.apply + FY5253Quarter.apply_index FY5253Quarter.copy FY5253Quarter.get_rule_code_suffix FY5253Quarter.get_weeks @@ -1013,6 +1022,7 @@ Methods :toctree: api/ Easter.apply + Easter.apply_index Easter.copy Easter.isAnchored Easter.onOffset @@ -1053,6 +1063,7 @@ Methods Tick.is_on_offset Tick.__call__ Tick.apply + Tick.apply_index Day --- @@ -1087,6 +1098,7 @@ Methods Day.is_on_offset Day.__call__ Day.apply + Day.apply_index Hour ---- @@ -1121,6 +1133,7 @@ Methods Hour.is_on_offset Hour.__call__ Hour.apply + Hour.apply_index Minute ------ @@ -1155,6 +1168,7 @@ Methods Minute.is_on_offset Minute.__call__ Minute.apply + Minute.apply_index Second ------ @@ -1189,6 +1203,7 @@ Methods Second.is_on_offset Second.__call__ Second.apply + Second.apply_index Milli ----- @@ -1223,6 +1238,7 @@ Methods Milli.is_on_offset Milli.__call__ Milli.apply + Milli.apply_index Micro ----- @@ -1257,6 +1273,7 @@ Methods Micro.is_on_offset Micro.__call__ Micro.apply + Micro.apply_index Nano ---- @@ -1291,6 +1308,7 @@ Methods Nano.is_on_offset Nano.__call__ 
Nano.apply + Nano.apply_index .. _api.frequencies: diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst index dc1e7523046d5..85661c0507caa 100644 --- a/doc/source/whatsnew/v1.1.0.rst +++ b/doc/source/whatsnew/v1.1.0.rst @@ -787,6 +787,7 @@ Deprecations - :meth:`DatetimeIndex.week` and `DatetimeIndex.weekofyear` are deprecated and will be removed in a future version, use :meth:`DatetimeIndex.isocalendar().week` instead (:issue:`33595`) - :meth:`DatetimeArray.week` and `DatetimeArray.weekofyear` are deprecated and will be removed in a future version, use :meth:`DatetimeArray.isocalendar().week` instead (:issue:`33595`) - :meth:`DateOffset.__call__` is deprecated and will be removed in a future version, use ``offset + other`` instead (:issue:`34171`) +- :meth:`~BusinessDay.apply_index` is deprecated and will be removed in a future version. Use ``offset + other`` instead (:issue:`34580`) - :meth:`DataFrame.tshift` and :meth:`Series.tshift` are deprecated and will be removed in a future version, use :meth:`DataFrame.shift` and :meth:`Series.shift` instead (:issue:`11631`) - Indexing an :class:`Index` object with a float key is deprecated, and will raise an ``IndexError`` in the future. You can manually convert to an integer key diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx index b0c6648514e99..9a7ca15a2a1c2 100644 --- a/pandas/_libs/tslibs/offsets.pyx +++ b/pandas/_libs/tslibs/offsets.pyx @@ -86,19 +86,38 @@ cdef bint _is_normalized(datetime dt): return True +def apply_wrapper_core(func, self, other) -> ndarray: + result = func(self, other) + result = np.asarray(result) + + if self.normalize: + # TODO: Avoid circular/runtime import + from .vectorized import normalize_i8_timestamps + result = normalize_i8_timestamps(result.view("i8"), None) + + return result + + def apply_index_wraps(func): # Note: normally we would use `@functools.wraps(func)`, but this does # not play nicely with cython class methods - def wrapper(self, other) -> np.ndarray: + def wrapper(self, other): # other is a DatetimeArray + result = apply_wrapper_core(func, self, other) + result = type(other)(result) + warnings.warn("'Offset.apply_index(other)' is deprecated. " + "Use 'offset + other' instead.", FutureWarning) + return result - result = func(self, other) - result = np.asarray(result) + return wrapper - if self.normalize: - # TODO: Avoid circular/runtime import - from .vectorized import normalize_i8_timestamps - result = normalize_i8_timestamps(result.view("i8"), None) + +def apply_array_wraps(func): + # Note: normally we would use `@functools.wraps(func)`, but this does + # not play nicely with cython class methods + def wrapper(self, other) -> np.ndarray: + # other is a DatetimeArray + result = apply_wrapper_core(func, self, other) return result # do @functools.wraps(func) manually since it doesn't work on cdef funcs @@ -515,6 +534,10 @@ cdef class BaseOffset: raises NotImplementedError for offsets without a vectorized implementation. + .. deprecated:: 1.1.0 + + Use ``offset + dtindex`` instead. + Parameters ---------- index : DatetimeIndex @@ -522,12 +545,25 @@ cdef class BaseOffset: Returns ------- DatetimeIndex + + Raises + ------ + NotImplementedError + When the specific offset subclass does not have a vectorized + implementation. 
""" raise NotImplementedError( f"DateOffset subclass {type(self).__name__} " "does not have a vectorized implementation" ) + @apply_array_wraps + def _apply_array(self, dtarr): + raise NotImplementedError( + f"DateOffset subclass {type(self).__name__} " + "does not have a vectorized implementation" + ) + def rollback(self, dt) -> datetime: """ Roll provided date backward to next offset only if not on offset. @@ -992,7 +1028,11 @@ cdef class RelativeDeltaOffset(BaseOffset): ------- ndarray[datetime64[ns]] """ - dt64other = np.asarray(dtindex) + return self._apply_array(dtindex) + + @apply_array_wraps + def _apply_array(self, dtarr): + dt64other = np.asarray(dtarr) kwds = self.kwds relativedelta_fast = { "years", @@ -1321,7 +1361,11 @@ cdef class BusinessDay(BusinessMixin): @apply_index_wraps def apply_index(self, dtindex): - i8other = dtindex.view("i8") + return self._apply_array(dtindex) + + @apply_array_wraps + def _apply_array(self, dtarr): + i8other = dtarr.view("i8") return shift_bdays(i8other, self.n) def is_on_offset(self, dt: datetime) -> bool: @@ -1804,8 +1848,12 @@ cdef class YearOffset(SingleConstructorOffset): @apply_index_wraps def apply_index(self, dtindex): + return self._apply_array(dtindex) + + @apply_array_wraps + def _apply_array(self, dtarr): shifted = shift_quarters( - dtindex.view("i8"), self.n, self.month, self._day_opt, modby=12 + dtarr.view("i8"), self.n, self.month, self._day_opt, modby=12 ) return shifted @@ -1957,8 +2005,12 @@ cdef class QuarterOffset(SingleConstructorOffset): @apply_index_wraps def apply_index(self, dtindex): + return self._apply_array(dtindex) + + @apply_array_wraps + def _apply_array(self, dtarr): shifted = shift_quarters( - dtindex.view("i8"), self.n, self.startingMonth, self._day_opt + dtarr.view("i8"), self.n, self.startingMonth, self._day_opt ) return shifted @@ -2072,7 +2124,11 @@ cdef class MonthOffset(SingleConstructorOffset): @apply_index_wraps def apply_index(self, dtindex): - shifted = shift_months(dtindex.view("i8"), self.n, self._day_opt) + return self._apply_array(dtindex) + + @apply_array_wraps + def _apply_array(self, dtarr): + shifted = shift_months(dtarr.view("i8"), self.n, self._day_opt) return shifted cpdef __setstate__(self, state): @@ -2209,8 +2265,14 @@ cdef class SemiMonthOffset(SingleConstructorOffset): @cython.wraparound(False) @cython.boundscheck(False) def apply_index(self, dtindex): + return self._apply_array(dtindex) + + @apply_array_wraps + @cython.wraparound(False) + @cython.boundscheck(False) + def _apply_array(self, dtarr): cdef: - int64_t[:] i8other = dtindex.view("i8") + int64_t[:] i8other = dtarr.view("i8") Py_ssize_t i, count = len(i8other) int64_t val int64_t[:] out = np.empty(count, dtype="i8") @@ -2368,12 +2430,16 @@ cdef class Week(SingleConstructorOffset): @apply_index_wraps def apply_index(self, dtindex): + return self._apply_array(dtindex) + + @apply_array_wraps + def _apply_array(self, dtarr): if self.weekday is None: td = timedelta(days=7 * self.n) td64 = np.timedelta64(td, "ns") - return dtindex + td64 + return dtarr + td64 else: - i8other = dtindex.view("i8") + i8other = dtarr.view("i8") return self._end_apply_index(i8other) @cython.wraparound(False) @@ -3146,6 +3212,9 @@ cdef class CustomBusinessDay(BusinessDay): def apply_index(self, dtindex): raise NotImplementedError + def _apply_array(self, dtarr): + raise NotImplementedError + def is_on_offset(self, dt: datetime) -> bool: if self.normalize and not _is_normalized(dt): return False diff --git a/pandas/core/arrays/datetimes.py 
b/pandas/core/arrays/datetimes.py index 7058ed3682d59..d674b1c476d2c 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -683,7 +683,7 @@ def _add_offset(self, offset): values = self.tz_localize(None) else: values = self - result = offset.apply_index(values) + result = offset._apply_array(values) result = DatetimeArray._simple_new(result) result = result.tz_localize(self.tz) diff --git a/pandas/tests/tseries/offsets/test_offsets.py b/pandas/tests/tseries/offsets/test_offsets.py index cffaa7b43d0cf..8c51908c547f4 100644 --- a/pandas/tests/tseries/offsets/test_offsets.py +++ b/pandas/tests/tseries/offsets/test_offsets.py @@ -3663,14 +3663,19 @@ def test_offset(self, case): @pytest.mark.parametrize("case", offset_cases) def test_apply_index(self, case): + # https://github.com/pandas-dev/pandas/issues/34580 offset, cases = case s = DatetimeIndex(cases.keys()) + exp = DatetimeIndex(cases.values()) + with tm.assert_produces_warning(None): # GH#22535 check that we don't get a FutureWarning from adding # an integer array to PeriodIndex result = offset + s + tm.assert_index_equal(result, exp) - exp = DatetimeIndex(cases.values()) + with tm.assert_produces_warning(FutureWarning): + result = offset.apply_index(s) tm.assert_index_equal(result, exp) on_offset_cases = [ From 8090479f74a1d1d0900b7297d5388694fd78b8be Mon Sep 17 00:00:00 2001 From: Christos Petropoulos Date: Wed, 15 Jul 2020 14:27:21 +0200 Subject: [PATCH 129/632] Place the calculation of mask prior to the calls of comp in replace_list to improve performance (#35229) --- pandas/core/internals/managers.py | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py index c82670106d3b6..d5947726af7fd 100644 --- a/pandas/core/internals/managers.py +++ b/pandas/core/internals/managers.py @@ -596,18 +596,22 @@ def replace_list( # figure out our mask apriori to avoid repeated replacements values = self.as_array() - def comp(s, regex=False): + def comp(s: Scalar, mask: np.ndarray, regex: bool = False): """ Generate a bool array by perform an equality check, or perform an element-wise regular expression matching """ if isna(s): - return isna(values) + return ~mask s = com.maybe_box_datetimelike(s) - return _compare_or_regex_search(values, s, regex) + return _compare_or_regex_search(values, s, regex, mask) - masks = [comp(s, regex) for s in src_list] + # Calculate the mask once, prior to the call of comp + # in order to avoid repeating the same computations + mask = ~isna(values) + + masks = [comp(s, mask, regex) for s in src_list] result_blocks = [] src_len = len(src_list) - 1 @@ -1895,7 +1899,7 @@ def _merge_blocks( def _compare_or_regex_search( - a: ArrayLike, b: Scalar, regex: bool = False + a: ArrayLike, b: Scalar, regex: bool = False, mask: Optional[ArrayLike] = None ) -> Union[ArrayLike, bool]: """ Compare two array_like inputs of the same shape or two scalar values @@ -1908,6 +1912,7 @@ def _compare_or_regex_search( a : array_like b : scalar regex : bool, default False + mask : array_like or None (default) Returns ------- @@ -1941,7 +1946,7 @@ def _check_comparison_types( ) # GH#32621 use mask to avoid comparing to NAs - if isinstance(a, np.ndarray) and not isinstance(b, np.ndarray): + if mask is None and isinstance(a, np.ndarray) and not isinstance(b, np.ndarray): mask = np.reshape(~(isna(a)), a.shape) if isinstance(a, np.ndarray): a = a[mask] @@ -1953,7 +1958,7 @@ def _check_comparison_types( result = 
op(a) - if isinstance(result, np.ndarray): + if isinstance(result, np.ndarray) and mask is not None: # The shape of the mask can differ to that of the result # since we may compare only a subset of a's or b's elements tmp = np.zeros(mask.shape, dtype=np.bool_) From 28810c442de2dcf30e2d31cea1a42f78a08f33bd Mon Sep 17 00:00:00 2001 From: Matthew Roeschke Date: Wed, 15 Jul 2020 05:28:03 -0700 Subject: [PATCH 130/632] ENH: Add compute.use_numba configuration for automatically using numba (#35182) --- doc/source/whatsnew/v1.1.0.rst | 1 + pandas/core/config_init.py | 17 +++++++++ pandas/core/groupby/generic.py | 23 ++++++------ pandas/core/groupby/groupby.py | 9 +++-- pandas/core/groupby/ops.py | 7 ++-- pandas/core/util/numba_.py | 13 +++++++ pandas/core/window/rolling.py | 37 +++++++++----------- pandas/tests/groupby/aggregate/test_numba.py | 17 ++++++++- pandas/tests/groupby/transform/test_numba.py | 17 ++++++++- pandas/tests/util/test_numba.py | 12 +++++++ pandas/tests/window/test_numba.py | 14 +++++++- 11 files changed, 124 insertions(+), 43 deletions(-) create mode 100644 pandas/tests/util/test_numba.py diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst index 85661c0507caa..4b893bcd0c87d 100644 --- a/doc/source/whatsnew/v1.1.0.rst +++ b/doc/source/whatsnew/v1.1.0.rst @@ -338,6 +338,7 @@ Other enhancements - :meth:`read_csv` now accepts string values like "0", "0.0", "1", "1.0" as convertible to the nullable boolean dtype (:issue:`34859`) - :class:`pandas.core.window.ExponentialMovingWindow` now supports a ``times`` argument that allows ``mean`` to be calculated with observations spaced by the timestamps in ``times`` (:issue:`34839`) - :meth:`DataFrame.agg` and :meth:`Series.agg` now accept named aggregation for renaming the output columns/indexes. (:issue:`26513`) +- ``compute.use_numba`` now exists as a configuration option that utilizes the numba engine when available (:issue:`33966`) .. 
--------------------------------------------------------------------------- diff --git a/pandas/core/config_init.py b/pandas/core/config_init.py index 54d23fe8829e6..86f6be77bc505 100644 --- a/pandas/core/config_init.py +++ b/pandas/core/config_init.py @@ -52,6 +52,20 @@ def use_numexpr_cb(key): expressions.set_use_numexpr(cf.get_option(key)) +use_numba_doc = """ +: bool + Use the numba engine option for select operations if it is installed, + the default is False + Valid values: False,True +""" + + +def use_numba_cb(key): + from pandas.core.util import numba_ + + numba_.set_use_numba(cf.get_option(key)) + + with cf.config_prefix("compute"): cf.register_option( "use_bottleneck", @@ -63,6 +77,9 @@ def use_numexpr_cb(key): cf.register_option( "use_numexpr", True, use_numexpr_doc, validator=is_bool, cb=use_numexpr_cb ) + cf.register_option( + "use_numba", False, use_numba_doc, validator=is_bool, cb=use_numba_cb + ) # # options from the "display" namespace diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index 94dc216c82f55..b9a8f3d5a5176 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -80,6 +80,7 @@ from pandas.core.util.numba_ import ( NUMBA_FUNC_CACHE, generate_numba_func, + maybe_use_numba, split_for_numba, ) @@ -227,9 +228,7 @@ def apply(self, func, *args, **kwargs): @doc( _agg_template, examples=_agg_examples_doc, klass="Series", ) - def aggregate( - self, func=None, *args, engine="cython", engine_kwargs=None, **kwargs - ): + def aggregate(self, func=None, *args, engine=None, engine_kwargs=None, **kwargs): relabeling = func is None columns = None @@ -483,7 +482,7 @@ def _aggregate_named(self, func, *args, **kwargs): @Substitution(klass="Series") @Appender(_transform_template) - def transform(self, func, *args, engine="cython", engine_kwargs=None, **kwargs): + def transform(self, func, *args, engine=None, engine_kwargs=None, **kwargs): func = self._get_cython_func(func) or func if not isinstance(func, str): @@ -515,7 +514,7 @@ def _transform_general( Transform with a non-str `func`. 
""" - if engine == "numba": + if maybe_use_numba(engine): numba_func, cache_key = generate_numba_func( func, engine_kwargs, kwargs, "groupby_transform" ) @@ -525,7 +524,7 @@ def _transform_general( results = [] for name, group in self: object.__setattr__(group, "name", name) - if engine == "numba": + if maybe_use_numba(engine): values, index = split_for_numba(group) res = numba_func(values, index, *args) if cache_key not in NUMBA_FUNC_CACHE: @@ -934,13 +933,11 @@ class DataFrameGroupBy(GroupBy[DataFrame]): @doc( _agg_template, examples=_agg_examples_doc, klass="DataFrame", ) - def aggregate( - self, func=None, *args, engine="cython", engine_kwargs=None, **kwargs - ): + def aggregate(self, func=None, *args, engine=None, engine_kwargs=None, **kwargs): relabeling, func, columns, order = reconstruct_func(func, **kwargs) - if engine == "numba": + if maybe_use_numba(engine): return self._python_agg_general( func, *args, engine=engine, engine_kwargs=engine_kwargs, **kwargs ) @@ -1385,7 +1382,7 @@ def _transform_general( applied = [] obj = self._obj_with_exclusions gen = self.grouper.get_iterator(obj, axis=self.axis) - if engine == "numba": + if maybe_use_numba(engine): numba_func, cache_key = generate_numba_func( func, engine_kwargs, kwargs, "groupby_transform" ) @@ -1395,7 +1392,7 @@ def _transform_general( for name, group in gen: object.__setattr__(group, "name", name) - if engine == "numba": + if maybe_use_numba(engine): values, index = split_for_numba(group) res = numba_func(values, index, *args) if cache_key not in NUMBA_FUNC_CACHE: @@ -1446,7 +1443,7 @@ def _transform_general( @Substitution(klass="DataFrame") @Appender(_transform_template) - def transform(self, func, *args, engine="cython", engine_kwargs=None, **kwargs): + def transform(self, func, *args, engine=None, engine_kwargs=None, **kwargs): # optimized transforms func = self._get_cython_func(func) or func diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index d039b715b3c08..65483abbd2a6e 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -65,6 +65,7 @@ class providing the base-class of operations. from pandas.core.indexes.api import CategoricalIndex, Index, MultiIndex from pandas.core.series import Series from pandas.core.sorting import get_group_index_sorter +from pandas.core.util.numba_ import maybe_use_numba _common_see_also = """ See Also @@ -286,9 +287,10 @@ class providing the base-class of operations. .. versionchanged:: 1.1.0 *args Positional arguments to pass to func -engine : str, default 'cython' +engine : str, default None * ``'cython'`` : Runs the function through C-extensions from cython. * ``'numba'`` : Runs the function through JIT compiled code from numba. + * ``None`` : Defaults to ``'cython'`` or globally setting ``compute.use_numba`` .. versionadded:: 1.1.0 engine_kwargs : dict, default None @@ -393,9 +395,10 @@ class providing the base-class of operations. .. versionchanged:: 1.1.0 *args Positional arguments to pass to func -engine : str, default 'cython' +engine : str, default None * ``'cython'`` : Runs the function through C-extensions from cython. * ``'numba'`` : Runs the function through JIT compiled code from numba. + * ``None`` : Defaults to ``'cython'`` or globally setting ``compute.use_numba`` .. 
versionadded:: 1.1.0 engine_kwargs : dict, default None @@ -1063,7 +1066,7 @@ def _python_agg_general( # agg_series below assumes ngroups > 0 continue - if engine == "numba": + if maybe_use_numba(engine): result, counts = self.grouper.agg_series( obj, func, diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py index 74db87f46c5e2..3aaeef3b63760 100644 --- a/pandas/core/groupby/ops.py +++ b/pandas/core/groupby/ops.py @@ -58,6 +58,7 @@ from pandas.core.util.numba_ import ( NUMBA_FUNC_CACHE, generate_numba_func, + maybe_use_numba, split_for_numba, ) @@ -620,7 +621,7 @@ def agg_series( # Caller is responsible for checking ngroups != 0 assert self.ngroups != 0 - if engine == "numba": + if maybe_use_numba(engine): return self._aggregate_series_pure_python( obj, func, *args, engine=engine, engine_kwargs=engine_kwargs, **kwargs ) @@ -678,7 +679,7 @@ def _aggregate_series_pure_python( **kwargs, ): - if engine == "numba": + if maybe_use_numba(engine): numba_func, cache_key = generate_numba_func( func, engine_kwargs, kwargs, "groupby_agg" ) @@ -691,7 +692,7 @@ def _aggregate_series_pure_python( splitter = get_splitter(obj, group_index, ngroups, axis=0) for label, group in splitter: - if engine == "numba": + if maybe_use_numba(engine): values, index = split_for_numba(group) res = numba_func(values, index, *args) if cache_key not in NUMBA_FUNC_CACHE: diff --git a/pandas/core/util/numba_.py b/pandas/core/util/numba_.py index c3f60ea7cc217..c9b7943478cdd 100644 --- a/pandas/core/util/numba_.py +++ b/pandas/core/util/numba_.py @@ -10,9 +10,22 @@ from pandas.compat._optional import import_optional_dependency from pandas.errors import NumbaUtilError +GLOBAL_USE_NUMBA: bool = False NUMBA_FUNC_CACHE: Dict[Tuple[Callable, str], Callable] = dict() +def maybe_use_numba(engine: Optional[str]) -> bool: + """Signal whether to use numba routines.""" + return engine == "numba" or (engine is None and GLOBAL_USE_NUMBA) + + +def set_use_numba(enable: bool = False) -> None: + global GLOBAL_USE_NUMBA + if enable: + import_optional_dependency("numba") + GLOBAL_USE_NUMBA = enable + + def check_kwargs_and_nopython( kwargs: Optional[Dict] = None, nopython: Optional[bool] = None ) -> None: diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py index 8cb53ebd92214..48953f6a75487 100644 --- a/pandas/core/window/rolling.py +++ b/pandas/core/window/rolling.py @@ -39,7 +39,7 @@ import pandas.core.common as com from pandas.core.construction import extract_array from pandas.core.indexes.api import Index, MultiIndex, ensure_index -from pandas.core.util.numba_ import NUMBA_FUNC_CACHE +from pandas.core.util.numba_ import NUMBA_FUNC_CACHE, maybe_use_numba from pandas.core.window.common import ( WindowGroupByMixin, _doc_template, @@ -1298,10 +1298,11 @@ def count(self): objects instead. If you are just applying a NumPy reduction function this will achieve much better performance. - engine : str, default 'cython' + engine : str, default None * ``'cython'`` : Runs rolling apply through C-extensions from cython. * ``'numba'`` : Runs rolling apply through JIT compiled code from numba. Only available when ``raw`` is set to ``True``. + * ``None`` : Defaults to ``'cython'`` or globally setting ``compute.use_numba`` .. 
versionadded:: 1.0.0 @@ -1357,18 +1358,7 @@ def apply( if not is_bool(raw): raise ValueError("raw parameter must be `True` or `False`") - if engine == "cython": - if engine_kwargs is not None: - raise ValueError("cython engine does not accept engine_kwargs") - # Cython apply functions handle center, so don't need to use - # _apply's center handling - window = self._get_window() - offset = calculate_center_offset(window) if self.center else 0 - apply_func = self._generate_cython_apply_func( - args, kwargs, raw, offset, func - ) - center = False - elif engine == "numba": + if maybe_use_numba(engine): if raw is False: raise ValueError("raw must be `True` when using the numba engine") cache_key = (func, "rolling_apply") @@ -1380,6 +1370,17 @@ def apply( args, kwargs, func, engine_kwargs ) center = self.center + elif engine in ("cython", None): + if engine_kwargs is not None: + raise ValueError("cython engine does not accept engine_kwargs") + # Cython apply functions handle center, so don't need to use + # _apply's center handling + window = self._get_window() + offset = calculate_center_offset(window) if self.center else 0 + apply_func = self._generate_cython_apply_func( + args, kwargs, raw, offset, func + ) + center = False else: raise ValueError("engine must be either 'numba' or 'cython'") @@ -2053,13 +2054,7 @@ def count(self): @Substitution(name="rolling") @Appender(_shared_docs["apply"]) def apply( - self, - func, - raw=False, - engine="cython", - engine_kwargs=None, - args=None, - kwargs=None, + self, func, raw=False, engine=None, engine_kwargs=None, args=None, kwargs=None, ): return super().apply( func, diff --git a/pandas/tests/groupby/aggregate/test_numba.py b/pandas/tests/groupby/aggregate/test_numba.py index 726d79535184a..690694b0e66f5 100644 --- a/pandas/tests/groupby/aggregate/test_numba.py +++ b/pandas/tests/groupby/aggregate/test_numba.py @@ -4,7 +4,7 @@ from pandas.errors import NumbaUtilError import pandas.util._test_decorators as td -from pandas import DataFrame +from pandas import DataFrame, option_context import pandas._testing as tm from pandas.core.util.numba_ import NUMBA_FUNC_CACHE @@ -113,3 +113,18 @@ def func_2(values, index): result = grouped.agg(func_1, engine="numba", engine_kwargs=engine_kwargs) expected = grouped.agg(lambda x: np.mean(x) - 3.4, engine="cython") tm.assert_equal(result, expected) + + +@td.skip_if_no("numba", "0.46.0") +def test_use_global_config(): + def func_1(values, index): + return np.mean(values) - 3.4 + + data = DataFrame( + {0: ["a", "a", "b", "b", "a"], 1: [1.0, 2.0, 3.0, 4.0, 5.0]}, columns=[0, 1], + ) + grouped = data.groupby(0) + expected = grouped.agg(func_1, engine="numba") + with option_context("compute.use_numba", True): + result = grouped.agg(func_1, engine=None) + tm.assert_frame_equal(expected, result) diff --git a/pandas/tests/groupby/transform/test_numba.py b/pandas/tests/groupby/transform/test_numba.py index 9a4015ac983c5..ee482571e644d 100644 --- a/pandas/tests/groupby/transform/test_numba.py +++ b/pandas/tests/groupby/transform/test_numba.py @@ -3,7 +3,7 @@ from pandas.errors import NumbaUtilError import pandas.util._test_decorators as td -from pandas import DataFrame +from pandas import DataFrame, option_context import pandas._testing as tm from pandas.core.util.numba_ import NUMBA_FUNC_CACHE @@ -112,3 +112,18 @@ def func_2(values, index): result = grouped.transform(func_1, engine="numba", engine_kwargs=engine_kwargs) expected = grouped.transform(lambda x: x + 1, engine="cython") tm.assert_equal(result, expected) + + 
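The dispatch these tests pin down is compact: ``engine=None`` now defers to the new ``compute.use_numba`` option through the ``maybe_use_numba`` helper added in ``pandas/core/util/numba_.py`` above. A minimal standalone mirror of that logic (illustrative only, not the pandas module itself):

    from typing import Optional

    GLOBAL_USE_NUMBA: bool = False  # stands in for the "compute.use_numba" option


    def maybe_use_numba(engine: Optional[str]) -> bool:
        # An explicit engine="numba" always wins; engine=None falls back to the
        # global flag, and any other engine (e.g. "cython") means "not numba".
        return engine == "numba" or (engine is None and GLOBAL_USE_NUMBA)


    assert maybe_use_numba("numba") is True
    assert maybe_use_numba(None) is False   # default resolves to cython
    GLOBAL_USE_NUMBA = True
    assert maybe_use_numba(None) is True    # the option flips only the None case
    assert maybe_use_numba("cython") is False

End users reach the same switch via ``pd.set_option("compute.use_numba", True)`` (or ``option_context``, as the tests around this hunk do), which also import-checks numba through ``set_use_numba``.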
+@td.skip_if_no("numba", "0.46.0") +def test_use_global_config(): + def func_1(values, index): + return values + 1 + + data = DataFrame( + {0: ["a", "a", "b", "b", "a"], 1: [1.0, 2.0, 3.0, 4.0, 5.0]}, columns=[0, 1], + ) + grouped = data.groupby(0) + expected = grouped.transform(func_1, engine="numba") + with option_context("compute.use_numba", True): + result = grouped.transform(func_1, engine=None) + tm.assert_frame_equal(expected, result) diff --git a/pandas/tests/util/test_numba.py b/pandas/tests/util/test_numba.py new file mode 100644 index 0000000000000..27b68ff0f6044 --- /dev/null +++ b/pandas/tests/util/test_numba.py @@ -0,0 +1,12 @@ +import pytest + +import pandas.util._test_decorators as td + +from pandas import option_context + + +@td.skip_if_installed("numba") +def test_numba_not_installed_option_context(): + with pytest.raises(ImportError, match="Missing optional"): + with option_context("compute.use_numba", True): + pass diff --git a/pandas/tests/window/test_numba.py b/pandas/tests/window/test_numba.py index 7e049af0ca1f8..35bdb972a7bc0 100644 --- a/pandas/tests/window/test_numba.py +++ b/pandas/tests/window/test_numba.py @@ -3,7 +3,7 @@ import pandas.util._test_decorators as td -from pandas import Series +from pandas import Series, option_context import pandas._testing as tm from pandas.core.util.numba_ import NUMBA_FUNC_CACHE @@ -75,3 +75,15 @@ def func_2(x): ) expected = roll.apply(func_1, engine="cython", raw=True) tm.assert_series_equal(result, expected) + + +@td.skip_if_no("numba", "0.46.0") +def test_use_global_config(): + def f(x): + return np.mean(x) + 2 + + s = Series(range(10)) + with option_context("compute.use_numba", True): + result = s.rolling(2).apply(f, engine=None, raw=True) + expected = s.rolling(2).apply(f, engine="numba", raw=True) + tm.assert_series_equal(expected, result) From 08b9c1b9cd502a94c34bd7afe92b574316f443f8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Quang=20Nguy=E1=BB=85n?= <30631476+quangngd@users.noreply.github.com> Date: Wed, 15 Jul 2020 19:29:53 +0700 Subject: [PATCH 131/632] ENH: Add index option to_markdown() (#33091) --- doc/source/whatsnew/v1.1.0.rst | 1 + pandas/core/frame.py | 15 ++++++++- pandas/core/series.py | 13 ++++++-- pandas/tests/io/formats/test_to_markdown.py | 35 +++++++++++++++++++++ 4 files changed, 61 insertions(+), 3 deletions(-) diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst index 4b893bcd0c87d..814dbe999d5c1 100644 --- a/doc/source/whatsnew/v1.1.0.rst +++ b/doc/source/whatsnew/v1.1.0.rst @@ -335,6 +335,7 @@ Other enhancements - :meth:`DataFrame.to_html` and :meth:`DataFrame.to_string`'s ``col_space`` parameter now accepts a list or dict to change only some specific columns' width (:issue:`28917`). - :meth:`DataFrame.to_excel` can now also write OpenOffice spreadsheet (.ods) files (:issue:`27222`) - :meth:`~Series.explode` now accepts ``ignore_index`` to reset the index, similarly to :meth:`pd.concat` or :meth:`DataFrame.sort_values` (:issue:`34932`). 
+- :meth:`DataFrame.to_markdown` and :meth:`Series.to_markdown` now accept ``index`` argument as an alias for tabulate's ``showindex`` (:issue:`32667`) - :meth:`read_csv` now accepts string values like "0", "0.0", "1", "1.0" as convertible to the nullable boolean dtype (:issue:`34859`) - :class:`pandas.core.window.ExponentialMovingWindow` now supports a ``times`` argument that allows ``mean`` to be calculated with observations spaced by the timestamps in ``times`` (:issue:`34839`) - :meth:`DataFrame.agg` and :meth:`Series.agg` now accept named aggregation for renaming the output columns/indexes. (:issue:`26513`) diff --git a/pandas/core/frame.py b/pandas/core/frame.py index cfe5621fec14e..0268d19e00b97 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -2236,10 +2236,23 @@ def to_feather(self, path, **kwargs) -> None: """, ) def to_markdown( - self, buf: Optional[IO[str]] = None, mode: Optional[str] = None, **kwargs + self, + buf: Optional[IO[str]] = None, + mode: Optional[str] = None, + index: bool = True, + **kwargs, ) -> Optional[str]: + if "showindex" in kwargs: + warnings.warn( + "'showindex' is deprecated. Only 'index' will be used " + "in a future version. Use 'index' to silence this warning.", + FutureWarning, + stacklevel=2, + ) + kwargs.setdefault("headers", "keys") kwargs.setdefault("tablefmt", "pipe") + kwargs.setdefault("showindex", index) tabulate = import_optional_dependency("tabulate") result = tabulate.tabulate(self, **kwargs) if buf is None: diff --git a/pandas/core/series.py b/pandas/core/series.py index 9a633079b8c1d..ef3be854bc3bb 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -1419,7 +1419,11 @@ def to_string( ), ) def to_markdown( - self, buf: Optional[IO[str]] = None, mode: Optional[str] = None, **kwargs + self, + buf: Optional[IO[str]] = None, + mode: Optional[str] = None, + index: bool = True, + **kwargs, ) -> Optional[str]: """ Print {klass} in Markdown-friendly format. @@ -1432,6 +1436,11 @@ def to_markdown( Buffer to write to. If None, the output is returned as a string. mode : str, optional Mode in which file is opened. + index : bool, optional, default True + Add index (row) labels. + + .. versionadded:: 1.1.0 + **kwargs These parameters will be passed to `tabulate \ `_. 
@@ -1467,7 +1476,7 @@ def to_markdown( | 3 | quetzal | +----+----------+ """ - return self.to_frame().to_markdown(buf, mode, **kwargs) + return self.to_frame().to_markdown(buf, mode, index, **kwargs) # ---------------------------------------------------------------------- diff --git a/pandas/tests/io/formats/test_to_markdown.py b/pandas/tests/io/formats/test_to_markdown.py index 8893e4294353f..5223b313fef4f 100644 --- a/pandas/tests/io/formats/test_to_markdown.py +++ b/pandas/tests/io/formats/test_to_markdown.py @@ -3,6 +3,7 @@ import pytest import pandas as pd +import pandas._testing as tm pytest.importorskip("tabulate") @@ -53,3 +54,37 @@ def test_no_buf(capsys): assert ( result == "| | 0 |\n|---:|----:|\n| 0 | 1 |\n| 1 | 2 |\n| 2 | 3 |" ) + + +@pytest.mark.parametrize("index", [True, False, None]) +@pytest.mark.parametrize("showindex", [True, False, None]) +def test_index(index, showindex): + # GH 32667 + kwargs = {} + if index is not None: + kwargs["index"] = index + if showindex is not None: + kwargs["showindex"] = showindex + + df = pd.DataFrame([1, 2, 3]) + yes_index_result = ( + "| | 0 |\n|---:|----:|\n| 0 | 1 |\n| 1 | 2 |\n| 2 | 3 |" + ) + no_index_result = "| 0 |\n|----:|\n| 1 |\n| 2 |\n| 3 |" + + warning = FutureWarning if "showindex" in kwargs else None + with tm.assert_produces_warning(warning): + result = df.to_markdown(**kwargs) + + if "showindex" in kwargs: + # give showindex higher priority if specified + if showindex: + expected = yes_index_result + else: + expected = no_index_result + else: + if index in [True, None]: + expected = yes_index_result + else: + expected = no_index_result + assert result == expected From 492e3e97a01ec5abc1c2f0b0034e62f119b02522 Mon Sep 17 00:00:00 2001 From: Andrew Wieteska <48889395+arw2019@users.noreply.github.com> Date: Wed, 15 Jul 2020 09:43:44 -0400 Subject: [PATCH 132/632] WIP CI: MacPython failing TestPandasContainer.test_to_json_large_numbers (#35184) --- pandas/tests/io/json/test_pandas.py | 21 +++++++++++++++------ 1 file changed, 15 insertions(+), 6 deletions(-) diff --git a/pandas/tests/io/json/test_pandas.py b/pandas/tests/io/json/test_pandas.py index 10f49b9b81528..97b53a6e66575 100644 --- a/pandas/tests/io/json/test_pandas.py +++ b/pandas/tests/io/json/test_pandas.py @@ -1250,23 +1250,32 @@ def test_to_json_large_numbers(self, bigNum): json = series.to_json() expected = '{"articleId":' + str(bigNum) + "}" assert json == expected - # GH 20599 + + df = DataFrame(bigNum, dtype=object, index=["articleId"], columns=[0]) + json = df.to_json() + expected = '{"0":{"articleId":' + str(bigNum) + "}}" + assert json == expected + + @pytest.mark.parametrize("bigNum", [sys.maxsize + 1, -(sys.maxsize + 2)]) + @pytest.mark.skipif(sys.maxsize <= 2 ** 32, reason="GH-35279") + def test_read_json_large_numbers(self, bigNum): + # GH20599 + + series = Series(bigNum, dtype=object, index=["articleId"]) + json = '{"articleId":' + str(bigNum) + "}" with pytest.raises(ValueError): json = StringIO(json) result = read_json(json) tm.assert_series_equal(series, result) df = DataFrame(bigNum, dtype=object, index=["articleId"], columns=[0]) - json = df.to_json() - expected = '{"0":{"articleId":' + str(bigNum) + "}}" - assert json == expected - # GH 20599 + json = '{"0":{"articleId":' + str(bigNum) + "}}" with pytest.raises(ValueError): json = StringIO(json) result = read_json(json) tm.assert_frame_equal(df, result) - def test_read_json_large_numbers(self): + def test_read_json_large_numbers2(self): # GH18842 json = '{"articleId": "1404366058080022500245"}' 
json = StringIO(json) From 7b718baccf070d38114f1455cc8d475644082a2f Mon Sep 17 00:00:00 2001 From: Tom Augspurger Date: Wed, 15 Jul 2020 13:04:47 -0500 Subject: [PATCH 133/632] xfail 32-bit tests (#35289) --- pandas/compat/__init__.py | 1 + pandas/tests/io/json/test_pandas.py | 4 ++-- pandas/tests/io/json/test_ujson.py | 1 + pandas/tests/resample/test_resampler_grouper.py | 3 ++- 4 files changed, 6 insertions(+), 3 deletions(-) diff --git a/pandas/compat/__init__.py b/pandas/compat/__init__.py index f7bb73b916ce0..b5a1dc2b2fb94 100644 --- a/pandas/compat/__init__.py +++ b/pandas/compat/__init__.py @@ -18,6 +18,7 @@ PY38 = sys.version_info >= (3, 8) PY39 = sys.version_info >= (3, 9) PYPY = platform.python_implementation() == "PyPy" +IS64 = sys.maxsize > 2 ** 32 # ---------------------------------------------------------------------------- diff --git a/pandas/tests/io/json/test_pandas.py b/pandas/tests/io/json/test_pandas.py index 97b53a6e66575..c4db0170ecc90 100644 --- a/pandas/tests/io/json/test_pandas.py +++ b/pandas/tests/io/json/test_pandas.py @@ -13,7 +13,7 @@ import pandas.util._test_decorators as td import pandas as pd -from pandas import DataFrame, DatetimeIndex, Series, Timestamp, read_json +from pandas import DataFrame, DatetimeIndex, Series, Timestamp, compat, read_json import pandas._testing as tm _seriesd = tm.getSeriesData() @@ -1257,7 +1257,7 @@ def test_to_json_large_numbers(self, bigNum): assert json == expected @pytest.mark.parametrize("bigNum", [sys.maxsize + 1, -(sys.maxsize + 2)]) - @pytest.mark.skipif(sys.maxsize <= 2 ** 32, reason="GH-35279") + @pytest.mark.skipif(not compat.IS64, reason="GH-35279") def test_read_json_large_numbers(self, bigNum): # GH20599 diff --git a/pandas/tests/io/json/test_ujson.py b/pandas/tests/io/json/test_ujson.py index 952c583040360..f969cbca9f427 100644 --- a/pandas/tests/io/json/test_ujson.py +++ b/pandas/tests/io/json/test_ujson.py @@ -561,6 +561,7 @@ def test_encode_long_conversion(self): assert long_input == ujson.decode(output) @pytest.mark.parametrize("bigNum", [sys.maxsize + 1, -(sys.maxsize + 2)]) + @pytest.mark.xfail(not compat.IS64, reason="GH-35288") def test_dumps_ints_larger_than_maxsize(self, bigNum): # GH34395 bigNum = sys.maxsize + 1 diff --git a/pandas/tests/resample/test_resampler_grouper.py b/pandas/tests/resample/test_resampler_grouper.py index cbf3a778f9ae0..b36b11582c1ec 100644 --- a/pandas/tests/resample/test_resampler_grouper.py +++ b/pandas/tests/resample/test_resampler_grouper.py @@ -6,7 +6,7 @@ from pandas.util._test_decorators import async_mark import pandas as pd -from pandas import DataFrame, Series, Timestamp +from pandas import DataFrame, Series, Timestamp, compat import pandas._testing as tm from pandas.core.indexes.datetimes import date_range @@ -317,6 +317,7 @@ def test_resample_groupby_with_label(): tm.assert_frame_equal(result, expected) +@pytest.mark.xfail(not compat.IS64, reason="GH-35148") def test_consistency_with_window(): # consistent return values with window From 32ed15d39312193a8c36171f0e120d0f46a26c2c Mon Sep 17 00:00:00 2001 From: Ali McMaster Date: Wed, 15 Jul 2020 20:07:51 +0100 Subject: [PATCH 134/632] BUG/TST: Read from Public s3 Bucket Without Creds (#34877) * Public Bucket Read Test --- pandas/io/common.py | 34 +++++++++++++++++++++++++++++++--- pandas/tests/io/test_s3.py | 31 +++++++++++++++++++++++++++++++ 2 files changed, 62 insertions(+), 3 deletions(-)
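The fallback this diff introduces can be approximated outside pandas in a few lines. This sketch drives fsspec directly and assumes only ``fsspec``/``s3fs`` are installed; the real patch additionally retries on botocore's ``ClientError`` and ``NoCredentialsError`` when botocore is available, and the bucket is the same public GDELT dataset the new tests read:

    import fsspec


    def open_with_anon_fallback(path: str, storage_options=None):
        # First attempt: whatever credentials (if any) the caller configured.
        try:
            return fsspec.open(path, mode="rb", **(storage_options or {})).open()
        except PermissionError:
            # Retry unsigned: anon=True is the s3fs option for anonymous access,
            # which is what public buckets need when no credentials resolve.
            opts = dict(storage_options or {})  # don't mutate caller input
            opts["anon"] = True
            return fsspec.open(path, mode="rb", **opts).open()


    handle = open_with_anon_fallback("s3://gdelt-open-data/events/1981.csv")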
diff --git a/pandas/io/common.py b/pandas/io/common.py index 51323c5ff3ef5..32ec088f00d88 100644 --- a/pandas/io/common.py +++ b/pandas/io/common.py @@ -202,9 +202,37 @@ def get_filepath_or_buffer( filepath_or_buffer = filepath_or_buffer.replace("s3n://", "s3://") fsspec = import_optional_dependency("fsspec") - file_obj = fsspec.open( - filepath_or_buffer, mode=mode or "rb", **(storage_options or {}) - ).open() + # If botocore is installed we fallback to reading with anon=True + # to allow reads from public buckets + err_types_to_retry_with_anon: List[Any] = [] + try: + import_optional_dependency("botocore") + from botocore.exceptions import ClientError, NoCredentialsError + + err_types_to_retry_with_anon = [ + ClientError, + NoCredentialsError, + PermissionError, + ] + except ImportError: + pass + + try: + file_obj = fsspec.open( + filepath_or_buffer, mode=mode or "rb", **(storage_options or {}) + ).open() + # GH 34626 Reads from Public Buckets without Credentials needs anon=True + except tuple(err_types_to_retry_with_anon): + if storage_options is None: + storage_options = {"anon": True} + else: + # don't mutate user input. + storage_options = dict(storage_options) + storage_options["anon"] = True + file_obj = fsspec.open( + filepath_or_buffer, mode=mode or "rb", **(storage_options or {}) + ).open() + return file_obj, encoding, compression, True if isinstance(filepath_or_buffer, (str, bytes, mmap.mmap)): diff --git a/pandas/tests/io/test_s3.py b/pandas/tests/io/test_s3.py index a76be9465f62a..5e0f7edf4d8ae 100644 --- a/pandas/tests/io/test_s3.py +++ b/pandas/tests/io/test_s3.py @@ -1,8 +1,12 @@ from io import BytesIO +import os import pytest +import pandas.util._test_decorators as td + from pandas import read_csv +import pandas._testing as tm def test_streaming_s3_objects(): @@ -15,3 +19,30 @@ def test_streaming_s3_objects(): for el in data: body = StreamingBody(BytesIO(el), content_length=len(el)) read_csv(body) + + +@tm.network +@td.skip_if_no("s3fs") +def test_read_without_creds_from_pub_bucket(): + # GH 34626 + # Use Amazon Open Data Registry - https://registry.opendata.aws/gdelt + result = read_csv("s3://gdelt-open-data/events/1981.csv", nrows=3) + assert len(result) == 3 + + +@tm.network +@td.skip_if_no("s3fs") +def test_read_with_creds_from_pub_bucket(): + # Ensure we can read from a public bucket with credentials + # GH 34626 + # Use Amazon Open Data Registry - https://registry.opendata.aws/gdelt + + with tm.ensure_safe_environment_variables(): + # temporary workaround as moto fails for botocore >= 1.11 otherwise, + # see https://github.com/spulec/moto/issues/1924 & 1952 + os.environ.setdefault("AWS_ACCESS_KEY_ID", "foobar_key") + os.environ.setdefault("AWS_SECRET_ACCESS_KEY", "foobar_secret") + df = read_csv( + "s3://gdelt-open-data/events/1981.csv", nrows=5, sep="\t", header=None, + ) + assert len(df) == 5 From f329d8e9b9114ea2eef68c85a1b2778101142631 Mon Sep 17 00:00:00 2001 From: Tom Augspurger Date: Wed, 15 Jul 2020 16:09:14 -0500 Subject: [PATCH 135/632] CI: xfail failing 32-bit tests (#35295) * CI: xfail failing 32-bit tests https://github.com/pandas-dev/pandas/issues/35294 --- pandas/tests/window/test_api.py | 5 +++-- pandas/tests/window/test_apply.py | 3 ++- pandas/tests/window/test_grouper.py | 8 +++++++- pandas/tests/window/test_timeseries_window.py | 3 +++ 4 files changed, 15 insertions(+), 4 deletions(-) diff --git a/pandas/tests/window/test_api.py b/pandas/tests/window/test_api.py index 2c3d8b4608806..28e27791cad35 100644 --- a/pandas/tests/window/test_api.py +++ b/pandas/tests/window/test_api.py @@ -6,7 +6,7 @@ import pandas.util._test_decorators as td import pandas as
pd -from pandas import DataFrame, Index, Series, Timestamp, concat +from pandas import DataFrame, Index, Series, Timestamp, compat, concat import pandas._testing as tm from pandas.core.base import SpecificationError @@ -277,7 +277,7 @@ def test_preserve_metadata(): @pytest.mark.parametrize( "func,window_size,expected_vals", [ - ( + pytest.param( "rolling", 2, [ @@ -289,6 +289,7 @@ def test_preserve_metadata(): [35.0, 40.0, 60.0, 40.0], [60.0, 80.0, 85.0, 80], ], + marks=pytest.mark.xfail(not compat.IS64, reason="GH-35294"), ), ( "expanding", diff --git a/pandas/tests/window/test_apply.py b/pandas/tests/window/test_apply.py index bc38634da8941..2aaf6af103e98 100644 --- a/pandas/tests/window/test_apply.py +++ b/pandas/tests/window/test_apply.py @@ -4,7 +4,7 @@ from pandas.errors import NumbaUtilError import pandas.util._test_decorators as td -from pandas import DataFrame, Index, MultiIndex, Series, Timestamp, date_range +from pandas import DataFrame, Index, MultiIndex, Series, Timestamp, compat, date_range import pandas._testing as tm @@ -142,6 +142,7 @@ def test_invalid_kwargs_nopython(): @pytest.mark.parametrize("args_kwargs", [[None, {"par": 10}], [(10,), None]]) +@pytest.mark.xfail(not compat.IS64, reason="GH-35294") def test_rolling_apply_args_kwargs(args_kwargs): # GH 33433 def foo(x, par): diff --git a/pandas/tests/window/test_grouper.py b/pandas/tests/window/test_grouper.py index 5b2687271f9d6..744ca264e91d9 100644 --- a/pandas/tests/window/test_grouper.py +++ b/pandas/tests/window/test_grouper.py @@ -2,7 +2,7 @@ import pytest import pandas as pd -from pandas import DataFrame, Series +from pandas import DataFrame, Series, compat import pandas._testing as tm from pandas.core.groupby.groupby import get_groupby @@ -23,6 +23,7 @@ def test_mutated(self): g = get_groupby(self.frame, by="A", mutated=True) assert g.mutated + @pytest.mark.xfail(not compat.IS64, reason="GH-35294") def test_getitem(self): g = self.frame.groupby("A") g_mutated = get_groupby(self.frame, by="A", mutated=True) @@ -55,6 +56,7 @@ def test_getitem_multiple(self): result = r.B.count() tm.assert_series_equal(result, expected) + @pytest.mark.xfail(not compat.IS64, reason="GH-35294") def test_rolling(self): g = self.frame.groupby("A") r = g.rolling(window=4) @@ -72,6 +74,7 @@ def test_rolling(self): @pytest.mark.parametrize( "interpolation", ["linear", "lower", "higher", "midpoint", "nearest"] ) + @pytest.mark.xfail(not compat.IS64, reason="GH-35294") def test_rolling_quantile(self, interpolation): g = self.frame.groupby("A") r = g.rolling(window=4) @@ -102,6 +105,7 @@ def func(x): expected = g.apply(func) tm.assert_series_equal(result, expected) + @pytest.mark.xfail(not compat.IS64, reason="GH-35294") def test_rolling_apply(self, raw): g = self.frame.groupby("A") r = g.rolling(window=4) @@ -111,6 +115,7 @@ def test_rolling_apply(self, raw): expected = g.apply(lambda x: x.rolling(4).apply(lambda y: y.sum(), raw=raw)) tm.assert_frame_equal(result, expected) + @pytest.mark.xfail(not compat.IS64, reason="GH-35294") def test_rolling_apply_mutability(self): # GH 14013 df = pd.DataFrame({"A": ["foo"] * 3 + ["bar"] * 3, "B": [1] * 6}) @@ -192,6 +197,7 @@ def test_expanding_apply(self, raw): tm.assert_frame_equal(result, expected) @pytest.mark.parametrize("expected_value,raw_value", [[1.0, True], [0.0, False]]) + @pytest.mark.xfail(not compat.IS64, reason="GH-35294") def test_groupby_rolling(self, expected_value, raw_value): # GH 31754 diff --git a/pandas/tests/window/test_timeseries_window.py 
b/pandas/tests/window/test_timeseries_window.py index 8aa4d7103e48a..90f919d5565b0 100644 --- a/pandas/tests/window/test_timeseries_window.py +++ b/pandas/tests/window/test_timeseries_window.py @@ -7,6 +7,7 @@ MultiIndex, Series, Timestamp, + compat, date_range, to_datetime, ) @@ -656,6 +657,7 @@ def agg_by_day(x): tm.assert_frame_equal(result, expected) + @pytest.mark.xfail(not compat.IS64, reason="GH-35294") def test_groupby_monotonic(self): # GH 15130 @@ -685,6 +687,7 @@ def test_groupby_monotonic(self): result = df.groupby("name").rolling("180D", on="date")["amount"].sum() tm.assert_series_equal(result, expected) + @pytest.mark.xfail(not compat.IS64, reason="GH-35294") def test_non_monotonic(self): # GH 13966 (similar to #15130, closed by #15175) From 595208bc5184d0b3d596d90f62b72745c5ccdd64 Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Wed, 15 Jul 2020 15:17:57 -0700 Subject: [PATCH 136/632] BUG: Use correct ExtensionArray reductions in DataFrame reductions (#35254) --- doc/source/whatsnew/v1.1.0.rst | 1 + pandas/core/frame.py | 27 ++++++++++++++------ pandas/tests/arrays/integer/test_function.py | 9 +++++++ pandas/tests/frame/test_analytics.py | 23 +++++++++++++++++ 4 files changed, 52 insertions(+), 8 deletions(-) diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst index 814dbe999d5c1..36433a45ede9f 100644 --- a/doc/source/whatsnew/v1.1.0.rst +++ b/doc/source/whatsnew/v1.1.0.rst @@ -916,6 +916,7 @@ Numeric - Bug in :meth:`DataFrame.diff` with ``axis=1`` returning incorrect results with mixed dtypes (:issue:`32995`) - Bug in :meth:`DataFrame.corr` and :meth:`DataFrame.cov` raising when handling nullable integer columns with ``pandas.NA`` (:issue:`33803`) - Bug in :class:`DataFrame` and :class:`Series` addition and subtraction between object-dtype objects and ``datetime64`` dtype objects (:issue:`33824`) +- Bug in :class:`DataFrame` reductions (e.g. 
``df.min()``, ``df.max()``) with ``ExtensionArray`` dtypes (:issue:`34520`, :issue:`32651`) Conversion ^^^^^^^^^^ diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 0268d19e00b97..b63467f08cdaa 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -118,6 +118,7 @@ from pandas.core.arrays import Categorical, ExtensionArray from pandas.core.arrays.datetimelike import DatetimeLikeArrayMixin as DatetimeLikeArray from pandas.core.arrays.sparse import SparseFrameAccessor +from pandas.core.construction import extract_array from pandas.core.generic import NDFrame, _shared_docs from pandas.core.indexes import base as ibase from pandas.core.indexes.api import Index, ensure_index, ensure_index_from_sequences @@ -8512,7 +8513,14 @@ def _count_level(self, level, axis=0, numeric_only=False): return result def _reduce( - self, op, name, axis=0, skipna=True, numeric_only=None, filter_type=None, **kwds + self, + op, + name: str, + axis=0, + skipna=True, + numeric_only=None, + filter_type=None, + **kwds, ): assert filter_type is None or filter_type == "bool", filter_type @@ -8544,8 +8552,11 @@ def _reduce( labels = self._get_agg_axis(axis) constructor = self._constructor - def f(x): - return op(x, axis=axis, skipna=skipna, **kwds) + def func(values): + if is_extension_array_dtype(values.dtype): + return extract_array(values)._reduce(name, skipna=skipna, **kwds) + else: + return op(values, axis=axis, skipna=skipna, **kwds) def _get_data(axis_matters): if filter_type is None: @@ -8592,7 +8603,7 @@ def blk_func(values): out[:] = coerce_to_dtypes(out.values, df.dtypes) return out - if not self._is_homogeneous_type: + if not self._is_homogeneous_type or self._mgr.any_extension_types: # try to avoid self.values call if filter_type is None and axis == 0 and len(self) > 0: @@ -8612,7 +8623,7 @@ def blk_func(values): from pandas.core.apply import frame_apply opa = frame_apply( - self, func=f, result_type="expand", ignore_failures=True + self, func=func, result_type="expand", ignore_failures=True ) result = opa.get_result() if result.ndim == self.ndim: @@ -8624,7 +8635,7 @@ def blk_func(values): values = data.values try: - result = f(values) + result = func(values) except TypeError: # e.g. 
in nanops trying to convert strs to float @@ -8635,7 +8646,7 @@ def blk_func(values): values = data.values with np.errstate(all="ignore"): - result = f(values) + result = func(values) else: if numeric_only: @@ -8646,7 +8657,7 @@ def blk_func(values): else: data = self values = data.values - result = f(values) + result = func(values) if filter_type == "bool" and is_object_dtype(values) and axis is None: # work around https://github.com/numpy/numpy/issues/10489 diff --git a/pandas/tests/arrays/integer/test_function.py b/pandas/tests/arrays/integer/test_function.py index 44c3077228e80..a81434339fdae 100644 --- a/pandas/tests/arrays/integer/test_function.py +++ b/pandas/tests/arrays/integer/test_function.py @@ -133,6 +133,15 @@ def test_integer_array_numpy_sum(values, expected): assert result == expected +@pytest.mark.parametrize("op", ["sum", "prod", "min", "max"]) +def test_dataframe_reductions(op): + # https://github.com/pandas-dev/pandas/pull/32867 + # ensure the integers are not cast to float during reductions + df = pd.DataFrame({"a": pd.array([1, 2], dtype="Int64")}) + result = getattr(df, op)() + assert isinstance(result["a"], np.int64) + + # TODO(jreback) - these need testing / are broken # shift diff --git a/pandas/tests/frame/test_analytics.py b/pandas/tests/frame/test_analytics.py index db8bb5ca3c437..9d6b9f39a0578 100644 --- a/pandas/tests/frame/test_analytics.py +++ b/pandas/tests/frame/test_analytics.py @@ -1303,3 +1303,26 @@ def test_preserve_timezone(self, initial: str, method): df = DataFrame([expected]) result = getattr(df, method)(axis=1) tm.assert_series_equal(result, expected) + + +def test_mixed_frame_with_integer_sum(): + # https://github.com/pandas-dev/pandas/issues/34520 + df = pd.DataFrame([["a", 1]], columns=list("ab")) + df = df.astype({"b": "Int64"}) + result = df.sum() + expected = pd.Series(["a", 1], index=["a", "b"]) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("numeric_only", [True, False, None]) +@pytest.mark.parametrize("method", ["min", "max"]) +def test_minmax_extensionarray(method, numeric_only): + # https://github.com/pandas-dev/pandas/issues/32651 + int64_info = np.iinfo("int64") + ser = Series([int64_info.max, None, int64_info.min], dtype=pd.Int64Dtype()) + df = DataFrame({"Int64": ser}) + result = getattr(df, method)(numeric_only=numeric_only) + expected = Series( + [getattr(int64_info, method)], index=pd.Index(["Int64"], dtype="object") + ) + tm.assert_series_equal(result, expected)
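The user-visible effect of this change is easiest to see at the REPL. Per the new tests above, reductions on nullable-integer columns now dispatch to ``IntegerArray._reduce`` instead of going through the numpy path, so the result keeps an integer type rather than being coerced to float (outputs as expected with this patch applied):

    >>> import pandas as pd
    >>> df = pd.DataFrame({"a": pd.array([1, 2], dtype="Int64")})
    >>> df.max()["a"]
    2
    >>> type(df.max()["a"])
    <class 'numpy.int64'>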
From 653f091f859c76948758b291d3546802dcb8cb96 Mon Sep 17 00:00:00 2001 From: patrick <61934744+phofl@users.noreply.github.com> Date: Thu, 16 Jul 2020 00:19:12 +0200 Subject: [PATCH 137/632] BUG: Fix dropped result column in groupby with as_index False (#33247) --- doc/source/whatsnew/v1.1.0.rst | 37 +++++++++++++++++++ pandas/core/groupby/generic.py | 8 ++-- .../tests/groupby/aggregate/test_aggregate.py | 35 ++++++++++++++++++ 3 files changed, 76 insertions(+), 4 deletions(-) diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst index 36433a45ede9f..90534c00df621 100644 --- a/doc/source/whatsnew/v1.1.0.rst +++ b/doc/source/whatsnew/v1.1.0.rst @@ -640,6 +640,43 @@ The method :meth:`core.DataFrameGroupBy.size` would previously ignore ``as_index df.groupby("a", as_index=False).size() +.. _whatsnew_110.api_breaking.groupby_results_lost_as_index_false: + +:meth:`DataFrameGroupBy.agg` lost results with ``as_index=False`` when relabeling columns +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Previously :meth:`DataFrameGroupBy.agg` lost the result columns when the ``as_index`` option was +set to ``False`` and the result columns were relabeled. In this case the result values were replaced with +the previous index (:issue:`32240`). + +.. ipython:: python + + df = pd.DataFrame({"key": ["x", "y", "z", "x", "y", "z"], + "val": [1.0, 0.8, 2.0, 3.0, 3.6, 0.75]}) + df + +*Previous behavior*: + +.. code-block:: ipython + + In [2]: grouped = df.groupby("key", as_index=False) + In [3]: result = grouped.agg(min_val=pd.NamedAgg(column="val", aggfunc="min")) + In [4]: result + Out[4]: + min_val + 0 x + 1 y + 2 z + +*New behavior*: + +.. ipython:: python + + grouped = df.groupby("key", as_index=False) + result = grouped.agg(min_val=pd.NamedAgg(column="val", aggfunc="min")) + result + + .. _whatsnew_110.notable_bug_fixes.apply_applymap_first_once: apply and applymap on ``DataFrame`` evaluates first row/column only once diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index b9a8f3d5a5176..1d14361757e4a 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -975,16 +975,16 @@ def aggregate(self, func=None, *args, engine=None, engine_kwargs=None, **kwargs) [self._selected_obj.columns.name] * result.columns.nlevels ).droplevel(-1) - if not self.as_index: - self._insert_inaxis_grouper_inplace(result) - result.index = np.arange(len(result)) - if relabeling: # used reordered index of columns result = result.iloc[:, order] result.columns = columns + if not self.as_index: + self._insert_inaxis_grouper_inplace(result) + result.index = np.arange(len(result)) + return result._convert(datetime=True) agg = aggregate diff --git a/pandas/tests/groupby/aggregate/test_aggregate.py b/pandas/tests/groupby/aggregate/test_aggregate.py index bf465635c0085..40a20c8210052 100644 --- a/pandas/tests/groupby/aggregate/test_aggregate.py +++ b/pandas/tests/groupby/aggregate/test_aggregate.py @@ -795,6 +795,41 @@ def test_groupby_aggregate_empty_key_empty_return(): tm.assert_frame_equal(result, expected) +def test_groupby_agg_loses_results_with_as_index_false_relabel(): + # GH 32240: When the aggregate function relabels column names and + # as_index=False is specified, the results are dropped. + + df = pd.DataFrame( + {"key": ["x", "y", "z", "x", "y", "z"], "val": [1.0, 0.8, 2.0, 3.0, 3.6, 0.75]} + ) + + grouped = df.groupby("key", as_index=False) + result = grouped.agg(min_val=pd.NamedAgg(column="val", aggfunc="min")) + expected = pd.DataFrame({"key": ["x", "y", "z"], "min_val": [1.0, 0.8, 0.75]}) + tm.assert_frame_equal(result, expected) + + +def test_groupby_agg_loses_results_with_as_index_false_relabel_multiindex(): + # GH 32240: When the aggregate function relabels column names and + # as_index=False is specified, the results are dropped.
Check if + # multiindex is returned in the right order + + df = pd.DataFrame( + { + "key": ["x", "y", "x", "y", "x", "x"], + "key1": ["a", "b", "c", "b", "a", "c"], + "val": [1.0, 0.8, 2.0, 3.0, 3.6, 0.75], + } + ) + + grouped = df.groupby(["key", "key1"], as_index=False) + result = grouped.agg(min_val=pd.NamedAgg(column="val", aggfunc="min")) + expected = pd.DataFrame( + {"key": ["x", "x", "y"], "key1": ["a", "c", "b"], "min_val": [1.0, 0.75, 0.8]} + ) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( "func", [lambda s: s.mean(), lambda s: np.mean(s), lambda s: np.nanmean(s)] ) From 1b2f5800520636a3c943321c6bf82710ce9d5378 Mon Sep 17 00:00:00 2001 From: avinashpancham <44933366+avinashpancham@users.noreply.github.com> Date: Thu, 16 Jul 2020 00:24:05 +0200 Subject: [PATCH 138/632] TST: Verify filtering operations on DataFrames with categorical Series (#35233) --- pandas/tests/frame/indexing/test_categorical.py | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/pandas/tests/frame/indexing/test_categorical.py b/pandas/tests/frame/indexing/test_categorical.py index cfc22b9b18729..314de5bdd8146 100644 --- a/pandas/tests/frame/indexing/test_categorical.py +++ b/pandas/tests/frame/indexing/test_categorical.py @@ -394,3 +394,14 @@ def test_loc_indexing_preserves_index_category_dtype(self): result = df.loc[["a"]].index.levels[0] tm.assert_index_equal(result, expected) + + def test_categorical_filtering(self): + # GH22609 Verify filtering operations on DataFrames with categorical Series + df = pd.DataFrame(data=[[0, 0], [1, 1]], columns=["a", "b"]) + df["b"] = df.b.astype("category") + + result = df.where(df.a > 0) + expected = df.copy() + expected.loc[0, :] = np.nan + + tm.assert_equal(result, expected) From a1e028f2866f5702c6a30cd2091f53c30d47a79d Mon Sep 17 00:00:00 2001 From: Joris Van den Bossche Date: Thu, 16 Jul 2020 00:25:23 +0200 Subject: [PATCH 139/632] BUG/API: other object type check in Series/DataFrame.equals (#34402) --- doc/source/whatsnew/v1.1.0.rst | 2 ++ pandas/core/generic.py | 2 +- pandas/tests/frame/test_subclass.py | 8 ++++++++ pandas/tests/series/test_subclass.py | 8 ++++++++ 4 files changed, 19 insertions(+), 1 deletion(-) diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst index 90534c00df621..f62e78831616b 100644 --- a/doc/source/whatsnew/v1.1.0.rst +++ b/doc/source/whatsnew/v1.1.0.rst @@ -1200,6 +1200,8 @@ Other - Bug in :meth:`DataFrame.__dir__` caused a segfault when using unicode surrogates in a column name (:issue:`25509`) - Bug in :meth:`DataFrame.plot.scatter` caused an error when plotting variable marker sizes (:issue:`32904`) - :class:`IntegerArray` now implements the ``sum`` operation (:issue:`33172`) +- Bug in :meth:`DataFrame.equals` and :meth:`Series.equals` in allowing subclasses + to be equal (:issue:`34402`). 
- Bug in :class:`Tick` comparisons raising ``TypeError`` when comparing against timedelta-like objects (:issue:`34088`) - Bug in :class:`Tick` multiplication raising ``TypeError`` when multiplying by a float (:issue:`34486`) - Passing a `set` as `names` argument to :func:`pandas.read_csv`, :func:`pandas.read_table`, or :func:`pandas.read_fwf` will raise ``ValueError: Names should be an ordered collection.`` (:issue:`34946`) diff --git a/pandas/core/generic.py b/pandas/core/generic.py index eb55369d83593..e46fde1f59f16 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -1278,7 +1278,7 @@ def equals(self, other): >>> df.equals(different_data_type) False """ - if not isinstance(other, self._constructor): + if not (isinstance(other, type(self)) or isinstance(self, type(other))): return False return self._mgr.equals(other._mgr) diff --git a/pandas/tests/frame/test_subclass.py b/pandas/tests/frame/test_subclass.py index 08920cf7fceeb..2b462d5a10c51 100644 --- a/pandas/tests/frame/test_subclass.py +++ b/pandas/tests/frame/test_subclass.py @@ -696,3 +696,11 @@ def test_idxmax_preserves_subclass(self): df = tm.SubclassedDataFrame({"A": [1, 2, 3], "B": [4, 5, 6], "C": [7, 8, 9]}) result = df.idxmax() assert isinstance(result, tm.SubclassedSeries) + + def test_equals_subclass(self): + # https://github.com/pandas-dev/pandas/pull/34402 + # allow subclass in both directions + df1 = pd.DataFrame({"a": [1, 2, 3]}) + df2 = tm.SubclassedDataFrame({"a": [1, 2, 3]}) + assert df1.equals(df2) + assert df2.equals(df1) diff --git a/pandas/tests/series/test_subclass.py b/pandas/tests/series/test_subclass.py index a596ed49c1df2..86330b7cc6993 100644 --- a/pandas/tests/series/test_subclass.py +++ b/pandas/tests/series/test_subclass.py @@ -51,3 +51,11 @@ def test_explode(self): s = tm.SubclassedSeries([[1, 2, 3], "foo", [], [3, 4]]) result = s.explode() assert isinstance(result, tm.SubclassedSeries) + + def test_equals(self): + # https://github.com/pandas-dev/pandas/pull/34402 + # allow subclass in both directions + s1 = pd.Series([1, 2, 3]) + s2 = tm.SubclassedSeries([1, 2, 3]) + assert s1.equals(s2) + assert s2.equals(s1) From 5119ebfb3cff9835521c0badd679b42a59b74006 Mon Sep 17 00:00:00 2001 From: Paul Sanders Date: Wed, 15 Jul 2020 21:31:45 -0400 Subject: [PATCH 140/632] TST: added tests for sparse and date range quantiles (#35236) * TST: added tests for sparse and date range quantiles * TST: updating quantile arguments --- pandas/tests/frame/methods/test_quantile.py | 39 ++++++++++++++++++--- 1 file changed, 34 insertions(+), 5 deletions(-) diff --git a/pandas/tests/frame/methods/test_quantile.py b/pandas/tests/frame/methods/test_quantile.py index 0eec30cbc5c67..0b8f1e0495155 100644 --- a/pandas/tests/frame/methods/test_quantile.py +++ b/pandas/tests/frame/methods/test_quantile.py @@ -7,14 +7,29 @@ class TestDataFrameQuantile: - def test_quantile_sparse(self): + @pytest.mark.parametrize( + "df,expected", + [ + [ + pd.DataFrame( + { + 0: pd.Series(pd.arrays.SparseArray([1, 2])), + 1: pd.Series(pd.arrays.SparseArray([3, 4])), + } + ), + pd.Series([1.5, 3.5], name=0.5), + ], + [ + pd.DataFrame(pd.Series([0.0, None, 1.0, 2.0], dtype="Sparse[float]")), + pd.Series([1.0], name=0.5), + ], + ], + ) + def test_quantile_sparse(self, df, expected): # GH#17198 - s = pd.Series(pd.arrays.SparseArray([1, 2])) - s1 = pd.Series(pd.arrays.SparseArray([3, 4])) - df = pd.DataFrame({0: s, 1: s1}) + # GH#24600 result = df.quantile() - expected = pd.Series([1.5, 3.5], name=0.5) 
tm.assert_series_equal(result, expected) def test_quantile(self, datetime_frame): @@ -59,6 +74,20 @@ def test_quantile(self, datetime_frame): expected = Series([3.0, 4.0], index=[0, 1], name=0.5) tm.assert_series_equal(result, expected) + def test_quantile_date_range(self): + # GH 2460 + + dti = pd.date_range("2016-01-01", periods=3, tz="US/Pacific") + ser = pd.Series(dti) + df = pd.DataFrame(ser) + + result = df.quantile(numeric_only=False) + expected = pd.Series( + ["2016-01-02 00:00:00"], name=0.5, dtype="datetime64[ns, US/Pacific]" + ) + + tm.assert_series_equal(result, expected) + def test_quantile_axis_mixed(self): # mixed on axis=1 From f0d57d932fe0f000de0b933812c35eca374ab854 Mon Sep 17 00:00:00 2001 From: Gabriel Tutui Date: Wed, 15 Jul 2020 22:51:18 -0300 Subject: [PATCH 141/632] Add date overflow message to tz_localize (#32967) (#35187) * Add date overflow message to tz_localize (#32967) * Delay evaluating timestamp --- doc/source/whatsnew/v1.1.0.rst | 1 + pandas/_libs/tslibs/conversion.pyx | 15 ++++++++++++--- pandas/tests/scalar/timestamp/test_timezones.py | 9 ++++++++- 3 files changed, 21 insertions(+), 4 deletions(-) diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst index f62e78831616b..97099ce73354f 100644 --- a/doc/source/whatsnew/v1.1.0.rst +++ b/doc/source/whatsnew/v1.1.0.rst @@ -922,6 +922,7 @@ Datetimelike resolution which converted to object dtype instead of coercing to ``datetime64[ns]`` dtype when within the timestamp bounds (:issue:`34843`). - The ``freq`` keyword in :class:`Period`, :func:`date_range`, :func:`period_range`, :func:`pd.tseries.frequencies.to_offset` no longer allows tuples, pass as string instead (:issue:`34703`) +- ``OutOfBoundsDatetime`` issues an improved error message when timestamp is out of implementation bounds. 
(:issue:`32967`) Timedelta ^^^^^^^^^ diff --git a/pandas/_libs/tslibs/conversion.pyx b/pandas/_libs/tslibs/conversion.pyx index 85da7a60a029a..8cc3d25e86340 100644 --- a/pandas/_libs/tslibs/conversion.pyx +++ b/pandas/_libs/tslibs/conversion.pyx @@ -639,11 +639,20 @@ cdef inline check_overflows(_TSObject obj): # GH#12677 if obj.dts.year == 1677: if not (obj.value < 0): - raise OutOfBoundsDatetime + from pandas._libs.tslibs.timestamps import Timestamp + fmt = (f"{obj.dts.year}-{obj.dts.month:02d}-{obj.dts.day:02d} " + f"{obj.dts.hour:02d}:{obj.dts.min:02d}:{obj.dts.sec:02d}") + raise OutOfBoundsDatetime( + f"Converting {fmt} underflows past {Timestamp.min}" + ) elif obj.dts.year == 2262: if not (obj.value > 0): - raise OutOfBoundsDatetime - + from pandas._libs.tslibs.timestamps import Timestamp + fmt = (f"{obj.dts.year}-{obj.dts.month:02d}-{obj.dts.day:02d} " + f"{obj.dts.hour:02d}:{obj.dts.min:02d}:{obj.dts.sec:02d}") + raise OutOfBoundsDatetime( + f"Converting {fmt} overflows past {Timestamp.max}" + ) # ---------------------------------------------------------------------- # Localization diff --git a/pandas/tests/scalar/timestamp/test_timezones.py b/pandas/tests/scalar/timestamp/test_timezones.py index 83764aa184392..f05f2054b2483 100644 --- a/pandas/tests/scalar/timestamp/test_timezones.py +++ b/pandas/tests/scalar/timestamp/test_timezones.py @@ -21,9 +21,12 @@ class TestTimestampTZOperations: # Timestamp.tz_localize def test_tz_localize_pushes_out_of_bounds(self): - msg = "^$" # GH#12677 # tz_localize that pushes away from the boundary is OK + msg = ( + f"Converting {Timestamp.min.strftime('%Y-%m-%d %H:%M:%S')} " + f"underflows past {Timestamp.min}" + ) pac = Timestamp.min.tz_localize("US/Pacific") assert pac.value > Timestamp.min.value pac.tz_convert("Asia/Tokyo") # tz_convert doesn't change value @@ -31,6 +34,10 @@ def test_tz_localize_pushes_out_of_bounds(self): Timestamp.min.tz_localize("Asia/Tokyo") # tz_localize that pushes away from the boundary is OK + msg = ( + f"Converting {Timestamp.max.strftime('%Y-%m-%d %H:%M:%S')} " + f"overflows past {Timestamp.max}" + ) tokyo = Timestamp.max.tz_localize("Asia/Tokyo") assert tokyo.value < Timestamp.max.value tokyo.tz_convert("US/Pacific") # tz_convert doesn't change value From 1b16a79c1743992c8cb816ed526e3a9e677c235b Mon Sep 17 00:00:00 2001 From: Simon Hawkins Date: Thu, 16 Jul 2020 10:55:13 +0100 Subject: [PATCH 142/632] BUG: Inconsistent behavior in Index.difference (#35231) --- doc/source/whatsnew/v1.1.0.rst | 1 + pandas/core/indexes/numeric.py | 22 ----------------- pandas/tests/indexes/test_numeric.py | 37 ++++++++++++++++++++++++++++ 3 files changed, 38 insertions(+), 22 deletions(-) diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst index 97099ce73354f..9f0318bfdb895 100644 --- a/doc/source/whatsnew/v1.1.0.rst +++ b/doc/source/whatsnew/v1.1.0.rst @@ -954,6 +954,7 @@ Numeric - Bug in :meth:`DataFrame.diff` with ``axis=1`` returning incorrect results with mixed dtypes (:issue:`32995`) - Bug in :meth:`DataFrame.corr` and :meth:`DataFrame.cov` raising when handling nullable integer columns with ``pandas.NA`` (:issue:`33803`) - Bug in :class:`DataFrame` and :class:`Series` addition and subtraction between object-dtype objects and ``datetime64`` dtype objects (:issue:`33824`) +- Bug in :meth:`Index.difference` incorrect results when comparing a :class:`Float64Index` and object :class:`Index` (:issue:`35217`) - Bug in :class:`DataFrame` reductions (e.g. 
``df.min()``, ``df.max()``) with ``ExtensionArray`` dtypes (:issue:`34520`, :issue:`32651`) Conversion diff --git a/pandas/core/indexes/numeric.py b/pandas/core/indexes/numeric.py index 5020a25c88ff4..731907993d08f 100644 --- a/pandas/core/indexes/numeric.py +++ b/pandas/core/indexes/numeric.py @@ -400,28 +400,6 @@ def _format_native_types( ) return formatter.get_result_as_array() - def equals(self, other) -> bool: - """ - Determines if two Index objects contain the same elements. - """ - if self is other: - return True - - if not isinstance(other, Index): - return False - - # need to compare nans locations and make sure that they are the same - # since nans don't compare equal this is a bit tricky - try: - if not isinstance(other, Float64Index): - other = self._constructor(other) - if not is_dtype_equal(self.dtype, other.dtype) or self.shape != other.shape: - return False - left, right = self._values, other._values - return ((left == right) | (self._isnan & other._isnan)).all() - except (TypeError, ValueError): - return False - def __contains__(self, other: Any) -> bool: hash(other) if super().__contains__(other): diff --git a/pandas/tests/indexes/test_numeric.py b/pandas/tests/indexes/test_numeric.py index 33de0800658f2..a7c5734ef9b02 100644 --- a/pandas/tests/indexes/test_numeric.py +++ b/pandas/tests/indexes/test_numeric.py @@ -239,6 +239,19 @@ def test_equals_numeric(self): i2 = Float64Index([1.0, np.nan]) assert i.equals(i2) + @pytest.mark.parametrize( + "other", + ( + Int64Index([1, 2]), + Index([1.0, 2.0], dtype=object), + Index([1, 2], dtype=object), + ), + ) + def test_equals_numeric_other_index_type(self, other): + i = Float64Index([1.0, 2.0]) + assert i.equals(other) + assert other.equals(i) + @pytest.mark.parametrize( "vals", [ @@ -635,3 +648,27 @@ def test_uint_index_does_not_convert_to_float64(): tm.assert_index_equal(result.index, expected) tm.assert_equal(result, series[:3]) + + +def test_float64_index_equals(): + # https://github.com/pandas-dev/pandas/issues/35217 + float_index = pd.Index([1.0, 2, 3]) + string_index = pd.Index(["1", "2", "3"]) + + result = float_index.equals(string_index) + assert result is False + + result = string_index.equals(float_index) + assert result is False + + +def test_float64_index_difference(): + # https://github.com/pandas-dev/pandas/issues/35217 + float_index = pd.Index([1.0, 2, 3]) + string_index = pd.Index(["1", "2", "3"]) + + result = float_index.difference(string_index) + tm.assert_index_equal(result, float_index) + + result = string_index.difference(float_index) + tm.assert_index_equal(result, string_index) From 92c9e690adbfdc7ff1de5855f50b885b1f6e29f7 Mon Sep 17 00:00:00 2001 From: Tom Augspurger Date: Thu, 16 Jul 2020 06:17:06 -0500 Subject: [PATCH 143/632] Fix indexing, reindex on all-sparse SparseArray. 
(#35287) --- doc/source/whatsnew/v1.1.0.rst | 2 +- pandas/core/arrays/sparse/array.py | 19 +++++++++------ pandas/core/internals/blocks.py | 5 +--- pandas/tests/arrays/sparse/test_array.py | 5 ++++ pandas/tests/extension/base/getitem.py | 28 ---------------------- pandas/tests/extension/test_sparse.py | 5 ---- pandas/tests/frame/indexing/test_sparse.py | 20 ++++++++++++++++ 7 files changed, 39 insertions(+), 45 deletions(-) diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst index 9f0318bfdb895..de3a05a2ccdfb 100644 --- a/doc/source/whatsnew/v1.1.0.rst +++ b/doc/source/whatsnew/v1.1.0.rst @@ -1163,7 +1163,7 @@ Sparse - Creating a :class:`SparseArray` from timezone-aware dtype will issue a warning before dropping timezone information, instead of doing so silently (:issue:`32501`) - Bug in :meth:`arrays.SparseArray.from_spmatrix` wrongly read scipy sparse matrix (:issue:`31991`) - Bug in :meth:`Series.sum` with ``SparseArray`` raises ``TypeError`` (:issue:`25777`) -- Bug where :class:`DataFrame` containing :class:`SparseArray` filled with ``NaN`` when indexed by a list-like (:issue:`27781`, :issue:`29563`) +- Bug where :class:`DataFrame` containing an all-sparse :class:`SparseArray` filled with ``NaN`` when indexed by a list-like (:issue:`27781`, :issue:`29563`) - The repr of :class:`SparseDtype` now includes the repr of its ``fill_value`` attribute. Previously it used ``fill_value``'s string representation (:issue:`34352`) - Bug where empty :class:`DataFrame` could not be cast to :class:`SparseDtype` (:issue:`33113`) - Bug in :meth:`arrays.SparseArray` was returning the incorrect type when indexing a sparse dataframe with an iterable (:issue:`34526`, :issue:`34540`) diff --git a/pandas/core/arrays/sparse/array.py b/pandas/core/arrays/sparse/array.py index b18a58da3950f..1d675b54a9c62 100644 --- a/pandas/core/arrays/sparse/array.py +++ b/pandas/core/arrays/sparse/array.py @@ -862,21 +862,26 @@ def _take_with_fill(self, indices, fill_value=None) -> np.ndarray: else: raise IndexError("cannot do a non-empty take from an empty axes.") + # sp_indexer may be -1 for two reasons + # 1.) we took for an index of -1 (new) + # 2.) we took a value that was self.fill_value (old) sp_indexer = self.sp_index.lookup_array(indices) + new_fill_indices = indices == -1 + old_fill_indices = (sp_indexer == -1) & ~new_fill_indices - if self.sp_index.npoints == 0: + if self.sp_index.npoints == 0 and old_fill_indices.all(): + # We've looked up all valid points on an all-sparse array. + taken = np.full( + sp_indexer.shape, fill_value=self.fill_value, dtype=self.dtype.subtype + ) + + elif self.sp_index.npoints == 0: # Avoid taking from the empty self.sp_values _dtype = np.result_type(self.dtype.subtype, type(fill_value)) taken = np.full(sp_indexer.shape, fill_value=fill_value, dtype=_dtype) else: taken = self.sp_values.take(sp_indexer) - # sp_indexer may be -1 for two reasons - # 1.) we took for an index of -1 (new) - # 2.) we took a value that was self.fill_value (old) - new_fill_indices = indices == -1 - old_fill_indices = (sp_indexer == -1) & ~new_fill_indices - # Fill in two steps. 
# Old fill values # New fill values diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index 6a4b3318d3aa7..cc0f09ced7399 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -1636,10 +1636,7 @@ def _holder(self): @property def fill_value(self): # Used in reindex_indexer - if is_sparse(self.values): - return self.values.dtype.fill_value - else: - return self.values.dtype.na_value + return self.values.dtype.na_value @property def _can_hold_na(self): diff --git a/pandas/tests/arrays/sparse/test_array.py b/pandas/tests/arrays/sparse/test_array.py index d0cdec712f39d..04215bfe1bedb 100644 --- a/pandas/tests/arrays/sparse/test_array.py +++ b/pandas/tests/arrays/sparse/test_array.py @@ -281,6 +281,11 @@ def test_take(self): exp = SparseArray(np.take(self.arr_data, [0, 1, 2])) tm.assert_sp_array_equal(self.arr.take([0, 1, 2]), exp) + def test_take_all_empty(self): + a = pd.array([0, 0], dtype=pd.SparseDtype("int64")) + result = a.take([0, 1], allow_fill=True, fill_value=np.nan) + tm.assert_sp_array_equal(a, result) + def test_take_fill_value(self): data = np.array([1, np.nan, 0, 3, 0]) sparse = SparseArray(data, fill_value=0) diff --git a/pandas/tests/extension/base/getitem.py b/pandas/tests/extension/base/getitem.py index 5d0ea69007e27..251376798efc3 100644 --- a/pandas/tests/extension/base/getitem.py +++ b/pandas/tests/extension/base/getitem.py @@ -399,31 +399,3 @@ def test_item(self, data): with pytest.raises(ValueError, match=msg): s.item() - - def test_boolean_mask_frame_fill_value(self, data): - # https://github.com/pandas-dev/pandas/issues/27781 - df = pd.DataFrame({"A": data}) - - mask = np.random.choice([True, False], df.shape[0]) - result = pd.isna(df.iloc[mask]["A"]) - expected = pd.isna(df["A"].iloc[mask]) - self.assert_series_equal(result, expected) - - mask = pd.Series(mask, index=df.index) - result = pd.isna(df.loc[mask]["A"]) - expected = pd.isna(df["A"].loc[mask]) - self.assert_series_equal(result, expected) - - def test_fancy_index_frame_fill_value(self, data): - # https://github.com/pandas-dev/pandas/issues/29563 - df = pd.DataFrame({"A": data}) - - mask = np.random.choice(df.shape[0], df.shape[0]) - result = pd.isna(df.iloc[mask]["A"]) - expected = pd.isna(df["A"].iloc[mask]) - self.assert_series_equal(result, expected) - - mask = pd.Series(mask, index=df.index) - result = pd.isna(df.loc[mask]["A"]) - expected = pd.isna(df["A"].loc[mask]) - self.assert_series_equal(result, expected) diff --git a/pandas/tests/extension/test_sparse.py b/pandas/tests/extension/test_sparse.py index 68e521b005c02..b411ca1c482a4 100644 --- a/pandas/tests/extension/test_sparse.py +++ b/pandas/tests/extension/test_sparse.py @@ -41,11 +41,6 @@ def data_for_twos(request): return SparseArray(np.ones(100) * 2) -@pytest.fixture(params=[0, np.nan]) -def data_zeros(request): - return SparseArray(np.zeros(100, dtype=int), fill_value=request.param) - - @pytest.fixture(params=[0, np.nan]) def data_missing(request): """Length 2 array with [NA, Valid]""" diff --git a/pandas/tests/frame/indexing/test_sparse.py b/pandas/tests/frame/indexing/test_sparse.py index 876fbe212c466..04e1c8b94c4d9 100644 --- a/pandas/tests/frame/indexing/test_sparse.py +++ b/pandas/tests/frame/indexing/test_sparse.py @@ -49,3 +49,23 @@ def test_locindexer_from_spmatrix(self, spmatrix_t, dtype): result = df.loc[itr_idx].dtypes.values expected = np.full(cols, SparseDtype(dtype, fill_value=0)) tm.assert_numpy_array_equal(result, expected) + + def 
test_reindex(self): + # https://github.com/pandas-dev/pandas/issues/35286 + df = pd.DataFrame( + {"A": [0, 1], "B": pd.array([0, 1], dtype=pd.SparseDtype("int64", 0))} + ) + result = df.reindex([0, 2]) + expected = pd.DataFrame( + { + "A": [0.0, np.nan], + "B": pd.array([0.0, np.nan], dtype=pd.SparseDtype("float64", 0.0)), + }, + index=[0, 2], + ) + tm.assert_frame_equal(result, expected) + + def test_all_sparse(self): + df = pd.DataFrame({"A": pd.array([0, 0], dtype=pd.SparseDtype("int64"))}) + result = df.loc[[0, 1]] + tm.assert_frame_equal(result, df) From b4a7e65b3ba3ce2cdd6733f8917d7110bf339086 Mon Sep 17 00:00:00 2001 From: Tom Augspurger Date: Thu, 16 Jul 2020 08:04:53 -0500 Subject: [PATCH 144/632] TST: xfail more 32-bits (#35304) --- pandas/tests/window/test_rolling.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/pandas/tests/window/test_rolling.py b/pandas/tests/window/test_rolling.py index 8d72e2cb92ca9..bea239a245a4f 100644 --- a/pandas/tests/window/test_rolling.py +++ b/pandas/tests/window/test_rolling.py @@ -7,7 +7,7 @@ import pandas.util._test_decorators as td import pandas as pd -from pandas import DataFrame, Series, date_range +from pandas import DataFrame, Series, compat, date_range import pandas._testing as tm from pandas.core.window import Rolling @@ -150,6 +150,7 @@ def test_closed_one_entry(func): @pytest.mark.parametrize("func", ["min", "max"]) +@pytest.mark.xfail(not compat.IS64, reason="GH-35294") def test_closed_one_entry_groupby(func): # GH24718 ser = pd.DataFrame( @@ -682,6 +683,7 @@ def test_iter_rolling_datetime(expected, expected_index, window): ), ], ) +@pytest.mark.xfail(not compat.IS64, reason="GH-35294") def test_rolling_positional_argument(grouping, _index, raw): # GH 34605 From a154bf9bd11801d6a35bdb6b9f3ba6da90fe85cd Mon Sep 17 00:00:00 2001 From: Tom Augspurger Date: Thu, 16 Jul 2020 09:56:08 -0500 Subject: [PATCH 145/632] Revert BUG: Ensure same index is returned for slow and fast path in groupby.apply #31613 (#35306) xref https://github.com/pandas-dev/pandas/pull/34998 --- doc/source/whatsnew/v1.1.0.rst | 4 ---- pandas/_libs/reduction.pyx | 2 +- pandas/tests/groupby/test_apply.py | 8 ++++++-- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst index de3a05a2ccdfb..cee3680b4bf65 100644 --- a/doc/source/whatsnew/v1.1.0.rst +++ b/doc/source/whatsnew/v1.1.0.rst @@ -1114,10 +1114,6 @@ Groupby/resample/rolling - Bug in :meth:`DataFrame.resample` where an ``AmbiguousTimeError`` would be raised when the resulting timezone aware :class:`DatetimeIndex` had a DST transition at midnight (:issue:`25758`) - Bug in :meth:`DataFrame.groupby` where a ``ValueError`` would be raised when grouping by a categorical column with read-only categories and ``sort=False`` (:issue:`33410`) - Bug in :meth:`GroupBy.agg`, :meth:`GroupBy.transform`, and :meth:`GroupBy.resample` where subclasses are not preserved (:issue:`28330`) -- Bug in :meth:`core.groupby.DataFrameGroupBy.apply` where the output index shape for functions returning a DataFrame which is equally indexed - to the input DataFrame is inconsistent. An internal heuristic to detect index mutation would behave differently for equal but not identical - indices. In particular, the result index shape might change if a copy of the input would be returned. - The behaviour now is consistent, independent of internal heuristics. 
(:issue:`31612`, :issue:`14927`, :issue:`13056`) - Bug in :meth:`SeriesGroupBy.agg` where any column name was accepted in the named aggregation of ``SeriesGroupBy`` previously. The behaviour now allows only ``str`` and callables else would raise ``TypeError``. (:issue:`34422`) - Bug in :meth:`DataFrame.groupby` lost index, when one of the ``agg`` keys referenced an empty list (:issue:`32580`) - Bug in :meth:`Rolling.apply` where ``center=True`` was ignored when ``engine='numba'`` was specified (:issue:`34784`) diff --git a/pandas/_libs/reduction.pyx b/pandas/_libs/reduction.pyx index 97c491776f831..a01e0c5705dcf 100644 --- a/pandas/_libs/reduction.pyx +++ b/pandas/_libs/reduction.pyx @@ -366,7 +366,7 @@ def apply_frame_axis0(object frame, object f, object names, # Need to infer if low level index slider will cause segfaults require_slow_apply = i == 0 and piece is chunk try: - if not piece.index.equals(chunk.index): + if not piece.index is chunk.index: mutated = True except AttributeError: # `piece` might not have an index, could be e.g. an int diff --git a/pandas/tests/groupby/test_apply.py b/pandas/tests/groupby/test_apply.py index aa10f44670361..5a1268bfb03db 100644 --- a/pandas/tests/groupby/test_apply.py +++ b/pandas/tests/groupby/test_apply.py @@ -211,6 +211,7 @@ def test_group_apply_once_per_group2(capsys): assert result == expected +@pytest.mark.xfail(reason="GH-34998") def test_apply_fast_slow_identical(): # GH 31613 @@ -234,9 +235,11 @@ def fast(group): "func", [ lambda x: x, - lambda x: x[:], + pytest.param(lambda x: x[:], marks=pytest.mark.xfail(reason="GH-34998")), lambda x: x.copy(deep=False), - lambda x: x.copy(deep=True), + pytest.param( + lambda x: x.copy(deep=True), marks=pytest.mark.xfail(reason="GH-34998") + ), ], ) def test_groupby_apply_identity_maybecopy_index_identical(func): @@ -997,6 +1000,7 @@ def test_apply_function_with_indexing_return_column(): tm.assert_frame_equal(result, expected) +@pytest.mark.xfail(reason="GH-34998") def test_apply_with_timezones_aware(): # GH: 27212 From 4cf8c5fd4dbcf6318c34fa900e98e7393dca1987 Mon Sep 17 00:00:00 2001 From: vivikelapoutre <31180320+vivikelapoutre@users.noreply.github.com> Date: Thu, 16 Jul 2020 18:08:54 +0200 Subject: [PATCH 146/632] BUG: Groupby with as_index=True causes incorrect summarization (#34906) * add test * PR comments * attempt to make the code cleaner --- pandas/tests/groupby/test_function.py | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/pandas/tests/groupby/test_function.py b/pandas/tests/groupby/test_function.py index 6f19ec40c2520..e693962e57ac3 100644 --- a/pandas/tests/groupby/test_function.py +++ b/pandas/tests/groupby/test_function.py @@ -85,6 +85,24 @@ def test_max_min_non_numeric(): assert "ss" in result +def test_min_date_with_nans(): + # GH26321 + dates = pd.to_datetime( + pd.Series(["2019-05-09", "2019-05-09", "2019-05-09"]), format="%Y-%m-%d" + ).dt.date + df = pd.DataFrame({"a": [np.nan, "1", np.nan], "b": [0, 1, 1], "c": dates}) + + result = df.groupby("b", as_index=False)["c"].min()["c"] + expected = pd.to_datetime( + pd.Series(["2019-05-09", "2019-05-09"], name="c"), format="%Y-%m-%d" + ).dt.date + tm.assert_series_equal(result, expected) + + result = df.groupby("b")["c"].min() + expected.index.name = "b" + tm.assert_series_equal(result, expected) + + def test_intercept_builtin_sum(): s = Series([1.0, 2.0, np.nan, 3.0]) grouped = s.groupby([0, 1, 2, 2]) From 5e45f4f6dd253fa585076ea3149ef4e07fed5d0f Mon Sep 17 00:00:00 2001 From: Tom Augspurger Date: Thu, 16 Jul 2020 
14:38:59 -0500 Subject: [PATCH 147/632] TST: Remove deprecated use of apply_index (#35298) --- .../offsets/test_offsets_properties.py | 29 ------------------- 1 file changed, 29 deletions(-) diff --git a/pandas/tests/tseries/offsets/test_offsets_properties.py b/pandas/tests/tseries/offsets/test_offsets_properties.py index 81465e733da85..ca14b202ef888 100644 --- a/pandas/tests/tseries/offsets/test_offsets_properties.py +++ b/pandas/tests/tseries/offsets/test_offsets_properties.py @@ -12,7 +12,6 @@ from hypothesis import assume, given, strategies as st from hypothesis.extra.dateutil import timezones as dateutil_timezones from hypothesis.extra.pytz import timezones as pytz_timezones -import pytest import pandas as pd from pandas import Timestamp @@ -95,34 +94,6 @@ def test_on_offset_implementations(dt, offset): assert offset.is_on_offset(dt) == (compare == dt) -@pytest.mark.xfail( - reason="res_v2 below is incorrect, needs to use the " - "commented-out version with tz_localize. " - "But with that fix in place, hypothesis then " - "has errors in timezone generation." -) -@given(gen_yqm_offset, gen_date_range) -def test_apply_index_implementations(offset, rng): - # offset.apply_index(dti)[i] should match dti[i] + offset - assume(offset.n != 0) # TODO: test for that case separately - - # rng = pd.date_range(start='1/1/2000', periods=100000, freq='T') - ser = pd.Series(rng) - - res = rng + offset - res_v2 = offset.apply_index(rng) - # res_v2 = offset.apply_index(rng.tz_localize(None)).tz_localize(rng.tz) - assert (res == res_v2).all() - - assert res[0] == rng[0] + offset - assert res[-1] == rng[-1] + offset - res2 = ser + offset - # apply_index is only for indexes, not series, so no res2_v2 - assert res2.iloc[0] == ser.iloc[0] + offset - assert res2.iloc[-1] == ser.iloc[-1] + offset - # TODO: Check randomly assorted entries, not just first/last - - @given(gen_yqm_offset) def test_shift_across_dst(offset): # GH#18319 check that 1) timezone is correctly normalized and From cd5e17c1a537a8ce8e786d308cebc64e1a2c6bda Mon Sep 17 00:00:00 2001 From: Tom Augspurger Date: Thu, 16 Jul 2020 17:43:07 -0500 Subject: [PATCH 148/632] pin numpy (#35312) --- environment.yml | 3 ++- requirements-dev.txt | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/environment.yml b/environment.yml index 32ff8c91cb69c..53106906a52cb 100644 --- a/environment.yml +++ b/environment.yml @@ -3,7 +3,8 @@ channels: - conda-forge dependencies: # required - - numpy>=1.15 + # Pin numpy<1.19 until MPL 3.3.0 is released. + - numpy>=1.15,<1.19.0 - python=3 - python-dateutil>=2.7.3 - pytz diff --git a/requirements-dev.txt b/requirements-dev.txt index 3cda38d4b72f5..1ec998ffa72d4 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -1,7 +1,7 @@ # This file is auto-generated from environment.yml, do not modify. # See that file for comments about the need/usage of each dependency. 
-numpy>=1.15 +numpy>=1.15,<1.19.0 python-dateutil>=2.7.3 pytz asv From fe1ea14910c9afec638114fce12308cdf26784dd Mon Sep 17 00:00:00 2001 From: Simon Hawkins Date: Thu, 16 Jul 2020 23:44:18 +0100 Subject: [PATCH 149/632] CLN: consistent EA._reduce signatures (#35308) --- pandas/core/arrays/base.py | 2 +- pandas/core/arrays/categorical.py | 4 ++-- pandas/core/arrays/datetimelike.py | 2 +- pandas/core/arrays/sparse/array.py | 2 +- pandas/core/arrays/string_.py | 2 +- pandas/tests/extension/arrow/arrays.py | 4 ++-- pandas/tests/extension/decimal/array.py | 2 +- 7 files changed, 9 insertions(+), 9 deletions(-) diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py index 32a2a30fcfd43..2553a65aed07b 100644 --- a/pandas/core/arrays/base.py +++ b/pandas/core/arrays/base.py @@ -1120,7 +1120,7 @@ def _concat_same_type( # of objects _can_hold_na = True - def _reduce(self, name, skipna=True, **kwargs): + def _reduce(self, name: str, skipna: bool = True, **kwargs): """ Return a scalar result of performing the reduction operation. diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index 1fedfa70cc469..db9cfd9d7fc59 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -2076,11 +2076,11 @@ def _reverse_indexer(self) -> Dict[Hashable, np.ndarray]: return result # reduction ops # - def _reduce(self, name, axis=0, **kwargs): + def _reduce(self, name: str, skipna: bool = True, **kwargs): func = getattr(self, name, None) if func is None: raise TypeError(f"Categorical cannot perform the operation {name}") - return func(**kwargs) + return func(skipna=skipna, **kwargs) @deprecate_kwarg(old_arg_name="numeric_only", new_arg_name="skipna") def min(self, skipna=True, **kwargs): diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index a306268cd8ede..ee4d43fdb3bc2 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -1552,7 +1552,7 @@ def __isub__(self, other): # -------------------------------------------------------------- # Reductions - def _reduce(self, name, axis=0, skipna=True, **kwargs): + def _reduce(self, name: str, skipna: bool = True, **kwargs): op = getattr(self, name, None) if op: return op(skipna=skipna, **kwargs) diff --git a/pandas/core/arrays/sparse/array.py b/pandas/core/arrays/sparse/array.py index 1d675b54a9c62..d8db196e4b92f 100644 --- a/pandas/core/arrays/sparse/array.py +++ b/pandas/core/arrays/sparse/array.py @@ -1164,7 +1164,7 @@ def nonzero(self): # Reductions # ------------------------------------------------------------------------ - def _reduce(self, name, skipna=True, **kwargs): + def _reduce(self, name: str, skipna: bool = True, **kwargs): method = getattr(self, name, None) if method is None: diff --git a/pandas/core/arrays/string_.py b/pandas/core/arrays/string_.py index 5104e3f12f5b4..fddd3af858f77 100644 --- a/pandas/core/arrays/string_.py +++ b/pandas/core/arrays/string_.py @@ -291,7 +291,7 @@ def astype(self, dtype, copy=True): return super().astype(dtype, copy) - def _reduce(self, name, skipna=True, **kwargs): + def _reduce(self, name: str, skipna: bool = True, **kwargs): if name in ["min", "max"]: return getattr(self, name)(skipna=skipna) diff --git a/pandas/tests/extension/arrow/arrays.py b/pandas/tests/extension/arrow/arrays.py index 29cfe1e0fe606..8a18f505058bc 100644 --- a/pandas/tests/extension/arrow/arrays.py +++ b/pandas/tests/extension/arrow/arrays.py @@ -162,14 +162,14 @@ def _concat_same_type(cls, to_concat): def 
__invert__(self): return type(self).from_scalars(~self._data.to_pandas()) - def _reduce(self, method, skipna=True, **kwargs): + def _reduce(self, name: str, skipna: bool = True, **kwargs): if skipna: arr = self[~self.isna()] else: arr = self try: - op = getattr(arr, method) + op = getattr(arr, name) except AttributeError as err: raise TypeError from err return op(**kwargs) diff --git a/pandas/tests/extension/decimal/array.py b/pandas/tests/extension/decimal/array.py index 4d5be75ff8200..2fbeec8dd8378 100644 --- a/pandas/tests/extension/decimal/array.py +++ b/pandas/tests/extension/decimal/array.py @@ -174,7 +174,7 @@ def _formatter(self, boxed=False): def _concat_same_type(cls, to_concat): return cls(np.concatenate([x._data for x in to_concat])) - def _reduce(self, name, skipna=True, **kwargs): + def _reduce(self, name: str, skipna: bool = True, **kwargs): if skipna: # If we don't have any NAs, we can ignore skipna From 88d81cc9919ba2ca0f00c98eb33286b437ab3c09 Mon Sep 17 00:00:00 2001 From: Karthik Mathur <22126205+mathurk1@users.noreply.github.com> Date: Thu, 16 Jul 2020 17:45:57 -0500 Subject: [PATCH 150/632] TST add test for dtype consistency with pd replace #23305 (#35234) --- pandas/tests/frame/methods/test_replace.py | 80 ++++++++++++++++++++++ 1 file changed, 80 insertions(+) diff --git a/pandas/tests/frame/methods/test_replace.py b/pandas/tests/frame/methods/test_replace.py index ea72a3d8fef4d..a3f056dbf9648 100644 --- a/pandas/tests/frame/methods/test_replace.py +++ b/pandas/tests/frame/methods/test_replace.py @@ -1493,3 +1493,83 @@ def test_replace_period_ignore_float(self): result = df.replace(1.0, 0.0) expected = pd.DataFrame({"Per": [pd.Period("2020-01")] * 3}) tm.assert_frame_equal(expected, result) + + def test_replace_value_category_type(self): + """ + Test for #23305: to ensure category dtypes are maintained + after replace with direct values + """ + + # create input data + input_dict = { + "col1": [1, 2, 3, 4], + "col2": ["a", "b", "c", "d"], + "col3": [1.5, 2.5, 3.5, 4.5], + "col4": ["cat1", "cat2", "cat3", "cat4"], + "col5": ["obj1", "obj2", "obj3", "obj4"], + } + # explicitly cast columns as category and order them + input_df = pd.DataFrame(data=input_dict).astype( + {"col2": "category", "col4": "category"} + ) + input_df["col2"] = input_df["col2"].cat.reorder_categories( + ["a", "b", "c", "d"], ordered=True + ) + input_df["col4"] = input_df["col4"].cat.reorder_categories( + ["cat1", "cat2", "cat3", "cat4"], ordered=True + ) + + # create expected dataframe + expected_dict = { + "col1": [1, 2, 3, 4], + "col2": ["a", "b", "c", "z"], + "col3": [1.5, 2.5, 3.5, 4.5], + "col4": ["cat1", "catX", "cat3", "cat4"], + "col5": ["obj9", "obj2", "obj3", "obj4"], + } + # explicitly cast columns as category and order them + expected = pd.DataFrame(data=expected_dict).astype( + {"col2": "category", "col4": "category"} + ) + expected["col2"] = expected["col2"].cat.reorder_categories( + ["a", "b", "c", "z"], ordered=True + ) + expected["col4"] = expected["col4"].cat.reorder_categories( + ["cat1", "catX", "cat3", "cat4"], ordered=True + ) + + # replace values in input dataframe + input_df = input_df.replace("d", "z") + input_df = input_df.replace("obj1", "obj9") + result = input_df.replace("cat2", "catX") + + tm.assert_frame_equal(result, expected) + + @pytest.mark.xfail( + reason="category dtype gets changed to object type after replace, see #35268", + strict=True, + ) + def test_replace_dict_category_type(self, input_category_df, expected_category_df): + """ + Test to ensure category 
dtypes are maintained + after replace with dict values + """ + + # create input dataframe + input_dict = {"col1": ["a"], "col2": ["obj1"], "col3": ["cat1"]} + # explicitly cast columns as category + input_df = pd.DataFrame(data=input_dict).astype( + {"col1": "category", "col2": "category", "col3": "category"} + ) + + # create expected dataframe + expected_dict = {"col1": ["z"], "col2": ["obj9"], "col3": ["catX"]} + # explicitly cast columns as category + expected = pd.DataFrame(data=expected_dict).astype( + {"col1": "category", "col2": "category", "col3": "category"} + ) + + # replace values in input dataframe using a dict + result = input_df.replace({"a": "z", "obj1": "obj9", "cat1": "catX"}) + + tm.assert_frame_equal(result, expected) From 697a5381bf9d11f4a9b7eebe51dc9260a01390da Mon Sep 17 00:00:00 2001 From: rhshadrach <45562402+rhshadrach@users.noreply.github.com> Date: Thu, 16 Jul 2020 18:48:29 -0400 Subject: [PATCH 151/632] BUG: DataFrameGroupBy.quantile raises for non-numeric dtypes rather than dropping columns (#34756) --- doc/source/whatsnew/v1.1.0.rst | 1 + pandas/core/groupby/groupby.py | 17 ++++++++++++++--- pandas/tests/groupby/test_quantile.py | 8 ++++++++ 3 files changed, 23 insertions(+), 3 deletions(-) diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst index cee3680b4bf65..7b28eb79e433c 100644 --- a/doc/source/whatsnew/v1.1.0.rst +++ b/doc/source/whatsnew/v1.1.0.rst @@ -1118,6 +1118,7 @@ Groupby/resample/rolling - Bug in :meth:`DataFrame.groupby` lost index, when one of the ``agg`` keys referenced an empty list (:issue:`32580`) - Bug in :meth:`Rolling.apply` where ``center=True`` was ignored when ``engine='numba'`` was specified (:issue:`34784`) - Bug in :meth:`DataFrame.ewm.cov` was throwing ``AssertionError`` for :class:`MultiIndex` inputs (:issue:`34440`) +- Bug in :meth:`core.groupby.DataFrameGroupBy.quantile` raises ``TypeError`` for non-numeric types rather than dropping columns (:issue:`27892`) - Bug in :meth:`core.groupby.DataFrameGroupBy.transform` when ``func='nunique'`` and columns are of type ``datetime64``, the result would also be of type ``datetime64`` instead of ``int64`` (:issue:`35109`) - Bug in :meth:'DataFrameGroupBy.first' and :meth:'DataFrameGroupBy.last' that would raise an unnecessary ``ValueError`` when grouping on multiple ``Categoricals`` (:issue:`34951`) diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index 65483abbd2a6e..ac45222625569 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -2403,7 +2403,7 @@ def _get_cythonized_result( signature needs_2d : bool, default False Whether the values and result of the Cython call signature - are at least 2-dimensional. + are 2-dimensional. min_count : int, default None When not None, min_count for the Cython call needs_mask : bool, default False @@ -2419,7 +2419,9 @@ def _get_cythonized_result( Function should return a tuple where the first element is the values to be passed to Cython and the second element is an optional type which the values should be converted to after being returned - by the Cython operation. Raises if `needs_values` is False. + by the Cython operation. This function is also responsible for + raising a TypeError if the values have an invalid type. Raises + if `needs_values` is False. post_processing : function, default None Function to be applied to result of Cython function. 
Should accept an array of values as the first argument and type inferences as its @@ -2451,6 +2453,7 @@ def _get_cythonized_result( output: Dict[base.OutputKey, np.ndarray] = {} base_func = getattr(libgroupby, how) + error_msg = "" for idx, obj in enumerate(self._iterate_slices()): name = obj.name values = obj._values @@ -2477,7 +2480,11 @@ def _get_cythonized_result( if needs_values: vals = values if pre_processing: - vals, inferences = pre_processing(vals) + try: + vals, inferences = pre_processing(vals) + except TypeError as e: + error_msg = str(e) + continue if needs_2d: vals = vals.reshape((-1, 1)) vals = vals.astype(cython_dtype, copy=False) @@ -2509,6 +2516,10 @@ def _get_cythonized_result( key = base.OutputKey(label=name, position=idx) output[key] = result + # error_msg is "" on an frame/series with no rows or columns + if len(output) == 0 and error_msg != "": + raise TypeError(error_msg) + if aggregate: return self._wrap_aggregated_output(output) else: diff --git a/pandas/tests/groupby/test_quantile.py b/pandas/tests/groupby/test_quantile.py index 8cfd8035502c3..9338742195bfe 100644 --- a/pandas/tests/groupby/test_quantile.py +++ b/pandas/tests/groupby/test_quantile.py @@ -232,3 +232,11 @@ def test_groupby_quantile_nullable_array(values, q): expected = pd.Series(true_quantiles * 2, index=idx, name="b") tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("q", [0.5, [0.0, 0.5, 1.0]]) +def test_groupby_quantile_skips_invalid_dtype(q): + df = pd.DataFrame({"a": [1], "b": [2.0], "c": ["x"]}) + result = df.groupby("a").quantile(q) + expected = df.groupby("a")[["b"]].quantile(q) + tm.assert_frame_equal(result, expected) From d396111eed69a11bab8c1f9078bb34014eb31dc0 Mon Sep 17 00:00:00 2001 From: Tom Augspurger Date: Thu, 16 Jul 2020 17:49:13 -0500 Subject: [PATCH 152/632] Fixed reindexing arith with duplicates (#35303) Closes https://github.com/pandas-dev/pandas/issues/35194 --- doc/source/whatsnew/v1.1.0.rst | 1 + pandas/core/ops/__init__.py | 23 +++++++++++++++++++---- pandas/tests/frame/test_arithmetic.py | 9 +++++++++ 3 files changed, 29 insertions(+), 4 deletions(-) diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst index 7b28eb79e433c..6434fe4042ffc 100644 --- a/doc/source/whatsnew/v1.1.0.rst +++ b/doc/source/whatsnew/v1.1.0.rst @@ -953,6 +953,7 @@ Numeric - Bug in :meth:`DataFrame.count` with ``level="foo"`` and index level ``"foo"`` containing NaNs causes segmentation fault (:issue:`21824`) - Bug in :meth:`DataFrame.diff` with ``axis=1`` returning incorrect results with mixed dtypes (:issue:`32995`) - Bug in :meth:`DataFrame.corr` and :meth:`DataFrame.cov` raising when handling nullable integer columns with ``pandas.NA`` (:issue:`33803`) +- Bug in arithmetic operations between ``DataFrame`` objects with non-overlapping columns with duplicate labels causing an infinite loop (:issue:`35194`) - Bug in :class:`DataFrame` and :class:`Series` addition and subtraction between object-dtype objects and ``datetime64`` dtype objects (:issue:`33824`) - Bug in :meth:`Index.difference` incorrect results when comparing a :class:`Float64Index` and object :class:`Index` (:issue:`35217`) - Bug in :class:`DataFrame` reductions (e.g. 
``df.min()``, ``df.max()``) with ``ExtensionArray`` dtypes (:issue:`34520`, :issue:`32651`) diff --git a/pandas/core/ops/__init__.py b/pandas/core/ops/__init__.py index 5dd94a8af74ac..60f3d23aaed13 100644 --- a/pandas/core/ops/__init__.py +++ b/pandas/core/ops/__init__.py @@ -17,6 +17,7 @@ from pandas.core.dtypes.generic import ABCDataFrame, ABCIndexClass, ABCSeries from pandas.core.dtypes.missing import isna +from pandas.core import algorithms from pandas.core.construction import extract_array from pandas.core.ops.array_ops import ( arithmetic_op, @@ -562,10 +563,12 @@ def _frame_arith_method_with_reindex( DataFrame """ # GH#31623, only operate on shared columns - cols = left.columns.intersection(right.columns) + cols, lcols, rcols = left.columns.join( + right.columns, how="inner", level=None, return_indexers=True + ) - new_left = left[cols] - new_right = right[cols] + new_left = left.iloc[:, lcols] + new_right = right.iloc[:, rcols] result = op(new_left, new_right) # Do the join on the columns instead of using _align_method_FRAME @@ -573,7 +576,19 @@ def _frame_arith_method_with_reindex( join_columns, _, _ = left.columns.join( right.columns, how="outer", level=None, return_indexers=True ) - return result.reindex(join_columns, axis=1) + + if result.columns.has_duplicates: + # Avoid reindexing with a duplicate axis. + # https://github.com/pandas-dev/pandas/issues/35194 + indexer, _ = result.columns.get_indexer_non_unique(join_columns) + indexer = algorithms.unique1d(indexer) + result = result._reindex_with_indexers( + {1: [join_columns, indexer]}, allow_dups=True + ) + else: + result = result.reindex(join_columns, axis=1) + + return result def _maybe_align_series_as_frame(frame: "DataFrame", series: "Series", axis: int): diff --git a/pandas/tests/frame/test_arithmetic.py b/pandas/tests/frame/test_arithmetic.py index a6b0ece58b095..e17357e9845b5 100644 --- a/pandas/tests/frame/test_arithmetic.py +++ b/pandas/tests/frame/test_arithmetic.py @@ -1552,3 +1552,12 @@ def test_dataframe_operation_with_non_numeric_types(df, col_dtype): expected = expected.astype({"b": col_dtype}) result = df + pd.Series([-1.0], index=list("a")) tm.assert_frame_equal(result, expected) + + +def test_arith_reindex_with_duplicates(): + # https://github.com/pandas-dev/pandas/issues/35194 + df1 = pd.DataFrame(data=[[0]], columns=["second"]) + df2 = pd.DataFrame(data=[[0, 0, 0]], columns=["first", "second", "second"]) + result = df1 + df2 + expected = pd.DataFrame([[np.nan, 0, 0]], columns=["first", "second", "second"]) + tm.assert_frame_equal(result, expected) From 65090286608602ba6d67ca4a29cf0535156cd509 Mon Sep 17 00:00:00 2001 From: Simon Hawkins Date: Thu, 16 Jul 2020 23:50:38 +0100 Subject: [PATCH 153/632] BUG: DataFrame.append with empty DataFrame and Series with tz-aware datetime value allocated object column (#35038) --- doc/source/whatsnew/v1.1.0.rst | 1 + pandas/core/dtypes/concat.py | 4 ++-- pandas/core/internals/concat.py | 2 +- pandas/tests/reshape/test_concat.py | 29 ++++++++++++++++++----------- 4 files changed, 22 insertions(+), 14 deletions(-) diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst index 6434fe4042ffc..df41b24ee5097 100644 --- a/doc/source/whatsnew/v1.1.0.rst +++ b/doc/source/whatsnew/v1.1.0.rst @@ -922,6 +922,7 @@ Datetimelike resolution which converted to object dtype instead of coercing to ``datetime64[ns]`` dtype when within the timestamp bounds (:issue:`34843`). 
- The ``freq`` keyword in :class:`Period`, :func:`date_range`, :func:`period_range`, :func:`pd.tseries.frequencies.to_offset` no longer allows tuples, pass as string instead (:issue:`34703`) +- Bug in :meth:`DataFrame.append` when appending a :class:`Series` containing a scalar tz-aware :class:`Timestamp` to an empty :class:`DataFrame` resulted in an object column instead of datetime64[ns, tz] dtype (:issue:`35038`) - ``OutOfBoundsDatetime`` issues an improved error message when timestamp is out of implementation bounds. (:issue:`32967`) Timedelta diff --git a/pandas/core/dtypes/concat.py b/pandas/core/dtypes/concat.py index 4b7c818f487ac..9902016475b22 100644 --- a/pandas/core/dtypes/concat.py +++ b/pandas/core/dtypes/concat.py @@ -152,11 +152,11 @@ def is_nonempty(x) -> bool: target_dtype = find_common_type([x.dtype for x in to_concat]) to_concat = [_cast_to_common_type(arr, target_dtype) for arr in to_concat] - if isinstance(to_concat[0], ExtensionArray): + if isinstance(to_concat[0], ExtensionArray) and axis == 0: cls = type(to_concat[0]) return cls._concat_same_type(to_concat) else: - return np.concatenate(to_concat) + return np.concatenate(to_concat, axis=axis) elif _contains_datetime or "timedelta" in typs: return concat_datetime(to_concat, axis=axis, typs=typs) diff --git a/pandas/core/internals/concat.py b/pandas/core/internals/concat.py index 2cc7461986c8f..2c0d4931a7bf2 100644 --- a/pandas/core/internals/concat.py +++ b/pandas/core/internals/concat.py @@ -333,7 +333,7 @@ def _concatenate_join_units(join_units, concat_axis, copy): # concatting with at least one EA means we are concatting a single column # the non-EA values are 2D arrays with shape (1, n) to_concat = [t if isinstance(t, ExtensionArray) else t[0, :] for t in to_concat] - concat_values = concat_compat(to_concat, axis=concat_axis) + concat_values = concat_compat(to_concat, axis=0) if not isinstance(concat_values, ExtensionArray): # if the result of concat is not an EA but an ndarray, reshape to # 2D to put it a non-EA Block diff --git a/pandas/tests/reshape/test_concat.py b/pandas/tests/reshape/test_concat.py index ffeb5ff0f8aaa..0159fabd04d59 100644 --- a/pandas/tests/reshape/test_concat.py +++ b/pandas/tests/reshape/test_concat.py @@ -1087,20 +1087,27 @@ def test_append_empty_frame_to_series_with_dateutil_tz(self): date = Timestamp("2018-10-24 07:30:00", tz=dateutil.tz.tzutc()) s = Series({"date": date, "a": 1.0, "b": 2.0}) df = DataFrame(columns=["c", "d"]) - result = df.append(s, ignore_index=True) - # n.b. it's not clear to me that expected is correct here. - # It's possible that the `date` column should have - # datetime64[ns, tz] dtype for both result and expected. - # that would be more consistent with new columns having - # their own dtype (float for a and b, datetime64ns, tz for date). 
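        # Illustrative sketch (not part of this patch): with the fix, the
        # appended tz-aware value keeps its dtype, so for the `df` and `s`
        # built above:
        #
        #     out = df.append(s, ignore_index=True)
        #     out["date"].dtype  # datetime64[ns, tzutc()], no longer object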
+ result_a = df.append(s, ignore_index=True) expected = DataFrame( - [[np.nan, np.nan, 1.0, 2.0, date]], - columns=["c", "d", "a", "b", "date"], - dtype=object, + [[np.nan, np.nan, 1.0, 2.0, date]], columns=["c", "d", "a", "b", "date"] ) # These columns get cast to object after append - expected["a"] = expected["a"].astype(float) - expected["b"] = expected["b"].astype(float) + expected["c"] = expected["c"].astype(object) + expected["d"] = expected["d"].astype(object) + tm.assert_frame_equal(result_a, expected) + + expected = DataFrame( + [[np.nan, np.nan, 1.0, 2.0, date]] * 2, columns=["c", "d", "a", "b", "date"] + ) + expected["c"] = expected["c"].astype(object) + expected["d"] = expected["d"].astype(object) + + result_b = result_a.append(s, ignore_index=True) + tm.assert_frame_equal(result_b, expected) + + # column order is different + expected = expected[["c", "d", "date", "a", "b"]] + result = df.append([s, s], ignore_index=True) tm.assert_frame_equal(result, expected) From 05a41bc879a28b653277635b84c36ecc4f8e3173 Mon Sep 17 00:00:00 2001 From: Matthias Bussonnier Date: Fri, 17 Jul 2020 03:25:50 -0700 Subject: [PATCH 154/632] DOC: extra closing parens make example invalid. (#35316) --- pandas/_libs/lib.pyx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx index 37d83a73c6597..5ecbb2c3ffd35 100644 --- a/pandas/_libs/lib.pyx +++ b/pandas/_libs/lib.pyx @@ -983,7 +983,7 @@ def is_list_like(obj: object, allow_sets: bool = True) -> bool: False >>> is_list_like(np.array([2])) True - >>> is_list_like(np.array(2))) + >>> is_list_like(np.array(2)) False """ return c_is_list_like(obj, allow_sets) From d660810e6e1b7200c4706bddf48de5f22f4a982f Mon Sep 17 00:00:00 2001 From: Joseph Gulian Date: Fri, 17 Jul 2020 06:37:13 -0400 Subject: [PATCH 155/632] TST: Adds CategoricalIndex DataFrame from_records test (GH32805) (#35055) * TST: Adds CategoricalIndex DataFrame from_records test (GH32805) * TST: Reformats CategoricalIndex DataFrame from_records test * TST: Removes comma from CategoricalIndex DataFrame from_records test * TST: Isorts tests/frame/test_constructor.py imports --- pandas/tests/frame/test_constructors.py | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py index 1631342c359c1..a4ed548264d39 100644 --- a/pandas/tests/frame/test_constructors.py +++ b/pandas/tests/frame/test_constructors.py @@ -19,6 +19,7 @@ import pandas as pd from pandas import ( Categorical, + CategoricalIndex, DataFrame, Index, Interval, @@ -2509,6 +2510,18 @@ def test_from_records_series_list_dict(self): result = DataFrame.from_records(data) tm.assert_frame_equal(result, expected) + def test_from_records_series_categorical_index(self): + # GH 32805 + index = CategoricalIndex( + [pd.Interval(-20, -10), pd.Interval(-10, 0), pd.Interval(0, 10)] + ) + series_of_dicts = pd.Series([{"a": 1}, {"a": 2}, {"b": 3}], index=index) + frame = pd.DataFrame.from_records(series_of_dicts, index=index) + expected = DataFrame( + {"a": [1, 2, np.NaN], "b": [np.NaN, np.NaN, 3]}, index=index + ) + tm.assert_frame_equal(frame, expected) + def test_frame_from_records_utc(self): rec = {"datum": 1.5, "begin_time": datetime(2006, 4, 27, tzinfo=pytz.utc)} From 4da8622484331184b147bffa9a0457c65a95c653 Mon Sep 17 00:00:00 2001 From: willbowditch <14288042+willbowditch@users.noreply.github.com> Date: Fri, 17 Jul 2020 11:58:29 +0100 Subject: [PATCH 156/632] Infer compression if file extension is 
uppercase (#35164) * Infer compression even if file extension is uppercase * Add upercase extensions to infer compression tests * Update whatsnew * Add PR number --- doc/source/whatsnew/v1.1.0.rst | 1 + pandas/io/common.py | 2 +- pandas/tests/io/test_common.py | 12 +++++++++++- 3 files changed, 13 insertions(+), 2 deletions(-) diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst index df41b24ee5097..bdb844ded59b7 100644 --- a/doc/source/whatsnew/v1.1.0.rst +++ b/doc/source/whatsnew/v1.1.0.rst @@ -1086,6 +1086,7 @@ I/O - Bug in :meth:`~HDFStore.create_table` now raises an error when `column` argument was not specified in `data_columns` on input (:issue:`28156`) - :meth:`read_json` now could read line-delimited json file from a file url while `lines` and `chunksize` are set. - Bug in :meth:`DataFrame.to_sql` when reading DataFrames with ``-np.inf`` entries with MySQL now has a more explicit ``ValueError`` (:issue:`34431`) +- Bug where capitalised files extensions were not decompressed by read_* functions (:issue:`35164`) - Bug in :meth:`read_excel` that was raising a ``TypeError`` when ``header=None`` and ``index_col`` given as list (:issue:`31783`) - Bug in "meth"`read_excel` where datetime values are used in the header in a `MultiIndex` (:issue:`34748`) - :func:`read_excel` no longer takes ``**kwds`` arguments. This means that passing in keyword ``chunksize`` now raises a ``TypeError`` (previously raised a ``NotImplementedError``), while passing in keyword ``encoding`` now raises a ``TypeError`` (:issue:`34464`) diff --git a/pandas/io/common.py b/pandas/io/common.py index 32ec088f00d88..bd77a1e69c138 100644 --- a/pandas/io/common.py +++ b/pandas/io/common.py @@ -339,7 +339,7 @@ def infer_compression( # Infer compression from the filename/URL extension for compression, extension in _compression_to_extension.items(): - if filepath_or_buffer.endswith(extension): + if filepath_or_buffer.lower().endswith(extension): return compression return None diff --git a/pandas/tests/io/test_common.py b/pandas/tests/io/test_common.py index e2f4ae04c1f9f..dde38eb55ea7f 100644 --- a/pandas/tests/io/test_common.py +++ b/pandas/tests/io/test_common.py @@ -87,7 +87,17 @@ def test_stringify_path_fspath(self): @pytest.mark.parametrize( "extension,expected", - [("", None), (".gz", "gzip"), (".bz2", "bz2"), (".zip", "zip"), (".xz", "xz")], + [ + ("", None), + (".gz", "gzip"), + (".bz2", "bz2"), + (".zip", "zip"), + (".xz", "xz"), + (".GZ", "gzip"), + (".BZ2", "bz2"), + (".ZIP", "zip"), + (".XZ", "xz"), + ], ) @pytest.mark.parametrize("path_type", path_types) def test_infer_compression_from_path(self, extension, expected, path_type): From 1fa37477218f0811e963da9198c10a87d5fba50e Mon Sep 17 00:00:00 2001 From: rhshadrach <45562402+rhshadrach@users.noreply.github.com> Date: Fri, 17 Jul 2020 08:33:04 -0400 Subject: [PATCH 157/632] CLN/DOC: DataFrame.to_parquet supports file-like objects (#35235) --- pandas/core/frame.py | 21 +++++++++++++-------- pandas/io/parquet.py | 29 +++++++++++++++++------------ 2 files changed, 30 insertions(+), 20 deletions(-) diff --git a/pandas/core/frame.py b/pandas/core/frame.py index b63467f08cdaa..f52341ed782d8 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -19,6 +19,7 @@ IO, TYPE_CHECKING, Any, + AnyStr, Dict, FrozenSet, Hashable, @@ -2266,11 +2267,11 @@ def to_markdown( @deprecate_kwarg(old_arg_name="fname", new_arg_name="path") def to_parquet( self, - path, - engine="auto", - compression="snappy", - index=None, - partition_cols=None, + path: 
FilePathOrBuffer[AnyStr], + engine: str = "auto", + compression: Optional[str] = "snappy", + index: Optional[bool] = None, + partition_cols: Optional[List[str]] = None, **kwargs, ) -> None: """ @@ -2283,9 +2284,12 @@ def to_parquet( Parameters ---------- - path : str - File path or Root Directory path. Will be used as Root Directory - path while writing a partitioned dataset. + path : str or file-like object + If a string, it will be used as Root Directory path + when writing a partitioned dataset. By file-like object, + we refer to objects with a write() method, such as a file handler + (e.g. via builtin open function) or io.BytesIO. The engine + fastparquet does not accept file-like objects. .. versionchanged:: 1.0.0 @@ -2312,6 +2316,7 @@ def to_parquet( partition_cols : list, optional, default None Column names by which to partition the dataset. Columns are partitioned in the order they are given. + Must be None if path is not a string. .. versionadded:: 0.24.0 diff --git a/pandas/io/parquet.py b/pandas/io/parquet.py index a0c9242684f0f..8c4b63767ac06 100644 --- a/pandas/io/parquet.py +++ b/pandas/io/parquet.py @@ -1,8 +1,9 @@ """ parquet compat """ -from typing import Any, Dict, Optional +from typing import Any, AnyStr, Dict, List, Optional from warnings import catch_warnings +from pandas._typing import FilePathOrBuffer from pandas.compat._optional import import_optional_dependency from pandas.errors import AbstractMethodError @@ -85,10 +86,10 @@ def __init__(self): def write( self, df: DataFrame, - path, - compression="snappy", + path: FilePathOrBuffer[AnyStr], + compression: Optional[str] = "snappy", index: Optional[bool] = None, - partition_cols=None, + partition_cols: Optional[List[str]] = None, **kwargs, ): self.validate_dataframe(df) @@ -213,11 +214,11 @@ def read(self, path, columns=None, **kwargs): def to_parquet( df: DataFrame, - path, + path: FilePathOrBuffer[AnyStr], engine: str = "auto", - compression="snappy", + compression: Optional[str] = "snappy", index: Optional[bool] = None, - partition_cols=None, + partition_cols: Optional[List[str]] = None, **kwargs, ): """ @@ -226,9 +227,12 @@ def to_parquet( Parameters ---------- df : DataFrame - path : str - File path or Root Directory path. Will be used as Root Directory path - while writing a partitioned dataset. + path : str or file-like object + If a string, it will be used as Root Directory path + when writing a partitioned dataset. By file-like object, + we refer to objects with a write() method, such as a file handler + (e.g. via builtin open function) or io.BytesIO. The engine + fastparquet does not accept file-like objects. .. versionchanged:: 0.24.0 @@ -251,8 +255,9 @@ def to_parquet( .. versionadded:: 0.24.0 partition_cols : str or list, optional, default None - Column names by which to partition the dataset - Columns are partitioned in the order they are given + Column names by which to partition the dataset. + Columns are partitioned in the order they are given. + Must be None if path is not a string. .. 
versionadded:: 0.24.0 From 7ae3098e3ae2ec9101a9627d2e28666de9c158b0 Mon Sep 17 00:00:00 2001 From: Tom Augspurger Date: Fri, 17 Jul 2020 08:25:27 -0500 Subject: [PATCH 158/632] CI: Skip test for 3.7.0 exactly (#35310) xref https://github.com/pandas-dev/pandas/issues/35309 --- pandas/tests/io/json/test_json_table_schema.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pandas/tests/io/json/test_json_table_schema.py b/pandas/tests/io/json/test_json_table_schema.py index df64af6ac2265..22b4ec189a0f1 100644 --- a/pandas/tests/io/json/test_json_table_schema.py +++ b/pandas/tests/io/json/test_json_table_schema.py @@ -1,6 +1,7 @@ """Tests for Table Schema integration.""" from collections import OrderedDict import json +import sys import numpy as np import pytest @@ -671,6 +672,7 @@ class TestTableOrientReader: {"bools": [True, False, False, True]}, ], ) + @pytest.mark.skipif(sys.version_info[:3] == (3, 7, 0), reason="GH-35309") def test_read_json_table_orient(self, index_nm, vals, recwarn): df = DataFrame(vals, index=pd.Index(range(4), name=index_nm)) out = df.to_json(orient="table") From 41022a89779513324fbe6dd7e4855c586a0e2d79 Mon Sep 17 00:00:00 2001 From: Saul Shanabrook Date: Fri, 17 Jul 2020 12:24:14 -0400 Subject: [PATCH 159/632] Remove deprecated warn kwarg for matplotlib use (#35323) The warn param has been deprecated in 3.2.1: https://matplotlib.org/3.2.1/api/prev_api_changes/api_changes_3.2.0.html#matplotlib-use --- pandas/util/_test_decorators.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pandas/util/_test_decorators.py b/pandas/util/_test_decorators.py index 25394dc6775d8..a4a1d83177c50 100644 --- a/pandas/util/_test_decorators.py +++ b/pandas/util/_test_decorators.py @@ -94,7 +94,7 @@ def safe_import(mod_name: str, min_version: Optional[str] = None): def _skip_if_no_mpl(): mod = safe_import("matplotlib") if mod: - mod.use("Agg", warn=True) + mod.use("Agg") else: return True From 9faeab7d7ce2a7a45faae904878c2b9c6431e59c Mon Sep 17 00:00:00 2001 From: Steffen Schmitz Date: Fri, 17 Jul 2020 18:26:46 +0200 Subject: [PATCH 160/632] BUG: asymmetric error bars for series (GH9536) (#34514) --- doc/source/user_guide/visualization.rst | 2 +- doc/source/whatsnew/v1.1.0.rst | 1 + pandas/plotting/_matplotlib/core.py | 16 +++++++++++++++- pandas/tests/plotting/test_series.py | 20 ++++++++++++++++++++ 4 files changed, 37 insertions(+), 2 deletions(-) diff --git a/doc/source/user_guide/visualization.rst b/doc/source/user_guide/visualization.rst index 27826e7cde9e1..5bc87bca87211 100644 --- a/doc/source/user_guide/visualization.rst +++ b/doc/source/user_guide/visualization.rst @@ -1425,7 +1425,7 @@ Horizontal and vertical error bars can be supplied to the ``xerr`` and ``yerr`` * As a ``str`` indicating which of the columns of plotting :class:`DataFrame` contain the error values. * As raw values (``list``, ``tuple``, or ``np.ndarray``). Must be the same length as the plotting :class:`DataFrame`/:class:`Series`. -Asymmetrical error bars are also supported, however raw error values must be provided in this case. For a ``M`` length :class:`Series`, a ``Mx2`` array should be provided indicating lower and upper (or left and right) errors. For a ``MxN`` :class:`DataFrame`, asymmetrical errors should be in a ``Mx2xN`` array. +Asymmetrical error bars are also supported, however raw error values must be provided in this case. For a ``N`` length :class:`Series`, a ``2xN`` array should be provided indicating lower and upper (or left and right) errors. 
For a ``MxN`` :class:`DataFrame`, asymmetrical errors should be in a ``Mx2xN`` array. Here is an example of one way to easily plot group means with standard deviations from the raw data. diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst index bdb844ded59b7..d74c1bca61de8 100644 --- a/doc/source/whatsnew/v1.1.0.rst +++ b/doc/source/whatsnew/v1.1.0.rst @@ -340,6 +340,7 @@ Other enhancements - :class:`pandas.core.window.ExponentialMovingWindow` now supports a ``times`` argument that allows ``mean`` to be calculated with observations spaced by the timestamps in ``times`` (:issue:`34839`) - :meth:`DataFrame.agg` and :meth:`Series.agg` now accept named aggregation for renaming the output columns/indexes. (:issue:`26513`) - ``compute.use_numba`` now exists as a configuration option that utilizes the numba engine when available (:issue:`33966`) +- :meth:`Series.plot` now supports asymmetric error bars. Previously, if :meth:`Series.plot` received a "2xN" array with error values for `yerr` and/or `xerr`, the left/lower values (first row) were mirrored, while the right/upper values (second row) were ignored. Now, the first row represents the left/lower error values and the second row the right/upper error values. (:issue:`9536`) .. --------------------------------------------------------------------------- diff --git a/pandas/plotting/_matplotlib/core.py b/pandas/plotting/_matplotlib/core.py index e510f7140519a..353bc8a8936a5 100644 --- a/pandas/plotting/_matplotlib/core.py +++ b/pandas/plotting/_matplotlib/core.py @@ -770,6 +770,12 @@ def _parse_errorbars(self, label, err): DataFrame/dict: error values are paired with keys matching the key in the plotted DataFrame str: the name of the column within the plotted DataFrame + + Asymmetrical error bars are also supported, however raw error values + must be provided in this case. For a ``N`` length :class:`Series`, a + ``2xN`` array should be provided indicating lower and upper (or left + and right) errors. For a ``MxN`` :class:`DataFrame`, asymmetrical errors + should be in a ``Mx2xN`` array. 
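        A minimal sketch of the ``Series`` case (illustrative only; it
        mirrors the test added in this patch)::

            s = pd.Series(np.arange(10))
            err = np.random.rand(2, 10)  # row 0: lower errors, row 1: upper
            s.plot(yerr=err)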
""" if err is None: return None @@ -810,7 +816,15 @@ def match_labels(data, e): err_shape = err.shape # asymmetrical error bars - if err.ndim == 3: + if isinstance(self.data, ABCSeries) and err_shape[0] == 2: + err = np.expand_dims(err, 0) + err_shape = err.shape + if err_shape[2] != len(self.data): + raise ValueError( + "Asymmetrical error bars should be provided " + f"with the shape (2, {len(self.data)})" + ) + elif isinstance(self.data, ABCDataFrame) and err.ndim == 3: if ( (err_shape[0] != self.nseries) or (err_shape[1] != 2) diff --git a/pandas/tests/plotting/test_series.py b/pandas/tests/plotting/test_series.py index 64da98f57676f..316ca6ce91af7 100644 --- a/pandas/tests/plotting/test_series.py +++ b/pandas/tests/plotting/test_series.py @@ -729,6 +729,26 @@ def test_dup_datetime_index_plot(self): s = Series(values, index=index) _check_plot_works(s.plot) + def test_errorbar_asymmetrical(self): + # GH9536 + s = Series(np.arange(10), name="x") + err = np.random.rand(2, 10) + + ax = s.plot(yerr=err, xerr=err) + + result = np.vstack([i.vertices[:, 1] for i in ax.collections[1].get_paths()]) + expected = (err.T * np.array([-1, 1])) + s.to_numpy().reshape(-1, 1) + tm.assert_numpy_array_equal(result, expected) + + msg = ( + "Asymmetrical error bars should be provided " + f"with the shape \\(2, {len(s)}\\)" + ) + with pytest.raises(ValueError, match=msg): + s.plot(yerr=np.random.rand(2, 11)) + + tm.close() + @pytest.mark.slow def test_errorbar_plot(self): From cbcddbc8cfcec94a7c85ffce8cebeab578b0bdd4 Mon Sep 17 00:00:00 2001 From: Tom Augspurger Date: Fri, 17 Jul 2020 11:30:40 -0500 Subject: [PATCH 161/632] Doc fixups (#35327) --- doc/source/reference/groupby.rst | 1 + doc/source/reference/window.rst | 1 + doc/source/whatsnew/v1.1.0.rst | 95 +++++++++++++++----------------- 3 files changed, 46 insertions(+), 51 deletions(-) diff --git a/doc/source/reference/groupby.rst b/doc/source/reference/groupby.rst index 76cb53559f334..ccf130d03418c 100644 --- a/doc/source/reference/groupby.rst +++ b/doc/source/reference/groupby.rst @@ -128,6 +128,7 @@ The following methods are available only for ``SeriesGroupBy`` objects. .. autosummary:: :toctree: api/ + SeriesGroupBy.hist SeriesGroupBy.nlargest SeriesGroupBy.nsmallest SeriesGroupBy.nunique diff --git a/doc/source/reference/window.rst b/doc/source/reference/window.rst index d7e6405a3732b..611c0e0f7f160 100644 --- a/doc/source/reference/window.rst +++ b/doc/source/reference/window.rst @@ -86,3 +86,4 @@ Base class for defining custom window boundaries. api.indexers.BaseIndexer api.indexers.FixedForwardWindowIndexer + api.indexers.VariableOffsetWindowIndexer diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst index d74c1bca61de8..b9ef1aae9a801 100644 --- a/doc/source/whatsnew/v1.1.0.rst +++ b/doc/source/whatsnew/v1.1.0.rst @@ -17,7 +17,7 @@ Enhancements KeyErrors raised by loc specify missing labels ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -Previously, if labels were missing for a loc call, a KeyError was raised stating that this was no longer supported. +Previously, if labels were missing for a ``.loc`` call, a KeyError was raised stating that this was no longer supported. Now the error message also includes a list of the missing labels (max 10 items, display width 80 characters). See :issue:`34272`. @@ -124,8 +124,6 @@ compatibility (:issue:`3729`) The default setting of ``dropna`` argument is ``True`` which means ``NA`` are not included in group keys. -.. versionadded:: 1.1.0 - .. 
_whatsnew_110.key_sorting: @@ -281,17 +279,17 @@ Other enhancements - Added a :func:`pandas.api.indexers.FixedForwardWindowIndexer` class to support forward-looking windows during ``rolling`` operations. - Added a :func:`pandas.api.indexers.VariableOffsetWindowIndexer` class to support ``rolling`` operations with non-fixed offsets (:issue:`34994`) - :meth:`~DataFrame.describe` now includes a ``datetime_is_numeric`` keyword to control how datetime columns are summarized (:issue:`30164`, :issue:`34798`) -- :class:`Styler` may now render CSS more efficiently where multiple cells have the same styling (:issue:`30876`) -- :meth:`Styler.highlight_null` now accepts ``subset`` argument (:issue:`31345`) -- When writing directly to a sqlite connection :func:`to_sql` now supports the ``multi`` method (:issue:`29921`) -- `OptionError` is now exposed in `pandas.errors` (:issue:`27553`) -- Add :meth:`ExtensionArray.argmax` and :meth:`ExtensionArray.argmin` (:issue:`24382`) +- :class:`~pandas.io.formats.style.Styler` may now render CSS more efficiently where multiple cells have the same styling (:issue:`30876`) +- :meth:`~pandas.io.formats.style.Styler.highlight_null` now accepts ``subset`` argument (:issue:`31345`) +- When writing directly to a sqlite connection :meth:`DataFrame.to_sql` now supports the ``multi`` method (:issue:`29921`) +- :class:`pandas.errors.OptionError` is now exposed in ``pandas.errors`` (:issue:`27553`) +- Add :meth:`api.extensions.ExtensionArray.argmax` and :meth:`api.extensions.ExtensionArray.argmin` (:issue:`24382`) - :func:`timedelta_range` will now infer a frequency when passed ``start``, ``stop``, and ``periods`` (:issue:`32377`) - Positional slicing on a :class:`IntervalIndex` now supports slices with ``step > 1`` (:issue:`31658`) - :class:`Series.str` now has a `fullmatch` method that matches a regular expression against the entire string in each row of the series, similar to `re.fullmatch` (:issue:`32806`). - :meth:`DataFrame.sample` will now also allow array-like and BitGenerator objects to be passed to ``random_state`` as seeds (:issue:`32503`) -- :meth:`MultiIndex.union` will now raise `RuntimeWarning` if the object inside are unsortable, pass `sort=False` to suppress this warning (:issue:`33015`) -- :class:`Series.dt` and :class:`DatatimeIndex` now have an `isocalendar` method that returns a :class:`DataFrame` with year, week, and day calculated according to the ISO 8601 calendar (:issue:`33206`, :issue:`34392`). +- :meth:`Index.union` will now raise ``RuntimeWarning`` for :class:`MultiIndex` objects if the object inside are unsortable. Pass ``sort=False`` to suppress this warning (:issue:`33015`) +- Added :meth:`Series.dt.isocalendar` and :meth:`DatetimeIndex.isocalendar` that returns a :class:`DataFrame` with year, week, and day calculated according to the ISO 8601 calendar (:issue:`33206`, :issue:`34392`). - The :meth:`DataFrame.to_feather` method now supports additional keyword arguments (e.g. to set the compression) that are added in pyarrow 0.17 (:issue:`33422`). @@ -305,32 +303,31 @@ Other enhancements - :meth:`melt` has gained an ``ignore_index`` (default ``True``) argument that, if set to ``False``, prevents the method from dropping the index (:issue:`17440`). 
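A short illustration of the ``ignore_index`` keyword from the preceding bullet (the example data is hypothetical)::

    df = pd.DataFrame({"a": [1, 2]}, index=["x", "y"])
    df.melt(ignore_index=False)  # keeps the original index "x", "y"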
- :meth:`Series.update` now accepts objects that can be coerced to a :class:`Series`, such as ``dict`` and ``list``, mirroring the behavior of :meth:`DataFrame.update` (:issue:`33215`) -- :meth:`~pandas.core.groupby.GroupBy.transform` and :meth:`~pandas.core.groupby.GroupBy.aggregate` has gained ``engine`` and ``engine_kwargs`` arguments that supports executing functions with ``Numba`` (:issue:`32854`, :issue:`33388`) +- :meth:`~pandas.core.groupby.DataFrameGroupBy.transform` and :meth:`~pandas.core.groupby.DataFrameGroupBy.aggregate` has gained ``engine`` and ``engine_kwargs`` arguments that supports executing functions with ``Numba`` (:issue:`32854`, :issue:`33388`) - :meth:`~pandas.core.resample.Resampler.interpolate` now supports SciPy interpolation method :class:`scipy.interpolate.CubicSpline` as method ``cubicspline`` (:issue:`33670`) -- :class:`~pandas.core.groupby.generic.DataFrameGroupBy` and :class:`~pandas.core.groupby.generic.SeriesGroupBy` now implement the ``sample`` method for doing random sampling within groups (:issue:`31775`) +- :class:`~pandas.core.groupby.DataFrameGroupBy` and :class:`~pandas.core.groupby.SeriesGroupBy` now implement the ``sample`` method for doing random sampling within groups (:issue:`31775`) - :meth:`DataFrame.to_numpy` now supports the ``na_value`` keyword to control the NA sentinel in the output array (:issue:`33820`) -- The ``ExtensionArray`` class has now an :meth:`~pandas.arrays.ExtensionArray.equals` - method, similarly to :meth:`Series.equals` (:issue:`27081`). -- The minimum suppported dta version has increased to 105 in :meth:`~pandas.io.stata.read_stata` and :class:`~pandas.io.stata.StataReader` (:issue:`26667`). -- :meth:`~pandas.core.frame.DataFrame.to_stata` supports compression using the ``compression`` +- Added :class:`api.extension.ExtensionArray.equals` to the extension array interface, similar to :meth:`Series.equals` (:issue:`27081`). +- The minimum supported dta version has increased to 105 in :meth:`read_stata` and :class:`~pandas.io.stata.StataReader` (:issue:`26667`). +- :meth:`~DataFrame.to_stata` supports compression using the ``compression`` keyword argument. Compression can either be inferred or explicitly set using a string or a dictionary containing both the method and any additional arguments that are passed to the compression library. Compression was also added to the low-level Stata-file writers :class:`~pandas.io.stata.StataWriter`, :class:`~pandas.io.stata.StataWriter117`, and :class:`~pandas.io.stata.StataWriterUTF8` (:issue:`26599`). -- :meth:`HDFStore.put` now accepts `track_times` parameter. Parameter is passed to ``create_table`` method of ``PyTables`` (:issue:`32682`). +- :meth:`HDFStore.put` now accepts a ``track_times`` parameter. Parameter is passed to ``create_table`` method of ``PyTables`` (:issue:`32682`). - :meth:`Series.plot` and :meth:`DataFrame.plot` now accepts `xlabel` and `ylabel` parameters to present labels on x and y axis (:issue:`9093`). -- Make :class:`pandas.core.window.Rolling` and :class:`pandas.core.window.Expanding` iterable(:issue:`11704`) +- Make :class:`pandas.core.window.rolling.Rolling` and :class:`pandas.core.window.expanding.Expanding` iterable(:issue:`11704`) - Make ``option_context`` a :class:`contextlib.ContextDecorator`, which allows it to be used as a decorator over an entire function (:issue:`34253`).
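A minimal sketch of the decorator form described in the last bullet above (the function name and option values are illustrative)::

    @pd.option_context("display.max_rows", 10)
    def show_frame(df):
        print(df)  # runs with display.max_rows temporarily set to 10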
- :meth:`DataFrame.to_csv` and :meth:`Series.to_csv` now accept an ``errors`` argument (:issue:`22610`) -- :meth:`groupby.transform` now allows ``func`` to be ``pad``, ``backfill`` and ``cumcount`` (:issue:`31269`). -- :meth:`~pandas.io.json.read_json` now accepts `nrows` parameter. (:issue:`33916`). -- :meth:`DataFrame.hist`, :meth:`Series.hist`, :meth:`core.groupby.DataFrameGroupBy.hist`, and :meth:`core.groupby.SeriesGroupBy.hist` have gained the ``legend`` argument. Set to True to show a legend in the histogram. (:issue:`6279`) +- :meth:`~pandas.core.groupby.DataFrameGroupBy.groupby.transform` now allows ``func`` to be ``pad``, ``backfill`` and ``cumcount`` (:issue:`31269`). +- :meth:`read_json` now accepts an ``nrows`` parameter. (:issue:`33916`). +- :meth:`DataFrame.hist`, :meth:`Series.hist`, :meth:`core.groupby.DataFrameGroupBy.hist`, and :meth:`core.groupby.SeriesGroupBy.hist` have gained the ``legend`` argument. Set to True to show a legend in the histogram. (:issue:`6279`)0 - :func:`concat` and :meth:`~DataFrame.append` now preserve extension dtypes, for example combining a nullable integer column with a numpy integer column will no longer result in object dtype but preserve the integer dtype (:issue:`33607`, :issue:`34339`, :issue:`34095`). -- :meth:`~pandas.io.gbq.read_gbq` now allows to disable progress bar (:issue:`33360`). -- :meth:`~pandas.io.gbq.read_gbq` now supports the ``max_results`` kwarg from ``pandas-gbq`` (:issue:`34639`). +- :meth:`read_gbq` now allows to disable progress bar (:issue:`33360`). +- :meth:`read_gbq` now supports the ``max_results`` kwarg from ``pandas-gbq`` (:issue:`34639`). - :meth:`DataFrame.cov` and :meth:`Series.cov` now support a new parameter ddof to support delta degrees of freedom as in the corresponding numpy methods (:issue:`34611`). - :meth:`DataFrame.to_html` and :meth:`DataFrame.to_string`'s ``col_space`` parameter now accepts a list or dict to change only some specific columns' width (:issue:`28917`). - :meth:`DataFrame.to_excel` can now also write OpenOffice spreadsheet (.ods) files (:issue:`27222`) @@ -351,7 +348,7 @@ Notable bug fixes These are bug fixes that might have notable behavior changes. -``MultiIndex.get_indexer`` interprets `method` argument differently +``MultiIndex.get_indexer`` interprets ``method`` argument correctly ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ This restores the behavior of :meth:`MultiIndex.get_indexer` with ``method='backfill'`` or ``method='pad'`` to the behavior before pandas 0.23.0. In particular, MultiIndexes are treated as a list of tuples and padding or backfilling is done with respect to the ordering of these lists of tuples (:issue:`29896`). @@ -411,8 +408,6 @@ And the differences in reindexing ``df`` with ``mi_2`` and using ``method='pad'` df.reindex(mi_2, method='pad') -- - .. _whatsnew_110.notable_bug_fixes.indexing_raises_key_errors: Failed Label-Based Lookups Always Raise KeyError @@ -522,8 +517,10 @@ those integer keys is not present in the first level of the index (:issue:`33539 .. 
ipython:: python - left_df = pd.DataFrame({'animal': ['dog', 'pig'], 'max_speed': [40, 11]}) - right_df = pd.DataFrame({'animal': ['quetzal', 'pig'], 'max_speed': [80, 11]}) + left_df = pd.DataFrame({'animal': ['dog', 'pig'], + 'max_speed': [40, 11]}) + right_df = pd.DataFrame({'animal': ['quetzal', 'pig'], + 'max_speed': [80, 11]}) left_df right_df @@ -622,7 +619,7 @@ Using :meth:`DataFrame.groupby` with ``as_index=False`` and the function ``idxma df.groupby("a", as_index=False).nunique() -The method :meth:`core.DataFrameGroupBy.size` would previously ignore ``as_index=False``. Now the grouping columns are returned as columns, making the result a `DataFrame` instead of a `Series`. (:issue:`32599`) +The method :meth:`~pandas.core.groupby.DataFrameGroupBy.size` would previously ignore ``as_index=False``. Now the grouping columns are returned as columns, making the result a `DataFrame` instead of a `Series`. (:issue:`32599`) *Previous behavior*: @@ -643,11 +640,11 @@ The method :meth:`core.DataFrameGroupBy.size` would previously ignore ``as_index .. _whatsnew_110.api_breaking.groupby_results_lost_as_index_false: -:meth:`DataFrameGroupby.agg` lost results with ``as_index`` ``False`` when relabeling columns -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +:meth:`~pandas.core.groupby.DataFrameGroupby.agg` lost results with ``as_index=False`` when relabeling columns +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -Previously :meth:`DataFrameGroupby.agg` lost the result columns, when the ``as_index`` option was -set to ``False`` and the result columns were relabeled. In this case he result values were replaced with +Previously :meth:`~pandas.core.groupby.DataFrameGroupby.agg` lost the result columns, when the ``as_index`` option was +set to ``False`` and the result columns were relabeled. In this case the result values were replaced with the previous index (:issue:`32240`). .. ipython:: python @@ -812,7 +809,7 @@ Deprecations positional arguments is deprecated since version 1.1. All other arguments should be given as keyword arguments (:issue:`27573`). -- Passing any arguments but `path_or_buf` (the first one) to +- Passing any arguments but ``path_or_buf`` (the first one) to :func:`read_json` as positional arguments is deprecated since version 1.1. All other arguments should be given as keyword arguments (:issue:`27573`). @@ -821,23 +818,20 @@ Deprecations positional arguments is deprecated since version 1.1. All other arguments should be given as keyword arguments (:issue:`27573`). 
-- :func:`pandas.api.types.is_categorical` is deprecated and will be removed in a future version; use `:func:pandas.api.types.is_categorical_dtype` instead (:issue:`33385`) +- :func:`pandas.api.types.is_categorical` is deprecated and will be removed in a future version; use :func:`pandas.api.types.is_categorical_dtype` instead (:issue:`33385`) - :meth:`Index.get_value` is deprecated and will be removed in a future version (:issue:`19728`) - :meth:`Series.dt.week` and `Series.dt.weekofyear` are deprecated and will be removed in a future version, use :meth:`Series.dt.isocalendar().week` instead (:issue:`33595`) -- :meth:`DatetimeIndex.week` and `DatetimeIndex.weekofyear` are deprecated and will be removed in a future version, use :meth:`DatetimeIndex.isocalendar().week` instead (:issue:`33595`) -- :meth:`DatetimeArray.week` and `DatetimeArray.weekofyear` are deprecated and will be removed in a future version, use :meth:`DatetimeArray.isocalendar().week` instead (:issue:`33595`) +- :meth:`DatetimeIndex.week` and ``DatetimeIndex.weekofyear`` are deprecated and will be removed in a future version, use ``DatetimeIndex.isocalendar().week`` instead (:issue:`33595`) +- :meth:`DatetimeArray.week` and ``DatetimeArray.weekofyear`` are deprecated and will be removed in a future version, use ``DatetimeArray.isocalendar().week`` instead (:issue:`33595`) - :meth:`DateOffset.__call__` is deprecated and will be removed in a future version, use ``offset + other`` instead (:issue:`34171`) -- :meth:`~BusinessDay.apply_index` is deprecated and will be removed in a future version. Use ``offset + other`` instead (:issue:`34580`) +- :meth:`~pandas.tseries.offsets.BusinessDay.apply_index` is deprecated and will be removed in a future version. Use ``offset + other`` instead (:issue:`34580`) - :meth:`DataFrame.tshift` and :meth:`Series.tshift` are deprecated and will be removed in a future version, use :meth:`DataFrame.shift` and :meth:`Series.shift` instead (:issue:`11631`) - Indexing an :class:`Index` object with a float key is deprecated, and will raise an ``IndexError`` in the future. You can manually convert to an integer key instead (:issue:`34191`). -- The ``squeeze`` keyword in the ``groupby`` function is deprecated and will be removed in a future version (:issue:`32380`) -- The ``tz`` keyword in :meth:`Period.to_timestamp` is deprecated and will be removed in a future version; use `per.to_timestamp(...).tz_localize(tz)`` instead (:issue:`34522`) +- The ``squeeze`` keyword in :meth:`~DataFrame.groupby` is deprecated and will be removed in a future version (:issue:`32380`) +- The ``tz`` keyword in :meth:`Period.to_timestamp` is deprecated and will be removed in a future version; use ``per.to_timestamp(...).tz_localize(tz)`` instead (:issue:`34522`) - :meth:`DatetimeIndex.to_perioddelta` is deprecated and will be removed in a future version. Use ``index - index.to_period(freq).to_timestamp()`` instead (:issue:`34853`) -- :meth:`util.testing.assert_almost_equal` now accepts both relative and absolute - precision through the ``rtol``, and ``atol`` parameters, thus deprecating the - ``check_less_precise`` parameter. (:issue:`13357`). 
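As a concrete migration for the ``week``/``weekofyear`` deprecations listed above (an illustrative sketch, not text from the patch):

.. code-block:: python

    import pandas as pd

    s = pd.Series(pd.date_range("2020-01-01", periods=3, freq="D"))
    # deprecated in 1.1: s.dt.week and s.dt.weekofyear
    weeks = s.dt.isocalendar().week  # ISO 8601 week numbers

``isocalendar()`` returns a :class:`DataFrame` with ``year``, ``week`` and ``day`` columns, so the same pattern applies to ``DatetimeIndex.isocalendar()``.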
- :func:`DataFrame.melt` accepting a value_name that already exists is deprecated, and will be removed in a future version (:issue:`34731`) - the ``center`` keyword in the :meth:`DataFrame.expanding` function is deprecated and will be removed in a future version (:issue:`20647`) @@ -930,10 +924,10 @@ Timedelta ^^^^^^^^^ - Bug in constructing a :class:`Timedelta` with a high precision integer that would round the :class:`Timedelta` components (:issue:`31354`) -- Bug in dividing ``np.nan`` or ``None`` by :class:`Timedelta`` incorrectly returning ``NaT`` (:issue:`31869`) +- Bug in dividing ``np.nan`` or ``None`` by :class:`Timedelta` incorrectly returning ``NaT`` (:issue:`31869`) - Timedeltas now understand ``µs`` as identifier for microsecond (:issue:`32899`) - :class:`Timedelta` string representation now includes nanoseconds, when nanoseconds are non-zero (:issue:`9309`) -- Bug in comparing a :class:`Timedelta`` object against a ``np.ndarray`` with ``timedelta64`` dtype incorrectly viewing all entries as unequal (:issue:`33441`) +- Bug in comparing a :class:`Timedelta` object against a ``np.ndarray`` with ``timedelta64`` dtype incorrectly viewing all entries as unequal (:issue:`33441`) - Bug in :func:`timedelta_range` that produced an extra point on a edge case (:issue:`30353`, :issue:`33498`) - Bug in :meth:`DataFrame.resample` that produced an extra point on a edge case (:issue:`30353`, :issue:`13022`, :issue:`33498`) - Bug in :meth:`DataFrame.resample` that ignored the ``loffset`` argument when dealing with timedelta (:issue:`7687`, :issue:`33498`) @@ -972,12 +966,11 @@ Strings - Bug in the :meth:`~Series.astype` method when converting "string" dtype data to nullable integer dtype (:issue:`32450`). - Fixed issue where taking ``min`` or ``max`` of a ``StringArray`` or ``Series`` with ``StringDtype`` type would raise. (:issue:`31746`) - Bug in :meth:`Series.str.cat` returning ``NaN`` output when other had :class:`Index` type (:issue:`33425`) -- :func: `pandas.api.dtypes.is_string_dtype` no longer incorrectly identifies categorical series as string. +- :func:`pandas.api.dtypes.is_string_dtype` no longer incorrectly identifies categorical series as string. Interval ^^^^^^^^ - Bug in :class:`IntervalArray` incorrectly allowing the underlying data to be changed when setting values (:issue:`32782`) -- Indexing ^^^^^^^^ @@ -1002,8 +995,8 @@ Indexing - Bug in :meth:`DataFrame.copy` _item_cache not invalidated after copy causes post-copy value updates to not be reflected (:issue:`31784`) - Fixed regression in :meth:`DataFrame.loc` and :meth:`Series.loc` throwing an error when a ``datetime64[ns, tz]`` value is provided (:issue:`32395`) - Bug in `Series.__getitem__` with an integer key and a :class:`MultiIndex` with leading integer level failing to raise ``KeyError`` if the key is not present in the first level (:issue:`33355`) -- Bug in :meth:`DataFrame.iloc` when slicing a single column-:class:`DataFrame`` with ``ExtensionDtype`` (e.g. ``df.iloc[:, :1]``) returning an invalid result (:issue:`32957`) -- Bug in :meth:`DatetimeIndex.insert` and :meth:`TimedeltaIndex.insert` causing index ``freq`` to be lost when setting an element into an empty :class:`Series` (:issue:33573`) +- Bug in :meth:`DataFrame.iloc` when slicing a single column-:class:`DataFrame` with ``ExtensionDtype`` (e.g. 
``df.iloc[:, :1]``) returning an invalid result (:issue:`32957`) +- Bug in :meth:`DatetimeIndex.insert` and :meth:`TimedeltaIndex.insert` causing index ``freq`` to be lost when setting an element into an empty :class:`Series` (:issue:`33573`) - Bug in :meth:`Series.__setitem__` with an :class:`IntervalIndex` and a list-like key of integers (:issue:`33473`) - Bug in :meth:`Series.__getitem__` allowing missing labels with ``np.ndarray``, :class:`Index`, :class:`Series` indexers but not ``list``, these now all raise ``KeyError`` (:issue:`33646`) - Bug in :meth:`DataFrame.truncate` and :meth:`Series.truncate` where index was assumed to be monotone increasing (:issue:`33756`) @@ -1089,7 +1082,7 @@ I/O - Bug in :meth:`DataFrame.to_sql` when reading DataFrames with ``-np.inf`` entries with MySQL now has a more explicit ``ValueError`` (:issue:`34431`) - Bug where capitalised files extensions were not decompressed by read_* functions (:issue:`35164`) - Bug in :meth:`read_excel` that was raising a ``TypeError`` when ``header=None`` and ``index_col`` given as list (:issue:`31783`) -- Bug in "meth"`read_excel` where datetime values are used in the header in a `MultiIndex` (:issue:`34748`) +- Bug in :meth:`read_excel` where datetime values are used in the header in a `MultiIndex` (:issue:`34748`) - :func:`read_excel` no longer takes ``**kwds`` arguments. This means that passing in keyword ``chunksize`` now raises a ``TypeError`` (previously raised a ``NotImplementedError``), while passing in keyword ``encoding`` now raises a ``TypeError`` (:issue:`34464`) Plotting @@ -1196,7 +1189,7 @@ Other - Bug in :class:`DataFrame` when initiating a frame with lists and assign ``columns`` with nested list for ``MultiIndex`` (:issue:`32173`) - Bug in :meth:`DataFrame.to_records` incorrectly losing timezone information in timezone-aware ``datetime64`` columns (:issue:`32535`) - Fixed :func:`pandas.testing.assert_series_equal` to correctly raise if left object is a different subclass with ``check_series_type=True`` (:issue:`32670`). -- :meth:`IntegerArray.astype` now supports ``datetime64`` dtype (:issue:32538`) +- :meth:`IntegerArray.astype` now supports ``datetime64`` dtype (:issue:`32538`) - Getting a missing attribute in a query/eval string raises the correct ``AttributeError`` (:issue:`32408`) - Fixed bug in :func:`pandas.testing.assert_series_equal` where dtypes were checked for ``Interval`` and ``ExtensionArray`` operands when ``check_dtype`` was ``False`` (:issue:`32747`) - Bug in :meth:`Series.map` not raising on invalid ``na_action`` (:issue:`32815`) From c94f60215054e24972039c1f305c72a6dff6e17a Mon Sep 17 00:00:00 2001 From: Tom Augspurger Date: Fri, 17 Jul 2020 12:32:55 -0500 Subject: [PATCH 162/632] more fixups (#35329) --- doc/source/whatsnew/v1.1.0.rst | 35 +++++++++++++++++----------------- 1 file changed, 17 insertions(+), 18 deletions(-) diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst index b9ef1aae9a801..285cfdfc4c431 100644 --- a/doc/source/whatsnew/v1.1.0.rst +++ b/doc/source/whatsnew/v1.1.0.rst @@ -274,6 +274,8 @@ change, as ``fsspec`` will still bring in the same packages as before. Other enhancements ^^^^^^^^^^^^^^^^^^ +- :meth:`IntegerArray.astype` now supports ``datetime64`` dtype (:issue:`32538`) +- :class:`IntegerArray` now implements the ``sum`` operation (:issue:`33172`) - Added :class:`pandas.errors.InvalidIndexError` (:issue:`34570`). 
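A short, hedged illustration of the two nullable-integer bullets added just above (my example, not the commit's):

.. code-block:: python

    import pandas as pd

    arr = pd.array([1, 2, None], dtype="Int64")
    arr.sum()  # 3 -- missing values are skipped
    # integers are interpreted as nanoseconds since the epoch
    pd.array([0, 10**9], dtype="Int64").astype("datetime64[ns]")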
- Added :meth:`DataFrame.value_counts` (:issue:`5377`) - Added a :func:`pandas.api.indexers.FixedForwardWindowIndexer` class to support forward-looking windows during ``rolling`` operations. @@ -919,6 +921,9 @@ Datetimelike - The ``freq`` keyword in :class:`Period`, :func:`date_range`, :func:`period_range`, :func:`pd.tseries.frequencies.to_offset` no longer allows tuples, pass as string instead (:issue:`34703`) - Bug in :meth:`DataFrame.append` when appending a :class:`Series` containing a scalar tz-aware :class:`Timestamp` to an empty :class:`DataFrame` resulted in an object column instead of datetime64[ns, tz] dtype (:issue:`35038`) - ``OutOfBoundsDatetime`` issues an improved error message when timestamp is out of implementation bounds. (:issue:`32967`) +- Bug in :meth:`AbstractHolidayCalendar.holidays` when no rules were defined (:issue:`31415`) +- Bug in :class:`Tick` comparisons raising ``TypeError`` when comparing against timedelta-like objects (:issue:`34088`) +- Bug in :class:`Tick` multiplication raising ``TypeError`` when multiplying by a float (:issue:`34486`) Timedelta ^^^^^^^^^ @@ -937,7 +942,6 @@ Timezones ^^^^^^^^^ - Bug in :func:`to_datetime` with ``infer_datetime_format=True`` where timezone names (e.g. ``UTC``) would not be parsed correctly (:issue:`33133`) -- Numeric @@ -953,12 +957,17 @@ Numeric - Bug in :class:`DataFrame` and :class:`Series` addition and subtraction between object-dtype objects and ``datetime64`` dtype objects (:issue:`33824`) - Bug in :meth:`Index.difference` incorrect results when comparing a :class:`Float64Index` and object :class:`Index` (:issue:`35217`) - Bug in :class:`DataFrame` reductions (e.g. ``df.min()``, ``df.max()``) with ``ExtensionArray`` dtypes (:issue:`34520`, :issue:`32651`) +- :meth:`Series.interpolate` and :meth:`DataFrame.interpolate` now raises ValueError if ``limit_direction`` is 'forward' or 'both' and ``method`` is 'backfill' or 'bfill' or ``limit_direction`` is 'backward' or 'both' and ``method`` is 'pad' or 'ffill' (:issue:`34746`) Conversion ^^^^^^^^^^ - Bug in :class:`Series` construction from NumPy array with big-endian ``datetime64`` dtype (:issue:`29684`) - Bug in :class:`Timedelta` construction with large nanoseconds keyword value (:issue:`32402`) - Bug in :class:`DataFrame` construction where sets would be duplicated rather than raising (:issue:`32582`) +- The :class:`DataFrame` constructor no longer accepts a list of ``DataFrame`` objects. Because of changes to NumPy, ``DataFrame`` objects are now consistently treated as 2D objects, so a list of ``DataFrames`` is considered 3D, and no longer acceptible for the ``DataFrame`` constructor (:issue:`32289`). +- Bug in :class:`DataFrame` when initiating a frame with lists and assign ``columns`` with nested list for ``MultiIndex`` (:issue:`32173`) +- Improved error message for invalid construction of list when creating a new index (:issue:`35190`) + Strings ^^^^^^^ @@ -1016,6 +1025,7 @@ Missing - :meth:`DataFrame.interpolate` uses the correct axis convention now. Previously interpolating along columns lead to interpolation along indices and vice versa. Furthermore interpolating with methods ``pad``, ``ffill``, ``bfill`` and ``backfill`` are identical to using these methods with :meth:`fillna` (:issue:`12918`, :issue:`29146`) - Bug in :meth:`DataFrame.interpolate` when called on a DataFrame with column names of string type was throwing a ValueError. 
  The method is now independent of the type of column names (:issue:`33956`)
 - Passing :class:`NA` into a format string using format specs will now work. For example ``"{:.1f}".format(pd.NA)`` would previously raise a ``ValueError``, but will now return the string ``"<NA>"`` (:issue:`34740`)
+- Bug in :meth:`Series.map` not raising on invalid ``na_action`` (:issue:`32815`)
 
 MultiIndex
 ^^^^^^^^^^
@@ -1044,6 +1054,7 @@ MultiIndex
 I/O
 ^^^
+- Passing a `set` as `names` argument to :func:`pandas.read_csv`, :func:`pandas.read_table`, or :func:`pandas.read_fwf` will raise ``ValueError: Names should be an ordered collection.`` (:issue:`34946`)
 - Bug in print-out when ``display.precision`` is zero. (:issue:`20359`)
 - Bug in :meth:`read_json` where integer overflow was occurring when json contains big number strings. (:issue:`30320`)
 - `read_csv` will now raise a ``ValueError`` when the arguments `header` and `prefix` both are not `None`. (:issue:`27394`)
@@ -1084,6 +1095,7 @@ I/O
 - Bug in :meth:`read_excel` that was raising a ``TypeError`` when ``header=None`` and ``index_col`` given as list (:issue:`31783`)
 - Bug in :meth:`read_excel` where datetime values are used in the header in a `MultiIndex` (:issue:`34748`)
 - :func:`read_excel` no longer takes ``**kwds`` arguments. This means that passing in keyword ``chunksize`` now raises a ``TypeError`` (previously raised a ``NotImplementedError``), while passing in keyword ``encoding`` now raises a ``TypeError`` (:issue:`34464`)
+- Bug in :meth:`DataFrame.to_records` incorrectly losing timezone information in timezone-aware ``datetime64`` columns (:issue:`32535`)
 
 Plotting
 ^^^^^^^^
@@ -1095,6 +1107,7 @@ Plotting
 - Bug in :meth:`DataFrame.plot.scatter` that when adding multiple plots with different ``cmap``, colorbars always use the first ``cmap`` (:issue:`33389`)
 - Bug in :meth:`DataFrame.plot.scatter` was adding a colorbar to the plot even if the argument `c` was assigned to a column containing color names (:issue:`34316`)
 - Bug in :meth:`pandas.plotting.bootstrap_plot` was causing cluttered axes and overlapping labels (:issue:`34905`)
+- Bug in :meth:`DataFrame.plot.scatter` caused an error when plotting variable marker sizes (:issue:`32904`)
 
 Groupby/resample/rolling
 ^^^^^^^^^^^^^^^^^^^^^^^^
@@ -1140,6 +1153,7 @@ Reshaping
 - Bug in :meth:`concat` where when passing a non-dict mapping as ``objs`` would raise a ``TypeError`` (:issue:`32863`)
 - :meth:`DataFrame.agg` now provides more descriptive ``SpecificationError`` message when attempting to aggregate a non-existent column (:issue:`32755`)
 - Bug in :meth:`DataFrame.unstack` when MultiIndexed columns and MultiIndexed rows were used (:issue:`32624`, :issue:`24729` and :issue:`28306`)
+- Appending a dictionary to a :class:`DataFrame` without passing ``ignore_index=True`` will raise ``TypeError: Can only append a dict if ignore_index=True`` instead of ``TypeError: Can only append a Series if ignore_index=True or if the Series has a name`` (:issue:`30871`)
 - Bug in :meth:`DataFrame.corrwith()`, :meth:`DataFrame.memory_usage()`, :meth:`DataFrame.dot()`,
   :meth:`DataFrame.idxmin()`, :meth:`DataFrame.idxmax()`, :meth:`DataFrame.duplicated()`, :meth:`DataFrame.isin()`,
   :meth:`DataFrame.count()`, :meth:`Series.explode()`, :meth:`Series.asof()` and :meth:`DataFrame.asof()` not
@@ -1180,28 +1194,12 @@ ExtensionArray
 Other
 ^^^^^
-- The :class:`DataFrame` constructor no longer accepts a list of ``DataFrame`` objects.
Because of changes to NumPy, ``DataFrame`` objects are now consistently treated as 2D objects, so a list of ``DataFrames`` is considered 3D, and no longer acceptible for the ``DataFrame`` constructor (:issue:`32289`). -- :meth:`Series.interpolate` and :meth:`DataFrame.interpolate` now raises ValueError if ``limit_direction`` is 'forward' or 'both' and ``method`` is 'backfill' or 'bfill' or ``limit_direction`` is 'backward' or 'both' and ``method`` is 'pad' or 'ffill' (:issue:`34746`) -- Appending a dictionary to a :class:`DataFrame` without passing ``ignore_index=True`` will raise ``TypeError: Can only append a dict if ignore_index=True`` - instead of ``TypeError: Can only append a Series if ignore_index=True or if the Series has a name`` (:issue:`30871`) - Set operations on an object-dtype :class:`Index` now always return object-dtype results (:issue:`31401`) -- Bug in :meth:`AbstractHolidayCalendar.holidays` when no rules were defined (:issue:`31415`) -- Bug in :class:`DataFrame` when initiating a frame with lists and assign ``columns`` with nested list for ``MultiIndex`` (:issue:`32173`) -- Bug in :meth:`DataFrame.to_records` incorrectly losing timezone information in timezone-aware ``datetime64`` columns (:issue:`32535`) - Fixed :func:`pandas.testing.assert_series_equal` to correctly raise if left object is a different subclass with ``check_series_type=True`` (:issue:`32670`). -- :meth:`IntegerArray.astype` now supports ``datetime64`` dtype (:issue:`32538`) - Getting a missing attribute in a query/eval string raises the correct ``AttributeError`` (:issue:`32408`) - Fixed bug in :func:`pandas.testing.assert_series_equal` where dtypes were checked for ``Interval`` and ``ExtensionArray`` operands when ``check_dtype`` was ``False`` (:issue:`32747`) -- Bug in :meth:`Series.map` not raising on invalid ``na_action`` (:issue:`32815`) - Bug in :meth:`DataFrame.__dir__` caused a segfault when using unicode surrogates in a column name (:issue:`25509`) -- Bug in :meth:`DataFrame.plot.scatter` caused an error when plotting variable marker sizes (:issue:`32904`) -- :class:`IntegerArray` now implements the ``sum`` operation (:issue:`33172`) -- Bug in :meth:`DataFrame.equals` and :meth:`Series.equals` in allowing subclasses - to be equal (:issue:`34402`). -- Bug in :class:`Tick` comparisons raising ``TypeError`` when comparing against timedelta-like objects (:issue:`34088`) -- Bug in :class:`Tick` multiplication raising ``TypeError`` when multiplying by a float (:issue:`34486`) -- Passing a `set` as `names` argument to :func:`pandas.read_csv`, :func:`pandas.read_table`, or :func:`pandas.read_fwf` will raise ``ValueError: Names should be an ordered collection.`` (:issue:`34946`) -- Improved error message for invalid construction of list when creating a new index (:issue:`35190`) +- Bug in :meth:`DataFrame.equals` and :meth:`Series.equals` in allowing subclasses to be equal (:issue:`34402`). .. --------------------------------------------------------------------------- @@ -1210,3 +1208,4 @@ Other Contributors ~~~~~~~~~~~~ +.. 
contributors:: v1.0.5..v1.1.0|HEAD From d33c801141daafb519aa9b91630c1cf99c353c6e Mon Sep 17 00:00:00 2001 From: Tom Augspurger Date: Fri, 17 Jul 2020 13:49:27 -0500 Subject: [PATCH 163/632] Optimize array_equivalent for NDFrame.equals (#35328) --- pandas/core/dtypes/missing.py | 96 +++++++++++++++++++---------- pandas/core/internals/managers.py | 4 +- pandas/tests/dtypes/test_missing.py | 59 ++++++++++++++---- 3 files changed, 113 insertions(+), 46 deletions(-) diff --git a/pandas/core/dtypes/missing.py b/pandas/core/dtypes/missing.py index 75188ad5b00eb..8551ce9f14e6c 100644 --- a/pandas/core/dtypes/missing.py +++ b/pandas/core/dtypes/missing.py @@ -355,7 +355,9 @@ def _isna_compat(arr, fill_value=np.nan) -> bool: return True -def array_equivalent(left, right, strict_nan: bool = False) -> bool: +def array_equivalent( + left, right, strict_nan: bool = False, dtype_equal: bool = False +) -> bool: """ True if two arrays, left and right, have equal non-NaN elements, and NaNs in corresponding locations. False otherwise. It is assumed that left and @@ -368,6 +370,12 @@ def array_equivalent(left, right, strict_nan: bool = False) -> bool: left, right : ndarrays strict_nan : bool, default False If True, consider NaN and None to be different. + dtype_equal : bool, default False + Whether `left` and `right` are known to have the same dtype + according to `is_dtype_equal`. Some methods like `BlockManager.equals`. + require that the dtypes match. Setting this to ``True`` can improve + performance, but will give different results for arrays that are + equal but different dtypes. Returns ------- @@ -391,43 +399,28 @@ def array_equivalent(left, right, strict_nan: bool = False) -> bool: if left.shape != right.shape: return False + if dtype_equal: + # fastpath when we require that the dtypes match (Block.equals) + if is_float_dtype(left.dtype) or is_complex_dtype(left.dtype): + return _array_equivalent_float(left, right) + elif is_datetimelike_v_numeric(left.dtype, right.dtype): + return False + elif needs_i8_conversion(left.dtype): + return _array_equivalent_datetimelike(left, right) + elif is_string_dtype(left.dtype): + # TODO: fastpath for pandas' StringDtype + return _array_equivalent_object(left, right, strict_nan) + else: + return np.array_equal(left, right) + + # Slow path when we allow comparing different dtypes. # Object arrays can contain None, NaN and NaT. # string dtypes must be come to this path for NumPy 1.7.1 compat if is_string_dtype(left.dtype) or is_string_dtype(right.dtype): - - if not strict_nan: - # isna considers NaN and None to be equivalent. - return lib.array_equivalent_object( - ensure_object(left.ravel()), ensure_object(right.ravel()) - ) - - for left_value, right_value in zip(left, right): - if left_value is NaT and right_value is not NaT: - return False - - elif left_value is libmissing.NA and right_value is not libmissing.NA: - return False - - elif isinstance(left_value, float) and np.isnan(left_value): - if not isinstance(right_value, float) or not np.isnan(right_value): - return False - else: - try: - if np.any(np.asarray(left_value != right_value)): - return False - except TypeError as err: - if "Cannot compare tz-naive" in str(err): - # tzawareness compat failure, see GH#28507 - return False - elif "boolean value of NA is ambiguous" in str(err): - return False - raise - return True + return _array_equivalent_object(left, right, strict_nan) # NaNs can occur in float and complex arrays. 
if is_float_dtype(left.dtype) or is_complex_dtype(left.dtype): - - # empty if not (np.prod(left.shape) and np.prod(right.shape)): return True return ((left == right) | (isna(left) & isna(right))).all() @@ -452,6 +445,45 @@ def array_equivalent(left, right, strict_nan: bool = False) -> bool: return np.array_equal(left, right) +def _array_equivalent_float(left, right): + return ((left == right) | (np.isnan(left) & np.isnan(right))).all() + + +def _array_equivalent_datetimelike(left, right): + return np.array_equal(left.view("i8"), right.view("i8")) + + +def _array_equivalent_object(left, right, strict_nan): + if not strict_nan: + # isna considers NaN and None to be equivalent. + return lib.array_equivalent_object( + ensure_object(left.ravel()), ensure_object(right.ravel()) + ) + + for left_value, right_value in zip(left, right): + if left_value is NaT and right_value is not NaT: + return False + + elif left_value is libmissing.NA and right_value is not libmissing.NA: + return False + + elif isinstance(left_value, float) and np.isnan(left_value): + if not isinstance(right_value, float) or not np.isnan(right_value): + return False + else: + try: + if np.any(np.asarray(left_value != right_value)): + return False + except TypeError as err: + if "Cannot compare tz-naive" in str(err): + # tzawareness compat failure, see GH#28507 + return False + elif "boolean value of NA is ambiguous" in str(err): + return False + raise + return True + + def _infer_fill_value(val): """ infer the fill value for the nan/NaT from the provided diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py index d5947726af7fd..895385b170c91 100644 --- a/pandas/core/internals/managers.py +++ b/pandas/core/internals/managers.py @@ -1436,7 +1436,7 @@ def equals(self, other: "BlockManager") -> bool: return array_equivalent(left, right) for i in range(len(self.items)): - # Check column-wise, return False if any column doesnt match + # Check column-wise, return False if any column doesn't match left = self.iget_values(i) right = other.iget_values(i) if not is_dtype_equal(left.dtype, right.dtype): @@ -1445,7 +1445,7 @@ def equals(self, other: "BlockManager") -> bool: if not left.equals(right): return False else: - if not array_equivalent(left, right): + if not array_equivalent(left, right, dtype_equal=True): return False return True diff --git a/pandas/tests/dtypes/test_missing.py b/pandas/tests/dtypes/test_missing.py index f9a854c5778a2..04dde08de082d 100644 --- a/pandas/tests/dtypes/test_missing.py +++ b/pandas/tests/dtypes/test_missing.py @@ -300,50 +300,80 @@ def test_period(self): tm.assert_series_equal(notna(s), ~exp) -def test_array_equivalent(): - assert array_equivalent(np.array([np.nan, np.nan]), np.array([np.nan, np.nan])) +@pytest.mark.parametrize("dtype_equal", [True, False]) +def test_array_equivalent(dtype_equal): assert array_equivalent( - np.array([np.nan, 1, np.nan]), np.array([np.nan, 1, np.nan]) + np.array([np.nan, np.nan]), np.array([np.nan, np.nan]), dtype_equal=dtype_equal + ) + assert array_equivalent( + np.array([np.nan, 1, np.nan]), + np.array([np.nan, 1, np.nan]), + dtype_equal=dtype_equal, ) assert array_equivalent( np.array([np.nan, None], dtype="object"), np.array([np.nan, None], dtype="object"), + dtype_equal=dtype_equal, ) # Check the handling of nested arrays in array_equivalent_object assert array_equivalent( np.array([np.array([np.nan, None], dtype="object"), None], dtype="object"), np.array([np.array([np.nan, None], dtype="object"), None], dtype="object"), + 
dtype_equal=dtype_equal, ) assert array_equivalent( np.array([np.nan, 1 + 1j], dtype="complex"), np.array([np.nan, 1 + 1j], dtype="complex"), + dtype_equal=dtype_equal, ) assert not array_equivalent( np.array([np.nan, 1 + 1j], dtype="complex"), np.array([np.nan, 1 + 2j], dtype="complex"), + dtype_equal=dtype_equal, + ) + assert not array_equivalent( + np.array([np.nan, 1, np.nan]), + np.array([np.nan, 2, np.nan]), + dtype_equal=dtype_equal, + ) + assert not array_equivalent( + np.array(["a", "b", "c", "d"]), np.array(["e", "e"]), dtype_equal=dtype_equal + ) + assert array_equivalent( + Float64Index([0, np.nan]), Float64Index([0, np.nan]), dtype_equal=dtype_equal ) assert not array_equivalent( - np.array([np.nan, 1, np.nan]), np.array([np.nan, 2, np.nan]) + Float64Index([0, np.nan]), Float64Index([1, np.nan]), dtype_equal=dtype_equal + ) + assert array_equivalent( + DatetimeIndex([0, np.nan]), DatetimeIndex([0, np.nan]), dtype_equal=dtype_equal + ) + assert not array_equivalent( + DatetimeIndex([0, np.nan]), DatetimeIndex([1, np.nan]), dtype_equal=dtype_equal + ) + assert array_equivalent( + TimedeltaIndex([0, np.nan]), + TimedeltaIndex([0, np.nan]), + dtype_equal=dtype_equal, ) - assert not array_equivalent(np.array(["a", "b", "c", "d"]), np.array(["e", "e"])) - assert array_equivalent(Float64Index([0, np.nan]), Float64Index([0, np.nan])) - assert not array_equivalent(Float64Index([0, np.nan]), Float64Index([1, np.nan])) - assert array_equivalent(DatetimeIndex([0, np.nan]), DatetimeIndex([0, np.nan])) - assert not array_equivalent(DatetimeIndex([0, np.nan]), DatetimeIndex([1, np.nan])) - assert array_equivalent(TimedeltaIndex([0, np.nan]), TimedeltaIndex([0, np.nan])) assert not array_equivalent( - TimedeltaIndex([0, np.nan]), TimedeltaIndex([1, np.nan]) + TimedeltaIndex([0, np.nan]), + TimedeltaIndex([1, np.nan]), + dtype_equal=dtype_equal, ) assert array_equivalent( DatetimeIndex([0, np.nan], tz="US/Eastern"), DatetimeIndex([0, np.nan], tz="US/Eastern"), + dtype_equal=dtype_equal, ) assert not array_equivalent( DatetimeIndex([0, np.nan], tz="US/Eastern"), DatetimeIndex([1, np.nan], tz="US/Eastern"), + dtype_equal=dtype_equal, ) + # The rest are not dtype_equal assert not array_equivalent( - DatetimeIndex([0, np.nan]), DatetimeIndex([0, np.nan], tz="US/Eastern") + DatetimeIndex([0, np.nan]), DatetimeIndex([0, np.nan], tz="US/Eastern"), ) assert not array_equivalent( DatetimeIndex([0, np.nan], tz="CET"), @@ -353,6 +383,11 @@ def test_array_equivalent(): assert not array_equivalent(DatetimeIndex([0, np.nan]), TimedeltaIndex([0, np.nan])) +def test_array_equivalent_different_dtype_but_equal(): + # Unclear if this is exposed anywhere in the public-facing API + assert array_equivalent(np.array([1, 2]), np.array([1.0, 2.0])) + + @pytest.mark.parametrize( "lvalue, rvalue", [ From ddc644242e466046d1537fc77aeec1ab14aaea14 Mon Sep 17 00:00:00 2001 From: salem3358 <46143571+salem3358@users.noreply.github.com> Date: Sat, 18 Jul 2020 01:49:53 +0700 Subject: [PATCH 164/632] Fix AttributeError when groupby as_index=False on empty DataFrame (#35324) --- doc/source/whatsnew/v1.1.0.rst | 1 + pandas/core/groupby/generic.py | 17 +++++++++++------ pandas/tests/groupby/test_groupby.py | 8 ++++++++ 3 files changed, 20 insertions(+), 6 deletions(-) diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst index 285cfdfc4c431..43d1244c15d8a 100644 --- a/doc/source/whatsnew/v1.1.0.rst +++ b/doc/source/whatsnew/v1.1.0.rst @@ -1130,6 +1130,7 @@ Groupby/resample/rolling - Bug in 
:meth:`DataFrame.ewm.cov` was throwing ``AssertionError`` for :class:`MultiIndex` inputs (:issue:`34440`) - Bug in :meth:`core.groupby.DataFrameGroupBy.quantile` raises ``TypeError`` for non-numeric types rather than dropping columns (:issue:`27892`) - Bug in :meth:`core.groupby.DataFrameGroupBy.transform` when ``func='nunique'`` and columns are of type ``datetime64``, the result would also be of type ``datetime64`` instead of ``int64`` (:issue:`35109`) +- Bug in :meth:`DataFrame.groupby` raising an ``AttributeError`` when selecting a column and aggregating with ``as_index=False`` (:issue:`35246`). - Bug in :meth:'DataFrameGroupBy.first' and :meth:'DataFrameGroupBy.last' that would raise an unnecessary ``ValueError`` when grouping on multiple ``Categoricals`` (:issue:`34951`) Reshaping diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index 1d14361757e4a..ec7b14f27c5a1 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -963,18 +963,23 @@ def aggregate(self, func=None, *args, engine=None, engine_kwargs=None, **kwargs) # try to treat as if we are passing a list try: result = self._aggregate_multiple_funcs([func], _axis=self.axis) - except ValueError as err: - if "no results" not in str(err): - # raised directly by _aggregate_multiple_funcs - raise - result = self._aggregate_frame(func) - else: + # select everything except for the last level, which is the one # containing the name of the function(s), see GH 32040 result.columns = result.columns.rename( [self._selected_obj.columns.name] * result.columns.nlevels ).droplevel(-1) + except ValueError as err: + if "no results" not in str(err): + # raised directly by _aggregate_multiple_funcs + raise + result = self._aggregate_frame(func) + except AttributeError: + # catch exception from line 969 + # (Series does not have attribute "columns"), see GH 35246 + result = self._aggregate_frame(func) + if relabeling: # used reordered index of columns diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py index 0d040b8e6955a..ebce5b0ef0a66 100644 --- a/pandas/tests/groupby/test_groupby.py +++ b/pandas/tests/groupby/test_groupby.py @@ -605,6 +605,14 @@ def test_as_index_select_column(): tm.assert_series_equal(result, expected) +def test_groupby_as_index_select_column_sum_empty_df(): + # GH 35246 + df = DataFrame(columns=["A", "B", "C"]) + left = df.groupby(by="A", as_index=False)["B"].sum() + assert type(left) is DataFrame + assert left.to_dict() == {"A": {}, "B": {}} + + def test_groupby_as_index_agg(df): grouped = df.groupby("A", as_index=False) From bfac13628394ac7317bb94833319bd6bf87603af Mon Sep 17 00:00:00 2001 From: Pandas Development Team Date: Fri, 17 Jul 2020 14:03:50 -0500 Subject: [PATCH 165/632] RLS: 1.1.0rc0 From e673b6907c1ce6ef67d67dc3638310567d1c6415 Mon Sep 17 00:00:00 2001 From: Tom Augspurger Date: Mon, 20 Jul 2020 17:17:50 -0500 Subject: [PATCH 166/632] CI: pin matplotlib for doc build (#35358) --- ci/deps/azure-37-locale.yaml | 2 +- environment.yml | 2 +- requirements-dev.txt | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/ci/deps/azure-37-locale.yaml b/ci/deps/azure-37-locale.yaml index 714e1100b1e1a..77aae791a47c1 100644 --- a/ci/deps/azure-37-locale.yaml +++ b/ci/deps/azure-37-locale.yaml @@ -18,7 +18,7 @@ dependencies: - ipython - jinja2 - lxml - - matplotlib + - matplotlib <3.3.0 - moto - nomkl - numexpr diff --git a/environment.yml b/environment.yml index 53106906a52cb..53222624619de 100644 --- a/environment.yml +++ 
b/environment.yml @@ -73,7 +73,7 @@ dependencies: - ipykernel - ipython>=7.11.1 - jinja2 # pandas.Styler - - matplotlib>=2.2.2 # pandas.plotting, Series.plot, DataFrame.plot + - matplotlib>=2.2.2,<3.3.0 # pandas.plotting, Series.plot, DataFrame.plot - numexpr>=2.6.8 - scipy>=1.2 - numba>=0.46.0 diff --git a/requirements-dev.txt b/requirements-dev.txt index 1ec998ffa72d4..0c024d1b54637 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -47,7 +47,7 @@ bottleneck>=1.2.1 ipykernel ipython>=7.11.1 jinja2 -matplotlib>=2.2.2 +matplotlib>=2.2.2,<3.3.0 numexpr>=2.6.8 scipy>=1.2 numba>=0.46.0 From 3e88e170a436b5e3cfdfc29f8a7416220658c616 Mon Sep 17 00:00:00 2001 From: Simon Hawkins Date: Tue, 21 Jul 2020 12:20:00 +0100 Subject: [PATCH 167/632] REGR: MultiIndex Indexing (#35353) * REGR: MultiIndex Indexing * add test --- pandas/core/indexes/base.py | 5 ++++- pandas/tests/indexing/multiindex/test_loc.py | 19 +++++++++++++++++++ 2 files changed, 23 insertions(+), 1 deletion(-) diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 3dbee7d0929cb..986d6323e704e 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -3397,7 +3397,10 @@ def _reindex_non_unique(self, target): new_indexer = np.arange(len(self.take(indexer))) new_indexer[~check] = -1 - new_index = Index(new_labels, name=self.name) + if isinstance(self, ABCMultiIndex): + new_index = type(self).from_tuples(new_labels, names=self.names) + else: + new_index = Index(new_labels, name=self.name) return new_index, indexer, new_indexer # -------------------------------------------------------------------- diff --git a/pandas/tests/indexing/multiindex/test_loc.py b/pandas/tests/indexing/multiindex/test_loc.py index f0cbdbe8d0564..63983f45d7832 100644 --- a/pandas/tests/indexing/multiindex/test_loc.py +++ b/pandas/tests/indexing/multiindex/test_loc.py @@ -491,3 +491,22 @@ def test_loc_datetime_mask_slicing(): ), ) tm.assert_series_equal(result, expected) + + +def test_loc_with_mi_indexer(): + # https://github.com/pandas-dev/pandas/issues/35351 + df = DataFrame( + data=[["a", 1], ["a", 0], ["b", 1], ["c", 2]], + index=MultiIndex.from_tuples( + [(0, 1), (1, 0), (1, 1), (1, 1)], names=["index", "date"] + ), + columns=["author", "price"], + ) + idx = MultiIndex.from_tuples([(0, 1), (1, 1)], names=["index", "date"]) + result = df.loc[idx, :] + expected = DataFrame( + [["a", 1], ["b", 1], ["c", 2]], + index=MultiIndex.from_tuples([(0, 1), (1, 1), (1, 1)], names=["index", "date"]), + columns=["author", "price"], + ) + tm.assert_frame_equal(result, expected) From 06e2793a35e74303d7cc5dd47eb5959387244655 Mon Sep 17 00:00:00 2001 From: Matthew Roeschke Date: Wed, 22 Jul 2020 03:45:40 -0700 Subject: [PATCH 168/632] Change defaults for rolling/expanding.apply engine kwargs to None (#35374) * Change defaults for rolling/expanding.apply engine kwargs to None * Add whatsnew Co-authored-by: Matt Roeschke --- doc/source/whatsnew/v1.1.0.rst | 2 +- pandas/core/window/expanding.py | 2 +- pandas/core/window/rolling.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst index 43d1244c15d8a..55e2a810e6fc3 100644 --- a/doc/source/whatsnew/v1.1.0.rst +++ b/doc/source/whatsnew/v1.1.0.rst @@ -338,7 +338,7 @@ Other enhancements - :meth:`read_csv` now accepts string values like "0", "0.0", "1", "1.0" as convertible to the nullable boolean dtype (:issue:`34859`) - :class:`pandas.core.window.ExponentialMovingWindow` now supports a 
``times`` argument that allows ``mean`` to be calculated with observations spaced by the timestamps in ``times`` (:issue:`34839`) - :meth:`DataFrame.agg` and :meth:`Series.agg` now accept named aggregation for renaming the output columns/indexes. (:issue:`26513`) -- ``compute.use_numba`` now exists as a configuration option that utilizes the numba engine when available (:issue:`33966`) +- ``compute.use_numba`` now exists as a configuration option that utilizes the numba engine when available (:issue:`33966`, :issue:`35374`) - :meth:`Series.plot` now supports asymmetric error bars. Previously, if :meth:`Series.plot` received a "2xN" array with error values for `yerr` and/or `xerr`, the left/lower values (first row) were mirrored, while the right/upper values (second row) were ignored. Now, the first row represents the left/lower error values and the second row the right/upper error values. (:issue:`9536`) .. --------------------------------------------------------------------------- diff --git a/pandas/core/window/expanding.py b/pandas/core/window/expanding.py index 8267cd4f0971e..ce4ab2f98c23d 100644 --- a/pandas/core/window/expanding.py +++ b/pandas/core/window/expanding.py @@ -137,7 +137,7 @@ def apply( self, func, raw: bool = False, - engine: str = "cython", + engine: Optional[str] = None, engine_kwargs: Optional[Dict[str, bool]] = None, args=None, kwargs=None, diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py index 48953f6a75487..445f179248226 100644 --- a/pandas/core/window/rolling.py +++ b/pandas/core/window/rolling.py @@ -1344,7 +1344,7 @@ def apply( self, func, raw: bool = False, - engine: str = "cython", + engine: Optional[str] = None, engine_kwargs: Optional[Dict] = None, args: Optional[Tuple] = None, kwargs: Optional[Dict] = None, From d3343d9e7f2aa93572d73335a1cc489db267d1ee Mon Sep 17 00:00:00 2001 From: Tom Augspurger Date: Thu, 23 Jul 2020 14:10:33 -0500 Subject: [PATCH 169/632] PKG: Set max pin for Cython (#35396) We know that pandas doesn't work with Cython 3.0 (https://github.com/pandas-dev/pandas/issues/34213, https://github.com/pandas-dev/pandas/issues/34014) This sets the maximum supported version of Cython in our pyproject.toml to ensure that pandas 1.1.0 can continue to be built from source without Cython pre-installed after Cython 3.0 is released. 
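Restated as a hypothetical runtime check (pandas does not ship this code; it merely encodes the same constraint as the ``pyproject.toml`` pin in the diff below):

.. code-block:: python

    from distutils.version import LooseVersion

    import Cython

    v = LooseVersion(Cython.__version__)
    if not (LooseVersion("0.29.16") <= v < LooseVersion("3")):
        raise RuntimeError(f"unsupported Cython {Cython.__version__}; need >=0.29.16,<3")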
--- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index aaebcff8e4c1e..f282f2a085000 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ requires = [ "setuptools", "wheel", - "Cython>=0.29.16", # Note: sync with setup.py + "Cython>=0.29.16,<3", # Note: sync with setup.py "numpy==1.15.4; python_version=='3.6' and platform_system!='AIX'", "numpy==1.15.4; python_version=='3.7' and platform_system!='AIX'", "numpy==1.17.3; python_version>='3.8' and platform_system!='AIX'", From 00ea10c158ac3d4087f6010f375084e9ddfcab99 Mon Sep 17 00:00:00 2001 From: Tom Augspurger Date: Thu, 23 Jul 2020 17:06:03 -0500 Subject: [PATCH 170/632] Matplotlib 3.3 compatibility fixups (#35393) --- ci/deps/azure-37-locale.yaml | 2 +- doc/source/whatsnew/v1.1.0.rst | 1 + pandas/plotting/_matplotlib/boxplot.py | 5 ++++ pandas/plotting/_matplotlib/compat.py | 1 + pandas/plotting/_matplotlib/converter.py | 34 +++++----------------- pandas/tests/plotting/test_converter.py | 6 ++-- pandas/tests/plotting/test_datetimelike.py | 14 +++++---- pandas/tests/plotting/test_frame.py | 1 + pandas/tests/plotting/test_series.py | 10 +++++-- 9 files changed, 34 insertions(+), 40 deletions(-) diff --git a/ci/deps/azure-37-locale.yaml b/ci/deps/azure-37-locale.yaml index 77aae791a47c1..4dbb6a5344976 100644 --- a/ci/deps/azure-37-locale.yaml +++ b/ci/deps/azure-37-locale.yaml @@ -18,7 +18,7 @@ dependencies: - ipython - jinja2 - lxml - - matplotlib <3.3.0 + - matplotlib>=3.3.0 - moto - nomkl - numexpr diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst index 55e2a810e6fc3..b1ab54a608748 100644 --- a/doc/source/whatsnew/v1.1.0.rst +++ b/doc/source/whatsnew/v1.1.0.rst @@ -274,6 +274,7 @@ change, as ``fsspec`` will still bring in the same packages as before. Other enhancements ^^^^^^^^^^^^^^^^^^ +- Compatibility with matplotlib 3.3.0 (:issue:`34850`) - :meth:`IntegerArray.astype` now supports ``datetime64`` dtype (:issue:`32538`) - :class:`IntegerArray` now implements the ``sum`` operation (:issue:`33172`) - Added :class:`pandas.errors.InvalidIndexError` (:issue:`34570`). 
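For orientation, the version gate this patch extends in ``compat.py`` boils down to the pattern sketched below (the helper name here is made up; the real code builds the same comparison through its ``_mpl_version`` factory, visible in the hunk that follows):

.. code-block:: python

    import operator
    from distutils.version import LooseVersion

    import matplotlib

    def mpl_at_least(version: str) -> bool:
        # True when the installed matplotlib is at least `version`
        return operator.ge(LooseVersion(matplotlib.__version__), LooseVersion(version))

    MPL_GE_3_3_0 = mpl_at_least("3.3.0")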
diff --git a/pandas/plotting/_matplotlib/boxplot.py b/pandas/plotting/_matplotlib/boxplot.py
index 4b79bef41d025..53ef97bbe9a72 100644
--- a/pandas/plotting/_matplotlib/boxplot.py
+++ b/pandas/plotting/_matplotlib/boxplot.py
@@ -299,6 +299,11 @@ def plot_group(keys, values, ax):
         if fontsize is not None:
             ax.tick_params(axis="both", labelsize=fontsize)
         if kwds.get("vert", 1):
+            ticks = ax.get_xticks()
+            if len(ticks) != len(keys):
+                i, remainder = divmod(len(ticks), len(keys))
+                assert remainder == 0, remainder
+                keys *= i
             ax.set_xticklabels(keys, rotation=rot)
         else:
             ax.set_yticklabels(keys, rotation=rot)
diff --git a/pandas/plotting/_matplotlib/compat.py b/pandas/plotting/_matplotlib/compat.py
index f2c5032112bc9..7f107f18eca25 100644
--- a/pandas/plotting/_matplotlib/compat.py
+++ b/pandas/plotting/_matplotlib/compat.py
@@ -21,3 +21,4 @@ def inner():
 _mpl_ge_3_0_0 = _mpl_version("3.0.0", operator.ge)
 _mpl_ge_3_1_0 = _mpl_version("3.1.0", operator.ge)
 _mpl_ge_3_2_0 = _mpl_version("3.2.0", operator.ge)
+_mpl_ge_3_3_0 = _mpl_version("3.3.0", operator.ge)
diff --git a/pandas/plotting/_matplotlib/converter.py b/pandas/plotting/_matplotlib/converter.py
index 05377e0c240b9..8f2080658e63e 100644
--- a/pandas/plotting/_matplotlib/converter.py
+++ b/pandas/plotting/_matplotlib/converter.py
@@ -16,7 +16,6 @@ from pandas._libs.tslibs.offsets import BaseOffset
 from pandas.core.dtypes.common import (
-    is_datetime64_ns_dtype,
     is_float,
     is_float_dtype,
     is_integer,
@@ -246,19 +245,6 @@ def get_datevalue(date, freq):
     raise ValueError(f"Unrecognizable date '{date}'")
 
 
-def _dt_to_float_ordinal(dt):
-    """
-    Convert :mod:`datetime` to the Gregorian date as UTC float days,
-    preserving hours, minutes, seconds and microseconds. Return value
-    is a :func:`float`.
-    """
-    if isinstance(dt, (np.ndarray, Index, Series)) and is_datetime64_ns_dtype(dt):
-        base = dates.epoch2num(dt.asi8 / 1.0e9)
-    else:
-        base = dates.date2num(dt)
-    return base
-
-
 # Datetime Conversion
 class DatetimeConverter(dates.DateConverter):
     @staticmethod
@@ -274,15 +260,11 @@ def convert(values, unit, axis):
     def _convert_1d(values, unit, axis):
         def try_parse(values):
             try:
-                return _dt_to_float_ordinal(tools.to_datetime(values))
+                return dates.date2num(tools.to_datetime(values))
             except Exception:
                 return values
 
-        if isinstance(values, (datetime, pydt.date)):
-            return _dt_to_float_ordinal(values)
-        elif isinstance(values, np.datetime64):
-            return _dt_to_float_ordinal(Timestamp(values))
-        elif isinstance(values, pydt.time):
+        if isinstance(values, (datetime, pydt.date, np.datetime64, pydt.time)):
             return dates.date2num(values)
         elif is_integer(values) or is_float(values):
             return values
@@ -303,12 +285,10 @@ def try_parse(values):
 
             try:
                 values = tools.to_datetime(values)
-                if isinstance(values, Index):
-                    values = _dt_to_float_ordinal(values)
-                else:
-                    values = [_dt_to_float_ordinal(x) for x in values]
             except Exception:
-                values = _dt_to_float_ordinal(values)
+                pass
+
+            values = dates.date2num(values)
 
         return values
 
@@ -411,8 +391,8 @@ def __call__(self):
         interval = self._get_interval()
         freq = f"{interval}L"
         tz = self.tz.tzname(None)
-        st = _from_ordinal(dates.date2num(dmin))  # strip tz
-        ed = _from_ordinal(dates.date2num(dmax))
+        st = dmin.replace(tzinfo=None)
+        ed = dmax.replace(tzinfo=None)
 
         all_dates = date_range(start=st, end=ed, freq=freq, tz=tz).astype(object)
 
         try:
diff --git a/pandas/tests/plotting/test_converter.py b/pandas/tests/plotting/test_converter.py
index df2c9ecbd7a0a..b2eeb649276d5 100644
--- a/pandas/tests/plotting/test_converter.py
+++
b/pandas/tests/plotting/test_converter.py @@ -27,6 +27,7 @@ pass pytest.importorskip("matplotlib.pyplot") +dates = pytest.importorskip("matplotlib.dates") def test_registry_mpl_resets(): @@ -146,7 +147,7 @@ def test_convert_accepts_unicode(self): def test_conversion(self): rs = self.dtc.convert(["2012-1-1"], None, None)[0] - xp = datetime(2012, 1, 1).toordinal() + xp = dates.date2num(datetime(2012, 1, 1)) assert rs == xp rs = self.dtc.convert("2012-1-1", None, None) @@ -155,9 +156,6 @@ def test_conversion(self): rs = self.dtc.convert(date(2012, 1, 1), None, None) assert rs == xp - rs = self.dtc.convert(datetime(2012, 1, 1).toordinal(), None, None) - assert rs == xp - rs = self.dtc.convert("2012-1-1", None, None) assert rs == xp diff --git a/pandas/tests/plotting/test_datetimelike.py b/pandas/tests/plotting/test_datetimelike.py index 201856669103a..ecf378d4fc04a 100644 --- a/pandas/tests/plotting/test_datetimelike.py +++ b/pandas/tests/plotting/test_datetimelike.py @@ -331,7 +331,7 @@ def test_freq_with_no_period_alias(self): bts = tm.makeTimeSeries(5).asfreq(freq) _, ax = self.plt.subplots() bts.plot(ax=ax) - assert ax.get_lines()[0].get_xydata()[0, 0] == bts.index[0].toordinal() + idx = ax.get_lines()[0].get_xdata() msg = "freq not specified and cannot be inferred" with pytest.raises(ValueError, match=msg): @@ -1279,6 +1279,8 @@ def test_mpl_nopandas(self): @pytest.mark.slow def test_irregular_ts_shared_ax_xlim(self): # GH 2960 + from pandas.plotting._matplotlib.converter import DatetimeConverter + ts = tm.makeTimeSeries()[:20] ts_irregular = ts[[1, 4, 5, 6, 8, 9, 10, 12, 13, 14, 15, 17, 18]] @@ -1289,8 +1291,8 @@ def test_irregular_ts_shared_ax_xlim(self): # check that axis limits are correct left, right = ax.get_xlim() - assert left <= ts_irregular.index.min().toordinal() - assert right >= ts_irregular.index.max().toordinal() + assert left <= DatetimeConverter.convert(ts_irregular.index.min(), "", ax) + assert right >= DatetimeConverter.convert(ts_irregular.index.max(), "", ax) @pytest.mark.slow def test_secondary_y_non_ts_xlim(self): @@ -1345,6 +1347,8 @@ def test_secondary_y_mixed_freq_ts_xlim(self): @pytest.mark.slow def test_secondary_y_irregular_ts_xlim(self): # GH 3490 - irregular-timeseries with secondary y + from pandas.plotting._matplotlib.converter import DatetimeConverter + ts = tm.makeTimeSeries()[:20] ts_irregular = ts[[1, 4, 5, 6, 8, 9, 10, 12, 13, 14, 15, 17, 18]] @@ -1356,8 +1360,8 @@ def test_secondary_y_irregular_ts_xlim(self): ts_irregular[:5].plot(ax=ax) left, right = ax.get_xlim() - assert left <= ts_irregular.index.min().toordinal() - assert right >= ts_irregular.index.max().toordinal() + assert left <= DatetimeConverter.convert(ts_irregular.index.min(), "", ax) + assert right >= DatetimeConverter.convert(ts_irregular.index.max(), "", ax) def test_plot_outofbounds_datetime(self): # 2579 - checking this does not raise diff --git a/pandas/tests/plotting/test_frame.py b/pandas/tests/plotting/test_frame.py index 3d85e79b15c4c..317a994bd9a32 100644 --- a/pandas/tests/plotting/test_frame.py +++ b/pandas/tests/plotting/test_frame.py @@ -1563,6 +1563,7 @@ def test_boxplot(self): ax.xaxis.get_ticklocs(), np.arange(1, len(numeric_cols) + 1) ) assert len(ax.lines) == self.bp_n_objects * len(numeric_cols) + tm.close() axes = series.plot.box(rot=40) self._check_ticks_props(axes, xrot=40, yrot=0) diff --git a/pandas/tests/plotting/test_series.py b/pandas/tests/plotting/test_series.py index 316ca6ce91af7..151bb3bed7207 100644 --- a/pandas/tests/plotting/test_series.py +++ 
b/pandas/tests/plotting/test_series.py @@ -274,12 +274,14 @@ def test_rotation(self): self._check_ticks_props(axes, xrot=30) def test_irregular_datetime(self): + from pandas.plotting._matplotlib.converter import DatetimeConverter + rng = date_range("1/1/2000", "3/1/2000") rng = rng[[0, 1, 2, 3, 5, 9, 10, 11, 12]] ser = Series(randn(len(rng)), rng) _, ax = self.plt.subplots() ax = ser.plot(ax=ax) - xp = datetime(1999, 1, 1).toordinal() + xp = DatetimeConverter.convert(datetime(1999, 1, 1), "", ax) ax.set_xlim("1/1/1999", "1/1/2001") assert xp == ax.get_xlim()[0] @@ -684,11 +686,13 @@ def test_kind_both_ways(self): kinds = ( plotting.PlotAccessor._common_kinds + plotting.PlotAccessor._series_kinds ) - _, ax = self.plt.subplots() for kind in kinds: - + _, ax = self.plt.subplots() s.plot(kind=kind, ax=ax) + self.plt.close() + _, ax = self.plt.subplots() getattr(s.plot, kind)() + self.plt.close() @pytest.mark.slow def test_invalid_plot_data(self): From 8f54d1401e9924627618c74db9d4671b72b7cb2d Mon Sep 17 00:00:00 2001 From: alm Date: Fri, 24 Jul 2020 15:46:54 +0300 Subject: [PATCH 171/632] Remove leftover partial-result code (#35397) --- pandas/core/apply.py | 21 ++++----------------- 1 file changed, 4 insertions(+), 17 deletions(-) diff --git a/pandas/core/apply.py b/pandas/core/apply.py index d4be660939773..733dbeed34b72 100644 --- a/pandas/core/apply.py +++ b/pandas/core/apply.py @@ -252,33 +252,20 @@ def apply_broadcast(self, target: "DataFrame") -> "DataFrame": return result def apply_standard(self): - - # partial result that may be returned from reduction - partial_result = None - - # compute the result using the series generator, - # use the result computed while trying to reduce if available. - results, res_index = self.apply_series_generator(partial_result) + results, res_index = self.apply_series_generator() # wrap results return self.wrap_results(results, res_index) - def apply_series_generator(self, partial_result=None) -> Tuple[ResType, "Index"]: + def apply_series_generator(self) -> Tuple[ResType, "Index"]: series_gen = self.series_generator res_index = self.result_index results = {} - # If a partial result was already computed, - # use it instead of running on the first element again - series_gen_enumeration = enumerate(series_gen) - if partial_result is not None: - i, v = next(series_gen_enumeration) - results[i] = partial_result - if self.ignore_failures: successes = [] - for i, v in series_gen_enumeration: + for i, v in enumerate(series_gen): try: results[i] = self.f(v) except Exception: @@ -292,7 +279,7 @@ def apply_series_generator(self, partial_result=None) -> Tuple[ResType, "Index"] else: with option_context("mode.chained_assignment", None): - for i, v in series_gen_enumeration: + for i, v in enumerate(series_gen): # ignore SettingWithCopy here in case the user mutates results[i] = self.f(v) if isinstance(results[i], ABCSeries): From 04e9e0afd476b1b8bed930e47bf60ee20fa78f38 Mon Sep 17 00:00:00 2001 From: Richard Shadrach <45562402+rhshadrach@users.noreply.github.com> Date: Fri, 24 Jul 2020 09:06:26 -0400 Subject: [PATCH 172/632] DOC: Fixed formatting and errors in whatsnew v1.1.0 (#35398) --- doc/source/whatsnew/v1.1.0.rst | 305 ++++++++++++++++----------------- 1 file changed, 152 insertions(+), 153 deletions(-) diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst index b1ab54a608748..c2a4abbea107c 100644 --- a/doc/source/whatsnew/v1.1.0.rst +++ b/doc/source/whatsnew/v1.1.0.rst @@ -42,8 +42,8 @@ For example, the below now works: .. 
_whatsnew_110.period_index_partial_string_slicing: -Nonmonotonic PeriodIndex Partial String Slicing -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +Non-monotonic PeriodIndex Partial String Slicing +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ :class:`PeriodIndex` now supports partial string slicing for non-monotonic indexes, mirroring :class:`DatetimeIndex` behavior (:issue:`31096`) @@ -130,7 +130,7 @@ The default setting of ``dropna`` argument is ``True`` which means ``NA`` are no Sorting with keys ^^^^^^^^^^^^^^^^^ -We've added a ``key`` argument to the DataFrame and Series sorting methods, including +We've added a ``key`` argument to the :class:`DataFrame` and :class:`Series` sorting methods, including :meth:`DataFrame.sort_values`, :meth:`DataFrame.sort_index`, :meth:`Series.sort_values`, and :meth:`Series.sort_index`. The ``key`` can be any callable function which is applied column-by-column to each column used for sorting, before sorting is performed (:issue:`27237`). @@ -215,14 +215,14 @@ For example: Grouper and resample now supports the arguments origin and offset ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -:class:`Grouper` and :class:`DataFrame.resample` now supports the arguments ``origin`` and ``offset``. It let the user control the timestamp on which to adjust the grouping. (:issue:`31809`) +:class:`Grouper` and :meth:`DataFrame.resample` now supports the arguments ``origin`` and ``offset``. It let the user control the timestamp on which to adjust the grouping. (:issue:`31809`) The bins of the grouping are adjusted based on the beginning of the day of the time series starting point. This works well with frequencies that are multiples of a day (like `30D`) or that divides a day (like `90s` or `1min`). But it can create inconsistencies with some frequencies that do not meet this criteria. To change this behavior you can now specify a fixed timestamp with the argument ``origin``. -Two arguments are now deprecated (more information in the documentation of :class:`DataFrame.resample`): +Two arguments are now deprecated (more information in the documentation of :meth:`DataFrame.resample`): - ``base`` should be replaced by ``offset``. -- ``loffset`` should be replaced by directly adding an offset to the index DataFrame after being resampled. +- ``loffset`` should be replaced by directly adding an offset to the index :class:`DataFrame` after being resampled. Small example of the use of ``origin``: @@ -248,7 +248,7 @@ Resample using a fixed origin: ts.resample('17min', origin='epoch').sum() ts.resample('17min', origin='2000-01-01').sum() -If needed you can adjust the bins with the argument ``offset`` (a Timedelta) that would be added to the default ``origin``. +If needed you can adjust the bins with the argument ``offset`` (a :class:`Timedelta`) that would be added to the default ``origin``. For a full example, see: :ref:`timeseries.adjust-the-start-of-the-bins`. 
@@ -286,10 +286,10 @@ Other enhancements
 - :meth:`~pandas.io.formats.style.Styler.highlight_null` now accepts ``subset`` argument (:issue:`31345`)
 - When writing directly to a sqlite connection :meth:`DataFrame.to_sql` now supports the ``multi`` method (:issue:`29921`)
 - :class:`pandas.errors.OptionError` is now exposed in ``pandas.errors`` (:issue:`27553`)
-- Add :meth:`api.extensions.ExtensionArray.argmax` and :meth:`api.extensions.ExtensionArray.argmin` (:issue:`24382`)
+- Added :meth:`api.extensions.ExtensionArray.argmax` and :meth:`api.extensions.ExtensionArray.argmin` (:issue:`24382`)
 - :func:`timedelta_range` will now infer a frequency when passed ``start``, ``stop``, and ``periods`` (:issue:`32377`)
 - Positional slicing on a :class:`IntervalIndex` now supports slices with ``step > 1`` (:issue:`31658`)
-- :class:`Series.str` now has a `fullmatch` method that matches a regular expression against the entire string in each row of the series, similar to `re.fullmatch` (:issue:`32806`).
+- :class:`Series.str` now has a ``fullmatch`` method that matches a regular expression against the entire string in each row of the :class:`Series`, similar to ``re.fullmatch`` (:issue:`32806`).
 - :meth:`DataFrame.sample` will now also allow array-like and BitGenerator objects to be passed to ``random_state`` as seeds (:issue:`32503`)
 - :meth:`Index.union` will now raise ``RuntimeWarning`` for :class:`MultiIndex` objects if the object inside are unsortable. Pass ``sort=False`` to suppress this warning (:issue:`33015`)
 - Added :meth:`Series.dt.isocalendar` and :meth:`DatetimeIndex.isocalendar` that returns a :class:`DataFrame` with year, week, and day calculated according to the ISO 8601 calendar (:issue:`33206`, :issue:`34392`).
@@ -306,37 +306,37 @@ Other enhancements
 - :meth:`melt` has gained an ``ignore_index`` (default ``True``) argument that, if set to ``False``, prevents the method from dropping the index (:issue:`17440`).
 - :meth:`Series.update` now accepts objects that can be coerced to a :class:`Series`, such as ``dict`` and ``list``, mirroring the behavior of :meth:`DataFrame.update` (:issue:`33215`)
-- :meth:`~pandas.core.groupby.DataFrameGroupBy.transform` and :meth:`~pandas.core.groupby.DataFrameGroupBy.aggregate` has gained ``engine`` and ``engine_kwargs`` arguments that supports executing functions with ``Numba`` (:issue:`32854`, :issue:`33388`)
+- :meth:`~pandas.core.groupby.DataFrameGroupBy.transform` and :meth:`~pandas.core.groupby.DataFrameGroupBy.aggregate` have gained ``engine`` and ``engine_kwargs`` arguments that support executing functions with ``Numba`` (:issue:`32854`, :issue:`33388`)
 - :meth:`~pandas.core.resample.Resampler.interpolate` now supports SciPy interpolation method :class:`scipy.interpolate.CubicSpline` as method ``cubicspline`` (:issue:`33670`)
 - :class:`~pandas.core.groupby.DataFrameGroupBy` and :class:`~pandas.core.groupby.SeriesGroupBy` now implement the ``sample`` method for doing random sampling within groups (:issue:`31775`)
 - :meth:`DataFrame.to_numpy` now supports the ``na_value`` keyword to control the NA sentinel in the output array (:issue:`33820`)
-- Added :class:`api.extension.ExtensionArray.equals` to the extension array interface, similarl to :meth:`Series.equals` (:issue:`27081`).
-- The minimum supported dta version has increased to 105 in :meth:`read_stata` and :class:`~pandas.io.stata.StataReader` (:issue:`26667`).
+- Added :class:`api.extension.ExtensionArray.equals` to the extension array interface, similar to :meth:`Series.equals` (:issue:`27081`)
+- The minimum supported dta version has increased to 105 in :func:`read_stata` and :class:`~pandas.io.stata.StataReader` (:issue:`26667`).
 - :meth:`~DataFrame.to_stata` supports compression using the ``compression`` keyword argument. Compression can either be inferred or explicitly set using a string or a dictionary containing both the method and any additional arguments that are passed to the compression library. Compression was also added to the low-level Stata-file writers :class:`~pandas.io.stata.StataWriter`, :class:`~pandas.io.stata.StataWriter117`, and :class:`~pandas.io.stata.StataWriterUTF8` (:issue:`26599`).
-- :meth:`HDFStore.put` now accepts a ``track_times`` parameter. Parameter is passed to ``create_table`` method of ``PyTables`` (:issue:`32682`).
+- :meth:`HDFStore.put` now accepts a ``track_times`` parameter. This parameter is passed to the ``create_table`` method of ``PyTables`` (:issue:`32682`).
 - :meth:`Series.plot` and :meth:`DataFrame.plot` now accepts `xlabel` and `ylabel` parameters to present labels on x and y axis (:issue:`9093`).
-- Make :class:`pandas.core.window.rolling.Rolling` and :class:`pandas.core.window.expanding.Expanding` iterable(:issue:`11704`)
-- Make ``option_context`` a :class:`contextlib.ContextDecorator`, which allows it to be used as a decorator over an entire function (:issue:`34253`).
+- Made :class:`pandas.core.window.rolling.Rolling` and :class:`pandas.core.window.expanding.Expanding` iterable (:issue:`11704`)
+- Made ``option_context`` a :class:`contextlib.ContextDecorator`, which allows it to be used as a decorator over an entire function (:issue:`34253`).
 - :meth:`DataFrame.to_csv` and :meth:`Series.to_csv` now accept an ``errors`` argument (:issue:`22610`)
 - :meth:`~pandas.core.groupby.DataFrameGroupBy.groupby.transform` now allows ``func`` to be ``pad``, ``backfill`` and ``cumcount`` (:issue:`31269`).
-- :meth:`read_json` now accepts an ``nrows`` parameter. (:issue:`33916`).
-- :meth:`DataFrame.hist`, :meth:`Series.hist`, :meth:`core.groupby.DataFrameGroupBy.hist`, and :meth:`core.groupby.SeriesGroupBy.hist` have gained the ``legend`` argument. Set to True to show a legend in the histogram. (:issue:`6279`)0
+- :func:`read_json` now accepts an ``nrows`` parameter (:issue:`33916`).
+- :meth:`DataFrame.hist`, :meth:`Series.hist`, :meth:`core.groupby.DataFrameGroupBy.hist`, and :meth:`core.groupby.SeriesGroupBy.hist` have gained the ``legend`` argument. Set to True to show a legend in the histogram. (:issue:`6279`)
 - :func:`concat` and :meth:`~DataFrame.append` now preserve extension dtypes, for example combining a nullable integer column with a numpy integer column will no longer result in object dtype but preserve the integer dtype (:issue:`33607`, :issue:`34339`, :issue:`34095`).
-- :meth:`read_gbq` now allows to disable progress bar (:issue:`33360`).
-- :meth:`read_gbq` now supports the ``max_results`` kwarg from ``pandas-gbq`` (:issue:`34639`).
+- :func:`read_gbq` now allows disabling the progress bar (:issue:`33360`).
+- :func:`read_gbq` now supports the ``max_results`` kwarg from ``pandas-gbq`` (:issue:`34639`).
+- :meth:`DataFrame.cov` and :meth:`Series.cov` now support a new parameter ``ddof`` to support delta degrees of freedom as in the corresponding numpy methods (:issue:`34611`). - :meth:`DataFrame.to_html` and :meth:`DataFrame.to_string`'s ``col_space`` parameter now accepts a list or dict to change only some specific columns' width (:issue:`28917`). - :meth:`DataFrame.to_excel` can now also write OpenOffice spreadsheet (.ods) files (:issue:`27222`) -- :meth:`~Series.explode` now accepts ``ignore_index`` to reset the index, similarly to :meth:`pd.concat` or :meth:`DataFrame.sort_values` (:issue:`34932`). +- :meth:`~Series.explode` now accepts ``ignore_index`` to reset the index, similar to :meth:`pd.concat` or :meth:`DataFrame.sort_values` (:issue:`34932`). - :meth:`DataFrame.to_markdown` and :meth:`Series.to_markdown` now accept ``index`` argument as an alias for tabulate's ``showindex`` (:issue:`32667`) -- :meth:`read_csv` now accepts string values like "0", "0.0", "1", "1.0" as convertible to the nullable boolean dtype (:issue:`34859`) +- :meth:`read_csv` now accepts string values like "0", "0.0", "1", "1.0" as convertible to the nullable Boolean dtype (:issue:`34859`) - :class:`pandas.core.window.ExponentialMovingWindow` now supports a ``times`` argument that allows ``mean`` to be calculated with observations spaced by the timestamps in ``times`` (:issue:`34839`) - :meth:`DataFrame.agg` and :meth:`Series.agg` now accept named aggregation for renaming the output columns/indexes. (:issue:`26513`) - ``compute.use_numba`` now exists as a configuration option that utilizes the numba engine when available (:issue:`33966`, :issue:`35374`) @@ -417,7 +417,7 @@ Failed Label-Based Lookups Always Raise KeyError ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Label lookups ``series[key]``, ``series.loc[key]`` and ``frame.loc[key]`` -used to raises either ``KeyError`` or ``TypeError`` depending on the type of +used to raise either ``KeyError`` or ``TypeError`` depending on the type of key and type of :class:`Index`. These now consistently raise ``KeyError`` (:issue:`31867`) .. ipython:: python @@ -488,7 +488,7 @@ Similarly, :meth:`DataFrame.at` and :meth:`Series.at` will raise a ``TypeError`` Failed Integer Lookups on MultiIndex Raise KeyError ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -Indexing with integers with a :class:`MultiIndex` that has a integer-dtype +Indexing with integers with a :class:`MultiIndex` that has an integer-dtype first level incorrectly failed to raise ``KeyError`` when one or more of those integer keys is not present in the first level of the index (:issue:`33539`) @@ -516,7 +516,7 @@ those integer keys is not present in the first level of the index (:issue:`33539 :meth:`DataFrame.merge` preserves right frame's row order ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -:meth:`DataFrame.merge` now preserves right frame's row order when executing a right merge (:issue:`27453`) +:meth:`DataFrame.merge` now preserves the right frame's row order when executing a right merge (:issue:`27453`) .. ipython:: python @@ -549,7 +549,7 @@ those integer keys is not present in the first level of the index (:issue:`33539 Assignment to multiple columns of a DataFrame when some columns do not exist ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -Assignment to multiple columns of a :class:`DataFrame` when some of the columns do not exist would previously assign the values to the last column. 
Now, new columns would be constructed with the right values. (:issue:`13658`) +Assignment to multiple columns of a :class:`DataFrame` when some of the columns do not exist would previously assign the values to the last column. Now, new columns will be constructed with the right values. (:issue:`13658`) .. ipython:: python @@ -622,7 +622,7 @@ Using :meth:`DataFrame.groupby` with ``as_index=False`` and the function ``idxma df.groupby("a", as_index=False).nunique() -The method :meth:`~pandas.core.groupby.DataFrameGroupBy.size` would previously ignore ``as_index=False``. Now the grouping columns are returned as columns, making the result a `DataFrame` instead of a `Series`. (:issue:`32599`) +The method :meth:`~pandas.core.groupby.DataFrameGroupBy.size` would previously ignore ``as_index=False``. Now the grouping columns are returned as columns, making the result a :class:`DataFrame` instead of a :class:`Series`. (:issue:`32599`) *Previous behavior*: @@ -797,37 +797,36 @@ Development Changes Deprecations ~~~~~~~~~~~~ -- Lookups on a :class:`Series` with a single-item list containing a slice (e.g. ``ser[[slice(0, 4)]]``) are deprecated, will raise in a future version. Either convert the list to tuple, or pass the slice directly instead (:issue:`31333`) +- Lookups on a :class:`Series` with a single-item list containing a slice (e.g. ``ser[[slice(0, 4)]]``) are deprecated and will raise in a future version. Either convert the list to a tuple, or pass the slice directly instead (:issue:`31333`) -- :meth:`DataFrame.mean` and :meth:`DataFrame.median` with ``numeric_only=None`` will include datetime64 and datetime64tz columns in a future version (:issue:`29941`) +- :meth:`DataFrame.mean` and :meth:`DataFrame.median` with ``numeric_only=None`` will include ``datetime64`` and ``datetime64tz`` columns in a future version (:issue:`29941`) - Setting values with ``.loc`` using a positional slice is deprecated and will raise in a future version. Use ``.loc`` with labels or ``.iloc`` with positions instead (:issue:`31840`) -- :meth:`DataFrame.to_dict` has deprecated accepting short names for ``orient`` in future versions (:issue:`32515`) +- :meth:`DataFrame.to_dict` has deprecated accepting short names for ``orient`` and will raise in a future version (:issue:`32515`) - :meth:`Categorical.to_dense` is deprecated and will be removed in a future version, use ``np.asarray(cat)`` instead (:issue:`32639`) - The ``fastpath`` keyword in the ``SingleBlockManager`` constructor is deprecated and will be removed in a future version (:issue:`33092`) - Providing ``suffixes`` as a ``set`` in :func:`pandas.merge` is deprecated. Provide a tuple instead (:issue:`33740`, :issue:`34741`). -- Indexing a series with a multi-dimensional indexer like ``[:, None]`` to return an ndarray now raises a ``FutureWarning``. Convert to a NumPy array before indexing instead (:issue:`27837`) +- Indexing a :class:`Series` with a multi-dimensional indexer like ``[:, None]`` to return an ``ndarray`` now raises a ``FutureWarning``. Convert to a NumPy array before indexing instead (:issue:`27837`) - :meth:`Index.is_mixed` is deprecated and will be removed in a future version, check ``index.inferred_type`` directly instead (:issue:`32922`) -- Passing any arguments but the first one to :func:`read_html` as - positional arguments is deprecated since version 1.1. All other +- Passing any arguments but the first one to :func:`read_html` as + positional arguments is deprecated. All other arguments should be given as keyword arguments (:issue:`27573`). 
- Passing any arguments but ``path_or_buf`` (the first one) to - :func:`read_json` as positional arguments is deprecated since - version 1.1. All other arguments should be given as keyword - arguments (:issue:`27573`). + :func:`read_json` as positional arguments is deprecated. All + other arguments should be given as keyword arguments (:issue:`27573`). -- Passing any arguments but the first 2 to :func:`read_excel` as - positional arguments is deprecated since version 1.1. All other +- Passing any arguments but the first two to :func:`read_excel` as + positional arguments is deprecated. All other arguments should be given as keyword arguments (:issue:`27573`). - :func:`pandas.api.types.is_categorical` is deprecated and will be removed in a future version; use :func:`pandas.api.types.is_categorical_dtype` instead (:issue:`33385`) - :meth:`Index.get_value` is deprecated and will be removed in a future version (:issue:`19728`) -- :meth:`Series.dt.week` and `Series.dt.weekofyear` are deprecated and will be removed in a future version, use :meth:`Series.dt.isocalendar().week` instead (:issue:`33595`) +- :meth:`Series.dt.week` and :meth:`Series.dt.weekofyear` are deprecated and will be removed in a future version, use :meth:`Series.dt.isocalendar().week` instead (:issue:`33595`) - :meth:`DatetimeIndex.week` and ``DatetimeIndex.weekofyear`` are deprecated and will be removed in a future version, use ``DatetimeIndex.isocalendar().week`` instead (:issue:`33595`) - :meth:`DatetimeArray.week` and ``DatetimeArray.weekofyear`` are deprecated and will be removed in a future version, use ``DatetimeArray.isocalendar().week`` instead (:issue:`33595`) - :meth:`DateOffset.__call__` is deprecated and will be removed in a future version, use ``offset + other`` instead (:issue:`34171`) -- :meth:`~pandas.tseries.offsets.BusinessDay.apply_index` is deprecated and will be removed in a future version. Use ``offset + other`` instead (:issue:`34580`) +- :meth:`~pandas.tseries.offsets.BusinessDay.apply_index` is deprecated and will be removed in a future version. Use ``offset + other`` instead (:issue:`34580`) - :meth:`DataFrame.tshift` and :meth:`Series.tshift` are deprecated and will be removed in a future version, use :meth:`DataFrame.shift` and :meth:`Series.shift` instead (:issue:`11631`) - Indexing an :class:`Index` object with a float key is deprecated, and will raise an ``IndexError`` in the future. You can manually convert to an integer key @@ -835,8 +834,8 @@ Deprecations - The ``squeeze`` keyword in :meth:`~DataFrame.groupby` is deprecated and will be removed in a future version (:issue:`32380`) - The ``tz`` keyword in :meth:`Period.to_timestamp` is deprecated and will be removed in a future version; use ``per.to_timestamp(...).tz_localize(tz)`` instead (:issue:`34522`) - :meth:`DatetimeIndex.to_perioddelta` is deprecated and will be removed in a future version. 
Use ``index - index.to_period(freq).to_timestamp()`` instead (:issue:`34853`) -- :func:`DataFrame.melt` accepting a value_name that already exists is deprecated, and will be removed in a future version (:issue:`34731`) -- the ``center`` keyword in the :meth:`DataFrame.expanding` function is deprecated and will be removed in a future version (:issue:`20647`) +- :meth:`DataFrame.melt` accepting a ``value_name`` that already exists is deprecated, and will be removed in a future version (:issue:`34731`) +- The ``center`` keyword in the :meth:`DataFrame.expanding` function is deprecated and will be removed in a future version (:issue:`20647`) @@ -851,7 +850,7 @@ Performance improvements - Performance improvement in :class:`Timedelta` constructor (:issue:`30543`) - Performance improvement in :class:`Timestamp` constructor (:issue:`30543`) - Performance improvement in flex arithmetic ops between :class:`DataFrame` and :class:`Series` with ``axis=0`` (:issue:`31296`) -- Performance improvement in arithmetic ops between :class:`DataFrame` and :class:`Series` with ``axis=1`` (:issue:`33600`) +- Performance improvement in arithmetic ops between :class:`DataFrame` and :class:`Series` with ``axis=1`` (:issue:`33600`) - The internal index method :meth:`~Index._shallow_copy` now copies cached attributes over to the new index, avoiding creating these again on the new index. This can speed up many operations that depend on creating copies of existing indexes (:issue:`28584`, :issue:`32640`, :issue:`32669`) @@ -861,14 +860,14 @@ Performance improvements :issue:`32825`, :issue:`32826`, :issue:`32856`, :issue:`32858`). - Performance improvement for groupby methods :meth:`~pandas.core.groupby.groupby.Groupby.first` and :meth:`~pandas.core.groupby.groupby.Groupby.last` (:issue:`34178`) -- Performance improvement in :func:`factorize` for nullable (integer and boolean) dtypes (:issue:`33064`). +- Performance improvement in :func:`factorize` for nullable (integer and Boolean) dtypes (:issue:`33064`). - Performance improvement when constructing :class:`Categorical` objects (:issue:`33921`) - Fixed performance regression in :func:`pandas.qcut` and :func:`pandas.cut` (:issue:`33921`) -- Performance improvement in reductions (sum, prod, min, max) for nullable (integer and boolean) dtypes (:issue:`30982`, :issue:`33261`, :issue:`33442`). +- Performance improvement in reductions (``sum``, ``prod``, ``min``, ``max``) for nullable (integer and Boolean) dtypes (:issue:`30982`, :issue:`33261`, :issue:`33442`). - Performance improvement in arithmetic operations between two :class:`DataFrame` objects (:issue:`32779`) - Performance improvement in :class:`pandas.core.groupby.RollingGroupby` (:issue:`34052`) -- Performance improvement in arithmetic operations (sub, add, mul, div) for MultiIndex (:issue:`34297`) -- Performance improvement in `DataFrame[bool_indexer]` when `bool_indexer` is a list (:issue:`33924`) +- Performance improvement in arithmetic operations (``sub``, ``add``, ``mul``, ``div``) for :class:`MultiIndex` (:issue:`34297`) +- Performance improvement in ``DataFrame[bool_indexer]`` when ``bool_indexer`` is a ``list`` (:issue:`33924`) - Significant performance improvement of :meth:`io.formats.style.Styler.render` with styles added with various ways such as :meth:`io.formats.style.Styler.apply`, :meth:`io.formats.style.Styler.applymap` or :meth:`io.formats.style.Styler.bar` (:issue:`19917`) .. 
---------------------------------------------------------------------------

@@ -883,12 +882,12 @@ Categorical
 ^^^^^^^^^^^

 - Passing an invalid ``fill_value`` to :meth:`Categorical.take` raises a ``ValueError`` instead of ``TypeError`` (:issue:`33660`)
-- Combining a ``Categorical`` with integer categories and which contains missing values with a float dtype column in operations such as :func:`concat` or :meth:`~DataFrame.append` will now result in a float column instead of an object dtyped column (:issue:`33607`)
+- Combining a :class:`Categorical` that has integer categories and contains missing values with a float dtype column in operations such as :func:`concat` or :meth:`~DataFrame.append` will now result in a float column instead of an object dtype column (:issue:`33607`)
 - Bug where :func:`merge` was unable to join on non-unique categorical indices (:issue:`28189`)
 - Bug when passing categorical data to :class:`Index` constructor along with ``dtype=object`` incorrectly returning a :class:`CategoricalIndex` instead of object-dtype :class:`Index` (:issue:`32167`)
 - Bug where :class:`Categorical` comparison operator ``__ne__`` would incorrectly evaluate to ``False`` when either element was missing (:issue:`32276`)
 - :meth:`Categorical.fillna` now accepts :class:`Categorical` ``other`` argument (:issue:`32420`)
-- Repr of :class:`Categorical` was not distinguishing between int and str (:issue:`33676`)
+- Repr of :class:`Categorical` was not distinguishing between ``int`` and ``str`` (:issue:`33676`)

 Datetimelike
 ^^^^^^^^^^^^
@@ -897,30 +896,30 @@ Datetimelike
 - :meth:`Series.to_timestamp` now raises a ``TypeError`` if the axis is not a :class:`PeriodIndex`. Previously an ``AttributeError`` was raised (:issue:`33327`)
 - :meth:`Series.to_period` now raises a ``TypeError`` if the axis is not a :class:`DatetimeIndex`.
Previously an ``AttributeError`` was raised (:issue:`33327`) - :class:`Period` no longer accepts tuples for the ``freq`` argument (:issue:`34658`) -- Bug in :class:`Timestamp` where constructing :class:`Timestamp` from ambiguous epoch time and calling constructor again changed :meth:`Timestamp.value` property (:issue:`24329`) +- Bug in :class:`Timestamp` where constructing a :class:`Timestamp` from ambiguous epoch time and calling constructor again changed the :meth:`Timestamp.value` property (:issue:`24329`) - :meth:`DatetimeArray.searchsorted`, :meth:`TimedeltaArray.searchsorted`, :meth:`PeriodArray.searchsorted` not recognizing non-pandas scalars and incorrectly raising ``ValueError`` instead of ``TypeError`` (:issue:`30950`) - Bug in :class:`Timestamp` where constructing :class:`Timestamp` with dateutil timezone less than 128 nanoseconds before daylight saving time switch from winter to summer would result in nonexistent time (:issue:`31043`) - Bug in :meth:`Period.to_timestamp`, :meth:`Period.start_time` with microsecond frequency returning a timestamp one nanosecond earlier than the correct time (:issue:`31475`) -- :class:`Timestamp` raising confusing error message when year, month or day is missing (:issue:`31200`) -- Bug in :class:`DatetimeIndex` constructor incorrectly accepting ``bool``-dtyped inputs (:issue:`32668`) +- :class:`Timestamp` raised a confusing error message when year, month or day is missing (:issue:`31200`) +- Bug in :class:`DatetimeIndex` constructor incorrectly accepting ``bool``-dtype inputs (:issue:`32668`) - Bug in :meth:`DatetimeIndex.searchsorted` not accepting a ``list`` or :class:`Series` as its argument (:issue:`32762`) - Bug where :meth:`PeriodIndex` raised when passed a :class:`Series` of strings (:issue:`26109`) -- Bug in :class:`Timestamp` arithmetic when adding or subtracting a ``np.ndarray`` with ``timedelta64`` dtype (:issue:`33296`) -- Bug in :meth:`DatetimeIndex.to_period` not infering the frequency when called with no arguments (:issue:`33358`) -- Bug in :meth:`DatetimeIndex.tz_localize` incorrectly retaining ``freq`` in some cases where the original freq is no longer valid (:issue:`30511`) +- Bug in :class:`Timestamp` arithmetic when adding or subtracting an ``np.ndarray`` with ``timedelta64`` dtype (:issue:`33296`) +- Bug in :meth:`DatetimeIndex.to_period` not inferring the frequency when called with no arguments (:issue:`33358`) +- Bug in :meth:`DatetimeIndex.tz_localize` incorrectly retaining ``freq`` in some cases where the original ``freq`` is no longer valid (:issue:`30511`) - Bug in :meth:`DatetimeIndex.intersection` losing ``freq`` and timezone in some cases (:issue:`33604`) - Bug in :meth:`DatetimeIndex.get_indexer` where incorrect output would be returned for mixed datetime-like targets (:issue:`33741`) - Bug in :class:`DatetimeIndex` addition and subtraction with some types of :class:`DateOffset` objects incorrectly retaining an invalid ``freq`` attribute (:issue:`33779`) - Bug in :class:`DatetimeIndex` where setting the ``freq`` attribute on an index could silently change the ``freq`` attribute on another index viewing the same data (:issue:`33552`) -- :meth:`DataFrame.min`/:meth:`DataFrame.max` not returning consistent result with :meth:`Series.min`/:meth:`Series.max` when called on objects initialized with empty :func:`pd.to_datetime` +- :meth:`DataFrame.min` and :meth:`DataFrame.max` were not returning consistent results with :meth:`Series.min` and :meth:`Series.max` when called on objects initialized with empty 
:func:`pd.to_datetime` - Bug in :meth:`DatetimeIndex.intersection` and :meth:`TimedeltaIndex.intersection` with results not having the correct ``name`` attribute (:issue:`33904`) - Bug in :meth:`DatetimeArray.__setitem__`, :meth:`TimedeltaArray.__setitem__`, :meth:`PeriodArray.__setitem__` incorrectly allowing values with ``int64`` dtype to be silently cast (:issue:`33717`) - Bug in subtracting :class:`TimedeltaIndex` from :class:`Period` incorrectly raising ``TypeError`` in some cases where it should succeed and ``IncompatibleFrequency`` in some cases where it should raise ``TypeError`` (:issue:`33883`) -- Bug in constructing a Series or Index from a read-only NumPy array with non-ns +- Bug in constructing a :class:`Series` or :class:`Index` from a read-only NumPy array with non-ns resolution which converted to object dtype instead of coercing to ``datetime64[ns]`` dtype when within the timestamp bounds (:issue:`34843`). - The ``freq`` keyword in :class:`Period`, :func:`date_range`, :func:`period_range`, :func:`pd.tseries.frequencies.to_offset` no longer allows tuples, pass as string instead (:issue:`34703`) -- Bug in :meth:`DataFrame.append` when appending a :class:`Series` containing a scalar tz-aware :class:`Timestamp` to an empty :class:`DataFrame` resulted in an object column instead of datetime64[ns, tz] dtype (:issue:`35038`) +- Bug in :meth:`DataFrame.append` when appending a :class:`Series` containing a scalar tz-aware :class:`Timestamp` to an empty :class:`DataFrame` resulted in an object column instead of ``datetime64[ns, tz]`` dtype (:issue:`35038`) - ``OutOfBoundsDatetime`` issues an improved error message when timestamp is out of implementation bounds. (:issue:`32967`) - Bug in :meth:`AbstractHolidayCalendar.holidays` when no rules were defined (:issue:`31415`) - Bug in :class:`Tick` comparisons raising ``TypeError`` when comparing against timedelta-like objects (:issue:`34088`) @@ -931,13 +930,13 @@ Timedelta - Bug in constructing a :class:`Timedelta` with a high precision integer that would round the :class:`Timedelta` components (:issue:`31354`) - Bug in dividing ``np.nan`` or ``None`` by :class:`Timedelta` incorrectly returning ``NaT`` (:issue:`31869`) -- Timedeltas now understand ``µs`` as identifier for microsecond (:issue:`32899`) +- :class:`Timedelta` now understands ``µs`` as an identifier for microsecond (:issue:`32899`) - :class:`Timedelta` string representation now includes nanoseconds, when nanoseconds are non-zero (:issue:`9309`) -- Bug in comparing a :class:`Timedelta` object against a ``np.ndarray`` with ``timedelta64`` dtype incorrectly viewing all entries as unequal (:issue:`33441`) +- Bug in comparing a :class:`Timedelta` object against an ``np.ndarray`` with ``timedelta64`` dtype incorrectly viewing all entries as unequal (:issue:`33441`) - Bug in :func:`timedelta_range` that produced an extra point on a edge case (:issue:`30353`, :issue:`33498`) - Bug in :meth:`DataFrame.resample` that produced an extra point on a edge case (:issue:`30353`, :issue:`13022`, :issue:`33498`) - Bug in :meth:`DataFrame.resample` that ignored the ``loffset`` argument when dealing with timedelta (:issue:`7687`, :issue:`33498`) -- Bug in :class:`Timedelta` and `pandas.to_timedelta` that ignored `unit`-argument for string input (:issue:`12136`) +- Bug in :class:`Timedelta` and :func:`pandas.to_timedelta` that ignored the ``unit`` argument for string input (:issue:`12136`) Timezones ^^^^^^^^^ @@ -948,24 +947,24 @@ Timezones Numeric ^^^^^^^ - Bug in :meth:`DataFrame.floordiv` with 
``axis=0`` not treating division-by-zero like :meth:`Series.floordiv` (:issue:`31271`)
-- Bug in :meth:`to_numeric` with string argument ``"uint64"`` and ``errors="coerce"`` silently fails (:issue:`32394`)
-- Bug in :meth:`to_numeric` with ``downcast="unsigned"`` fails for empty data (:issue:`32493`)
+- Bug in :func:`to_numeric` with string argument ``"uint64"`` and ``errors="coerce"`` silently failing (:issue:`32394`)
+- Bug in :func:`to_numeric` with ``downcast="unsigned"`` failing for empty data (:issue:`32493`)
 - Bug in :meth:`DataFrame.mean` with ``numeric_only=False`` and either ``datetime64`` dtype or ``PeriodDtype`` column incorrectly raising ``TypeError`` (:issue:`32426`)
 - Bug in :meth:`DataFrame.count` with ``level="foo"`` and index level ``"foo"`` containing NaNs causes segmentation fault (:issue:`21824`)
 - Bug in :meth:`DataFrame.diff` with ``axis=1`` returning incorrect results with mixed dtypes (:issue:`32995`)
 - Bug in :meth:`DataFrame.corr` and :meth:`DataFrame.cov` raising when handling nullable integer columns with ``pandas.NA`` (:issue:`33803`)
-- Bug in arithmetic operations between ``DataFrame`` objects with non-overlapping columns with duplicate labels causing an infinite loop (:issue:`35194`)
+- Bug in arithmetic operations between :class:`DataFrame` objects with non-overlapping columns with duplicate labels causing an infinite loop (:issue:`35194`)
 - Bug in :class:`DataFrame` and :class:`Series` addition and subtraction between object-dtype objects and ``datetime64`` dtype objects (:issue:`33824`)
-- Bug in :meth:`Index.difference` incorrect results when comparing a :class:`Float64Index` and object :class:`Index` (:issue:`35217`)
+- Bug in :meth:`Index.difference` giving incorrect results when comparing a :class:`Float64Index` and object :class:`Index` (:issue:`35217`)
 - Bug in :class:`DataFrame` reductions (e.g. ``df.min()``, ``df.max()``) with ``ExtensionArray`` dtypes (:issue:`34520`, :issue:`32651`)
-- :meth:`Series.interpolate` and :meth:`DataFrame.interpolate` now raises ValueError if ``limit_direction`` is 'forward' or 'both' and ``method`` is 'backfill' or 'bfill' or ``limit_direction`` is 'backward' or 'both' and ``method`` is 'pad' or 'ffill' (:issue:`34746`)
+- :meth:`Series.interpolate` and :meth:`DataFrame.interpolate` now raise a ``ValueError`` if ``limit_direction`` is ``'forward'`` or ``'both'`` and ``method`` is ``'backfill'`` or ``'bfill'``, or ``limit_direction`` is ``'backward'`` or ``'both'`` and ``method`` is ``'pad'`` or ``'ffill'`` (:issue:`34746`)

 Conversion
 ^^^^^^^^^^
 - Bug in :class:`Series` construction from NumPy array with big-endian ``datetime64`` dtype (:issue:`29684`)
 - Bug in :class:`Timedelta` construction with large nanoseconds keyword value (:issue:`32402`)
 - Bug in :class:`DataFrame` construction where sets would be duplicated rather than raising (:issue:`32582`)
-- The :class:`DataFrame` constructor no longer accepts a list of ``DataFrame`` objects. Because of changes to NumPy, ``DataFrame`` objects are now consistently treated as 2D objects, so a list of ``DataFrames`` is considered 3D, and no longer acceptible for the ``DataFrame`` constructor (:issue:`32289`).
+- The :class:`DataFrame` constructor no longer accepts a list of :class:`DataFrame` objects. Because of changes to NumPy, :class:`DataFrame` objects are now consistently treated as 2D objects, so a list of :class:`DataFrame` objects is considered 3D, and no longer acceptable for the :class:`DataFrame` constructor (:issue:`32289`).
 - Bug in :class:`DataFrame` when initiating a frame with lists and assign ``columns`` with nested list for ``MultiIndex`` (:issue:`32173`)
 - Improved error message for invalid construction of list when creating a new index (:issue:`35190`)

@@ -996,42 +995,42 @@ Indexing
 - Bug in :meth:`DataFrame.at` when either columns or index is non-unique (:issue:`33041`)
 - Bug in :meth:`Series.loc` and :meth:`DataFrame.loc` when indexing with an integer key on a object-dtype :class:`Index` that is not all-integers (:issue:`31905`)
 - Bug in :meth:`DataFrame.iloc.__setitem__` on a :class:`DataFrame` with duplicate columns incorrectly setting values for all matching columns (:issue:`15686`, :issue:`22036`)
-- Bug in :meth:`DataFrame.loc:` and :meth:`Series.loc` with a :class:`DatetimeIndex`, :class:`TimedeltaIndex`, or :class:`PeriodIndex` incorrectly allowing lookups of non-matching datetime-like dtypes (:issue:`32650`)
+- Bug in :meth:`DataFrame.loc` and :meth:`Series.loc` with a :class:`DatetimeIndex`, :class:`TimedeltaIndex`, or :class:`PeriodIndex` incorrectly allowing lookups of non-matching datetime-like dtypes (:issue:`32650`)
 - Bug in :meth:`Series.__getitem__` indexing with non-standard scalars, e.g. ``np.dtype`` (:issue:`32684`)
-- Bug in :class:`Index` constructor where an unhelpful error message was raised for ``numpy`` scalars (:issue:`33017`)
+- Bug in :class:`Index` constructor where an unhelpful error message was raised for NumPy scalars (:issue:`33017`)
 - Bug in :meth:`DataFrame.lookup` incorrectly raising an ``AttributeError`` when ``frame.index`` or ``frame.columns`` is not unique; this will now raise a ``ValueError`` with a helpful error message (:issue:`33041`)
 - Bug in :meth:`DataFrame.iloc.__setitem__` creating a new array instead of overwriting ``Categorical`` values in-place (:issue:`32831`)
 - Bug in :class:`Interval` where a :class:`Timedelta` could not be added or subtracted from a :class:`Timestamp` interval (:issue:`32023`)
-- Bug in :meth:`DataFrame.copy` _item_cache not invalidated after copy causes post-copy value updates to not be reflected (:issue:`31784`)
+- Bug in :meth:`DataFrame.copy` not invalidating ``_item_cache`` after copy, which caused post-copy value updates to not be reflected (:issue:`31784`)
 - Fixed regression in :meth:`DataFrame.loc` and :meth:`Series.loc` throwing an error when a ``datetime64[ns, tz]`` value is provided (:issue:`32395`)
-- Bug in `Series.__getitem__` with an integer key and a :class:`MultiIndex` with leading integer level failing to raise ``KeyError`` if the key is not present in the first level (:issue:`33355`)
-- Bug in :meth:`DataFrame.iloc` when slicing a single column-:class:`DataFrame` with ``ExtensionDtype`` (e.g. ``df.iloc[:, :1]``) returning an invalid result (:issue:`32957`)
+- Bug in :meth:`Series.__getitem__` with an integer key and a :class:`MultiIndex` with leading integer level failing to raise ``KeyError`` if the key is not present in the first level (:issue:`33355`)
+- Bug in :meth:`DataFrame.iloc` when slicing a single column :class:`DataFrame` with ``ExtensionDtype`` (e.g.
``df.iloc[:, :1]``) returning an invalid result (:issue:`32957`) - Bug in :meth:`DatetimeIndex.insert` and :meth:`TimedeltaIndex.insert` causing index ``freq`` to be lost when setting an element into an empty :class:`Series` (:issue:`33573`) - Bug in :meth:`Series.__setitem__` with an :class:`IntervalIndex` and a list-like key of integers (:issue:`33473`) - Bug in :meth:`Series.__getitem__` allowing missing labels with ``np.ndarray``, :class:`Index`, :class:`Series` indexers but not ``list``, these now all raise ``KeyError`` (:issue:`33646`) - Bug in :meth:`DataFrame.truncate` and :meth:`Series.truncate` where index was assumed to be monotone increasing (:issue:`33756`) -- Indexing with a list of strings representing datetimes failed on :class:`DatetimeIndex` or :class:`PeriodIndex`(:issue:`11278`) +- Indexing with a list of strings representing datetimes failed on :class:`DatetimeIndex` or :class:`PeriodIndex` (:issue:`11278`) - Bug in :meth:`Series.at` when used with a :class:`MultiIndex` would raise an exception on valid inputs (:issue:`26989`) - Bug in :meth:`DataFrame.loc` with dictionary of values changes columns with dtype of ``int`` to ``float`` (:issue:`34573`) -- Bug in :meth:`Series.loc` when used with a :class:`MultiIndex` would raise an IndexingError when accessing a None value (:issue:`34318`) +- Bug in :meth:`Series.loc` when used with a :class:`MultiIndex` would raise an ``IndexingError`` when accessing a ``None`` value (:issue:`34318`) - Bug in :meth:`DataFrame.reset_index` and :meth:`Series.reset_index` would not preserve data types on an empty :class:`DataFrame` or :class:`Series` with a :class:`MultiIndex` (:issue:`19602`) - Bug in :class:`Series` and :class:`DataFrame` indexing with a ``time`` key on a :class:`DatetimeIndex` with ``NaT`` entries (:issue:`35114`) Missing ^^^^^^^ -- Calling :meth:`fillna` on an empty Series now correctly returns a shallow copied object. The behaviour is now consistent with :class:`Index`, :class:`DataFrame` and a non-empty :class:`Series` (:issue:`32543`). -- Bug in :meth:`replace` when argument ``to_replace`` is of type dict/list and is used on a :class:`Series` containing ```` was raising a ``TypeError``. The method now handles this by ignoring ```` values when doing the comparison for the replacement (:issue:`32621`) -- Bug in :meth:`~Series.any` and :meth:`~Series.all` incorrectly returning ```` for all ``False`` or all ``True`` values using the nulllable boolean dtype and with ``skipna=False`` (:issue:`33253`) -- Clarified documentation on interpolate with method =akima. The ``der`` parameter must be scalar or None (:issue:`33426`) -- :meth:`DataFrame.interpolate` uses the correct axis convention now. Previously interpolating along columns lead to interpolation along indices and vice versa. Furthermore interpolating with methods ``pad``, ``ffill``, ``bfill`` and ``backfill`` are identical to using these methods with :meth:`fillna` (:issue:`12918`, :issue:`29146`) -- Bug in :meth:`DataFrame.interpolate` when called on a DataFrame with column names of string type was throwing a ValueError. The method is no independing of the type of column names (:issue:`33956`) -- passing :class:`NA` will into a format string using format specs will now work. For example ``"{:.1f}".format(pd.NA)`` would previously raise a ``ValueError``, but will now return the string ``""`` (:issue:`34740`) +- Calling :meth:`fillna` on an empty :class:`Series` now correctly returns a shallow copied object. 
The behaviour is now consistent with :class:`Index`, :class:`DataFrame` and a non-empty :class:`Series` (:issue:`32543`).
+- Bug in :meth:`Series.replace` when argument ``to_replace`` is of type dict/list and is used on a :class:`Series` containing ``<NA>`` was raising a ``TypeError``. The method now handles this by ignoring ``<NA>`` values when doing the comparison for the replacement (:issue:`32621`)
+- Bug in :meth:`~Series.any` and :meth:`~Series.all` incorrectly returning ``<NA>`` for all ``False`` or all ``True`` values using the nullable Boolean dtype and with ``skipna=False`` (:issue:`33253`)
+- Clarified documentation on interpolate with ``method=akima``. The ``der`` parameter must be scalar or ``None`` (:issue:`33426`)
+- :meth:`DataFrame.interpolate` uses the correct axis convention now. Previously interpolating along columns led to interpolation along indices and vice versa. Furthermore, interpolating with methods ``pad``, ``ffill``, ``bfill`` and ``backfill`` is identical to using these methods with :meth:`DataFrame.fillna` (:issue:`12918`, :issue:`29146`)
+- Bug in :meth:`DataFrame.interpolate` when called on a :class:`DataFrame` with column names of string type was throwing a ``ValueError``. The method is now independent of the type of the column names (:issue:`33956`)
+- Passing :class:`NA` into a format string using format specs will now work. For example ``"{:.1f}".format(pd.NA)`` would previously raise a ``ValueError``, but will now return the string ``"<NA>"`` (:issue:`34740`)
 - Bug in :meth:`Series.map` not raising on invalid ``na_action`` (:issue:`32815`)

 MultiIndex
 ^^^^^^^^^^

-- :meth:`DataFrame.swaplevels` now raises a ``TypeError`` if the axis is not a :class:`MultiIndex`. Previously an ``AttributeError`` was raised (:issue:`31126`)
+- :meth:`DataFrame.swaplevel` now raises a ``TypeError`` if the axis is not a :class:`MultiIndex`. Previously an ``AttributeError`` was raised (:issue:`31126`)
 - Bug in :meth:`Dataframe.loc` when used with a :class:`MultiIndex`. The returned values were not in the same order as the given inputs (:issue:`22797`)

 .. ipython:: python

@@ -1051,128 +1050,128 @@ MultiIndex

     # Common elements are now guaranteed to be ordered by the left side
     left.intersection(right, sort=False)

-- Bug when joining 2 Multi-indexes, without specifying level with different columns. Return-indexers parameter is ignored. (:issue:`34074`)
+- Bug when joining two :class:`MultiIndex` objects without specifying level with different columns. Return-indexers parameter was ignored. (:issue:`34074`)

 I/O
 ^^^

-- Passing a `set` as `names` argument to :func:`pandas.read_csv`, :func:`pandas.read_table`, or :func:`pandas.read_fwf` will raise ``ValueError: Names should be an ordered collection.`` (:issue:`34946`)
+- Passing a ``set`` as ``names`` argument to :func:`pandas.read_csv`, :func:`pandas.read_table`, or :func:`pandas.read_fwf` will raise ``ValueError: Names should be an ordered collection.`` (:issue:`34946`)
 - Bug in print-out when ``display.precision`` is zero. (:issue:`20359`)
-- Bug in :meth:`read_json` where integer overflow was occurring when json contains big number strings. (:issue:`30320`)
-- `read_csv` will now raise a ``ValueError`` when the arguments `header` and `prefix` both are not `None`. (:issue:`27394`)
+- Bug in :func:`read_json` where integer overflow was occurring when json contains big number strings. (:issue:`30320`)
+- :func:`read_csv` will now raise a ``ValueError`` when the arguments ``header`` and ``prefix`` both are not ``None``.
(:issue:`27394`)
 - Bug in :meth:`DataFrame.to_json` was raising ``NotFoundError`` when ``path_or_buf`` was an S3 URI (:issue:`28375`)
 - Bug in :meth:`DataFrame.to_parquet` overwriting pyarrow's default for ``coerce_timestamps``; following pyarrow's default allows writing nanosecond timestamps with ``version="2.0"`` (:issue:`31652`).
-- Bug in :meth:`read_csv` was raising `TypeError` when `sep=None` was used in combination with `comment` keyword (:issue:`31396`)
-- Bug in :class:`HDFStore` that caused it to set to ``int64`` the dtype of a ``datetime64`` column when reading a DataFrame in Python 3 from fixed format written in Python 2 (:issue:`31750`)
+- Bug in :func:`read_csv` was raising ``TypeError`` when ``sep=None`` was used in combination with the ``comment`` keyword (:issue:`31396`)
+- Bug in :class:`HDFStore` that caused it to set to ``int64`` the dtype of a ``datetime64`` column when reading a :class:`DataFrame` in Python 3 from fixed format written in Python 2 (:issue:`31750`)
 - :func:`read_sas()` now handles dates and datetimes larger than :attr:`Timestamp.max` returning them as :class:`datetime.datetime` objects (:issue:`20927`)
 - Bug in :meth:`DataFrame.to_json` where ``Timedelta`` objects would not be serialized correctly with ``date_format="iso"`` (:issue:`28256`)
-- :func:`read_csv` will raise a ``ValueError`` when the column names passed in `parse_dates` are missing in the Dataframe (:issue:`31251`)
-- Bug in :meth:`read_excel` where a UTF-8 string with a high surrogate would cause a segmentation violation (:issue:`23809`)
-- Bug in :meth:`read_csv` was causing a file descriptor leak on an empty file (:issue:`31488`)
-- Bug in :meth:`read_csv` was causing a segfault when there were blank lines between the header and data rows (:issue:`28071`)
-- Bug in :meth:`read_csv` was raising a misleading exception on a permissions issue (:issue:`23784`)
-- Bug in :meth:`read_csv` was raising an ``IndexError`` when header=None and 2 extra data columns
-- Bug in :meth:`read_sas` was raising an ``AttributeError`` when reading files from Google Cloud Storage (issue:`33069`)
+- :func:`read_csv` will raise a ``ValueError`` when the column names passed in ``parse_dates`` are missing in the :class:`DataFrame` (:issue:`31251`)
+- Bug in :func:`read_excel` where a UTF-8 string with a high surrogate would cause a segmentation violation (:issue:`23809`)
+- Bug in :func:`read_csv` was causing a file descriptor leak on an empty file (:issue:`31488`)
+- Bug in :func:`read_csv` was causing a segfault when there were blank lines between the header and data rows (:issue:`28071`)
+- Bug in :func:`read_csv` was raising a misleading exception on a permissions issue (:issue:`23784`)
+- Bug in :func:`read_csv` was raising an ``IndexError`` when ``header=None`` and two extra data columns were present
+- Bug in :func:`read_sas` was raising an ``AttributeError`` when reading files from Google Cloud Storage (:issue:`33069`)
 - Bug in :meth:`DataFrame.to_sql` where an ``AttributeError`` was raised when saving an out of bounds date (:issue:`26761`)
-- Bug in :meth:`read_excel` did not correctly handle multiple embedded spaces in OpenDocument text cells. (:issue:`32207`)
-- Bug in :meth:`read_json` was raising ``TypeError`` when reading a list of booleans into a Series. (:issue:`31464`)
-- Bug in :func:`pandas.io.json.json_normalize` where location specified by `record_path` doesn't point to an array. (:issue:`26284`)
+- Bug in :func:`read_excel` did not correctly handle multiple embedded spaces in OpenDocument text cells.
(:issue:`32207`)
+- Bug in :func:`read_json` was raising ``TypeError`` when reading a ``list`` of Booleans into a :class:`Series`. (:issue:`31464`)
+- Bug in :func:`pandas.io.json.json_normalize` where location specified by ``record_path`` doesn't point to an array. (:issue:`26284`)
 - :func:`pandas.read_hdf` has a more explicit error message when loading an unsupported HDF file (:issue:`9539`)
-- Bug in :meth:`~DataFrame.read_feather` was raising an `ArrowIOError` when reading an s3 or http file path (:issue:`29055`)
-- Bug in :meth:`~DataFrame.to_excel` could not handle the column name `render` and was raising an ``KeyError`` (:issue:`34331`)
-- Bug in :meth:`~SQLDatabase.execute` was raising a ``ProgrammingError`` for some DB-API drivers when the SQL statement contained the `%` character and no parameters were present (:issue:`34211`)
-- Bug in :meth:`~pandas.io.stata.StataReader` which resulted in categorical variables with difference dtypes when reading data using an iterator. (:issue:`31544`)
-- :meth:`HDFStore.keys` has now an optional `include` parameter that allows the retrieval of all native HDF5 table names (:issue:`29916`)
-- `TypeError` exceptions raised by :meth:`read_csv` and :meth:`read_table` were showing as ``parser_f`` when an unexpected keyword argument was passed (:issue:`25648`)
-- Bug in :meth:`read_excel` for ODS files removes 0.0 values (:issue:`27222`)
-- Bug in :meth:`ujson.encode` was raising an `OverflowError` with numbers larger than sys.maxsize (:issue: `34395`)
-- Bug in :meth:`HDFStore.append_to_multiple` was raising a ``ValueError`` when the min_itemsize parameter is set (:issue:`11238`)
-- Bug in :meth:`~HDFStore.create_table` now raises an error when `column` argument was not specified in `data_columns` on input (:issue:`28156`)
-- :meth:`read_json` now could read line-delimited json file from a file url while `lines` and `chunksize` are set.
+- Bug in :meth:`~DataFrame.read_feather` was raising an ``ArrowIOError`` when reading an s3 or http file path (:issue:`29055`)
+- Bug in :meth:`~DataFrame.to_excel` could not handle the column name ``render`` and was raising a ``KeyError`` (:issue:`34331`)
+- Bug in :meth:`~SQLDatabase.execute` was raising a ``ProgrammingError`` for some DB-API drivers when the SQL statement contained the ``%`` character and no parameters were present (:issue:`34211`)
+- Bug in :meth:`~pandas.io.stata.StataReader` which resulted in categorical variables with different dtypes when reading data using an iterator. (:issue:`31544`)
+- :meth:`HDFStore.keys` now has an optional ``include`` parameter that allows the retrieval of all native HDF5 table names (:issue:`29916`)
+- ``TypeError`` exceptions raised by :func:`read_csv` and :func:`read_table` were showing as ``parser_f`` when an unexpected keyword argument was passed (:issue:`25648`)
+- Bug in :func:`read_excel` for ODS files removing 0.0 values (:issue:`27222`)
+- Bug in :func:`ujson.encode` was raising an ``OverflowError`` with numbers larger than ``sys.maxsize`` (:issue:`34395`)
+- Bug in :meth:`HDFStore.append_to_multiple` was raising a ``ValueError`` when the ``min_itemsize`` parameter is set (:issue:`11238`)
+- :meth:`~HDFStore.create_table` now raises an error when the ``column`` argument was not specified in ``data_columns`` on input (:issue:`28156`)
+- :func:`read_json` can now read a line-delimited json file from a file URL when ``lines`` and ``chunksize`` are set.
 - Bug in :meth:`DataFrame.to_sql` when reading DataFrames with ``-np.inf`` entries with MySQL now has a more explicit ``ValueError`` (:issue:`34431`)
 - Bug where capitalised files extensions were not decompressed by read_* functions (:issue:`35164`)
-- Bug in :meth:`read_excel` that was raising a ``TypeError`` when ``header=None`` and ``index_col`` given as list (:issue:`31783`)
-- Bug in :meth:`read_excel` where datetime values are used in the header in a `MultiIndex` (:issue:`34748`)
-- :func:`read_excel` no longer takes ``**kwds`` arguments. This means that passing in keyword ``chunksize`` now raises a ``TypeError`` (previously raised a ``NotImplementedError``), while passing in keyword ``encoding`` now raises a ``TypeError`` (:issue:`34464`)
-- Bug in :meth:`DataFrame.to_records` incorrectly losing timezone information in timezone-aware ``datetime64`` columns (:issue:`32535`)
+- Bug in :func:`read_excel` that was raising a ``TypeError`` when ``header=None`` and ``index_col`` is given as a ``list`` (:issue:`31783`)
+- Bug in :func:`read_excel` where datetime values are used in the header in a :class:`MultiIndex` (:issue:`34748`)
+- :func:`read_excel` no longer takes ``**kwds`` arguments. This means that passing in the keyword argument ``chunksize`` now raises a ``TypeError`` (previously raised a ``NotImplementedError``), while passing in the keyword argument ``encoding`` now raises a ``TypeError`` (:issue:`34464`)
+- Bug in :meth:`DataFrame.to_records` was incorrectly losing timezone information in timezone-aware ``datetime64`` columns (:issue:`32535`)

 Plotting
 ^^^^^^^^

-- :func:`.plot` for line/bar now accepts color by dictonary (:issue:`8193`).
+- :meth:`DataFrame.plot` for line/bar now accepts color by dictionary (:issue:`8193`).
 - Bug in :meth:`DataFrame.plot.hist` where weights are not working for multiple columns (:issue:`33173`)
-- Bug in :meth:`DataFrame.boxplot` and :meth:`DataFrame.plot.boxplot` lost color attributes of ``medianprops``, ``whiskerprops``, ``capprops`` and ``medianprops`` (:issue:`30346`)
+- Bug in :meth:`DataFrame.boxplot` and :meth:`DataFrame.plot.boxplot` lost color attributes of ``medianprops``, ``whiskerprops``, ``capprops`` and ``boxprops`` (:issue:`30346`)
 - Bug in :meth:`DataFrame.hist` where the order of ``column`` argument was ignored (:issue:`29235`)
-- Bug in :meth:`DataFrame.plot.scatter` that when adding multiple plots with different ``cmap``, colorbars alway use the first ``cmap`` (:issue:`33389`)
+- Bug in :meth:`DataFrame.plot.scatter` where, when adding multiple plots with different ``cmap``, colorbars always used the first ``cmap`` (:issue:`33389`)
-- Bug in :meth:`DataFrame.plot.scatter` was adding a colorbar to the plot even if the argument `c` was assigned to a column containing color names (:issue:`34316`)
+- Bug in :meth:`DataFrame.plot.scatter` was adding a colorbar to the plot even if the argument ``c`` was assigned to a column containing color names (:issue:`34316`)
 - Bug in :meth:`pandas.plotting.bootstrap_plot` was causing cluttered axes and overlapping labels (:issue:`34905`)
 - Bug in :meth:`DataFrame.plot.scatter` caused an error when plotting variable marker sizes (:issue:`32904`)

 Groupby/resample/rolling
 ^^^^^^^^^^^^^^^^^^^^^^^^

-- Using a :func:`pandas.api.indexers.BaseIndexer` with ``count``, ``min``, ``max``, ``median``, ``skew``, ``cov``, ``corr`` will now return correct results for any monotonic :func:`pandas.api.indexers.BaseIndexer` descendant (:issue:`32865`)
-:meth:`DataFrameGroupby.mean` and :meth:`SeriesGroupby.mean`
(and similarly for :meth:`~DataFrameGroupby.median`, :meth:`~DataFrameGroupby.std` and :meth:`~DataFrameGroupby.var`) now raise a ``TypeError`` if a not-accepted keyword argument is passed into it. Previously a ``UnsupportedFunctionCall`` was raised (``AssertionError`` if ``min_count`` passed into :meth:`~DataFrameGroupby.median`) (:issue:`31485`) -- Bug in :meth:`GroupBy.apply` raises ``ValueError`` when the ``by`` axis is not sorted and has duplicates and the applied ``func`` does not mutate passed in objects (:issue:`30667`) -- Bug in :meth:`DataFrameGroupby.transform` produces incorrect result with transformation functions (:issue:`30918`) +- Using a :class:`pandas.api.indexers.BaseIndexer` with ``count``, ``min``, ``max``, ``median``, ``skew``, ``cov``, ``corr`` will now return correct results for any monotonic :class:`pandas.api.indexers.BaseIndexer` descendant (:issue:`32865`) +- :meth:`DataFrameGroupby.mean` and :meth:`SeriesGroupby.mean` (and similarly for :meth:`~DataFrameGroupby.median`, :meth:`~DataFrameGroupby.std` and :meth:`~DataFrameGroupby.var`) now raise a ``TypeError`` if a non-accepted keyword argument is passed into it. Previously an ``UnsupportedFunctionCall`` was raised (``AssertionError`` if ``min_count`` passed into :meth:`~DataFrameGroupby.median`) (:issue:`31485`) +- Bug in :meth:`GroupBy.apply` raises ``ValueError`` when the ``by`` axis is not sorted, has duplicates, and the applied ``func`` does not mutate passed in objects (:issue:`30667`) +- Bug in :meth:`DataFrameGroupBy.transform` produces an incorrect result with transformation functions (:issue:`30918`) - Bug in :meth:`Groupby.transform` was returning the wrong result when grouping by multiple keys of which some were categorical and others not (:issue:`32494`) -- Bug in :meth:`GroupBy.count` causes segmentation fault when grouped-by column contains NaNs (:issue:`32841`) -- Bug in :meth:`DataFrame.groupby` and :meth:`Series.groupby` produces inconsistent type when aggregating Boolean series (:issue:`32894`) +- Bug in :meth:`GroupBy.count` causes segmentation fault when grouped-by columns contain NaNs (:issue:`32841`) +- Bug in :meth:`DataFrame.groupby` and :meth:`Series.groupby` produces inconsistent type when aggregating Boolean :class:`Series` (:issue:`32894`) - Bug in :meth:`DataFrameGroupBy.sum` and :meth:`SeriesGroupBy.sum` where a large negative number would be returned when the number of non-null values was below ``min_count`` for nullable integer dtypes (:issue:`32861`) -- Bug in :meth:`SeriesGroupBy.quantile` raising on nullable integers (:issue:`33136`) +- Bug in :meth:`SeriesGroupBy.quantile` was raising on nullable integers (:issue:`33136`) - Bug in :meth:`DataFrame.resample` where an ``AmbiguousTimeError`` would be raised when the resulting timezone aware :class:`DatetimeIndex` had a DST transition at midnight (:issue:`25758`) - Bug in :meth:`DataFrame.groupby` where a ``ValueError`` would be raised when grouping by a categorical column with read-only categories and ``sort=False`` (:issue:`33410`) - Bug in :meth:`GroupBy.agg`, :meth:`GroupBy.transform`, and :meth:`GroupBy.resample` where subclasses are not preserved (:issue:`28330`) -- Bug in :meth:`SeriesGroupBy.agg` where any column name was accepted in the named aggregation of ``SeriesGroupBy`` previously. The behaviour now allows only ``str`` and callables else would raise ``TypeError``. 
(:issue:`34422`)
-- Bug in :meth:`DataFrame.groupby` lost index, when one of the ``agg`` keys referenced an empty list (:issue:`32580`)
+- Bug in :meth:`SeriesGroupBy.agg` where any column name was accepted in the named aggregation of :class:`SeriesGroupBy` previously. The behaviour now allows only ``str`` and callables, and raises a ``TypeError`` otherwise. (:issue:`34422`)
+- Bug in :meth:`DataFrame.groupby` lost the name of the :class:`Index` when one of the ``agg`` keys referenced an empty list (:issue:`32580`)
 - Bug in :meth:`Rolling.apply` where ``center=True`` was ignored when ``engine='numba'`` was specified (:issue:`34784`)
 - Bug in :meth:`DataFrame.ewm.cov` was throwing ``AssertionError`` for :class:`MultiIndex` inputs (:issue:`34440`)
-- Bug in :meth:`core.groupby.DataFrameGroupBy.quantile` raises ``TypeError`` for non-numeric types rather than dropping columns (:issue:`27892`)
+- Bug in :meth:`core.groupby.DataFrameGroupBy.quantile` raised ``TypeError`` for non-numeric types rather than dropping the columns (:issue:`27892`)
 - Bug in :meth:`core.groupby.DataFrameGroupBy.transform` when ``func='nunique'`` and columns are of type ``datetime64``, the result would also be of type ``datetime64`` instead of ``int64`` (:issue:`35109`)
 - Bug in :meth:`DataFrame.groupby` raising an ``AttributeError`` when selecting a column and aggregating with ``as_index=False`` (:issue:`35246`).
-- Bug in :meth:'DataFrameGroupBy.first' and :meth:'DataFrameGroupBy.last' that would raise an unnecessary ``ValueError`` when grouping on multiple ``Categoricals`` (:issue:`34951`)
+- Bug in :meth:`DataFrameGroupBy.first` and :meth:`DataFrameGroupBy.last` that would raise an unnecessary ``ValueError`` when grouping on multiple ``Categoricals`` (:issue:`34951`)

 Reshaping
 ^^^^^^^^^

-- Bug effecting all numeric and boolean reduction methods not returning subclassed data type. (:issue:`25596`)
-- Bug in :meth:`DataFrame.pivot_table` when only MultiIndexed columns is set (:issue:`17038`)
-- Bug in :meth:`DataFrame.unstack` and :meth:`Series.unstack` can take tuple names in MultiIndexed data (:issue:`19966`)
+- Bug affecting all numeric and Boolean reduction methods not returning subclassed data type. (:issue:`25596`)
+- Bug in :meth:`DataFrame.pivot_table` when only :class:`MultiIndex` columns are set (:issue:`17038`)
+- Bug in :meth:`DataFrame.unstack` and :meth:`Series.unstack` can take tuple names in :class:`MultiIndex` data (:issue:`19966`)
 - Bug in :meth:`DataFrame.pivot_table` when ``margin`` is ``True`` and only ``column`` is defined (:issue:`31016`)
-- Fix incorrect error message in :meth:`DataFrame.pivot` when ``columns`` is set to ``None``. (:issue:`30924`)
-- Bug in :func:`crosstab` when inputs are two Series and have tuple names, the output will keep dummy MultiIndex as columns. (:issue:`18321`)
+- Fixed incorrect error message in :meth:`DataFrame.pivot` when ``columns`` is set to ``None``. (:issue:`30924`)
+- Bug in :func:`crosstab` when inputs are two :class:`Series` and have tuple names, the output will keep a dummy :class:`MultiIndex` as columns.
(:issue:`18321`)
- :meth:`DataFrame.pivot` can now take lists for ``index`` and ``columns`` arguments (:issue:`21425`)
- Bug in :func:`concat` where the resulting indices are not copied when ``copy=True`` (:issue:`29879`)
- Bug in :meth:`SeriesGroupBy.aggregate` was resulting in aggregations being overwritten when they shared the same name (:issue:`30880`)
-- Bug where :meth:`Index.astype` would lose the name attribute when converting from ``Float64Index`` to ``Int64Index``, or when casting to an ``ExtensionArray`` dtype (:issue:`32013`)
-- :meth:`Series.append` will now raise a ``TypeError`` when passed a DataFrame or a sequence containing Dataframe (:issue:`31413`)
+- Bug where :meth:`Index.astype` would lose the :attr:`name` attribute when converting from ``Float64Index`` to ``Int64Index``, or when casting to an ``ExtensionArray`` dtype (:issue:`32013`)
+- :meth:`Series.append` will now raise a ``TypeError`` when passed a :class:`DataFrame` or a sequence containing :class:`DataFrame` (:issue:`31413`)
- :meth:`DataFrame.replace` and :meth:`Series.replace` will raise a ``TypeError`` if ``to_replace`` is not an expected type. Previously the ``replace`` would fail silently (:issue:`18634`)
-- Bug on inplace operation of a Series that was adding a column to the DataFrame from where it was originally dropped from (using inplace=True) (:issue:`30484`)
+- Bug on inplace operation of a :class:`Series` that was adding a column to the :class:`DataFrame` from which it was originally dropped (using ``inplace=True``) (:issue:`30484`)
- Bug in :meth:`DataFrame.apply` where callback was called with :class:`Series` parameter even though ``raw=True`` requested. (:issue:`32423`)
- Bug in :meth:`DataFrame.pivot_table` losing timezone information when creating a :class:`MultiIndex` level from a column with timezone-aware dtype (:issue:`32558`)
-- Bug in :meth:`concat` where when passing a non-dict mapping as ``objs`` would raise a ``TypeError`` (:issue:`32863`)
-- :meth:`DataFrame.agg` now provides more descriptive ``SpecificationError`` message when attempting to aggregating non-existant column (:issue:`32755`)
-- Bug in :meth:`DataFrame.unstack` when MultiIndexed columns and MultiIndexed rows were used (:issue:`32624`, :issue:`24729` and :issue:`28306`)
-- Appending a dictionary to a :class:`DataFrame` without passing ``ignore_index=True`` will raise ``TypeError: Can only append a dict if ignore_index=True`` instead of ``TypeError: Can only append a Series if ignore_index=True or if the Series has a name`` (:issue:`30871`)
+- Bug in :func:`concat` where passing a non-dict mapping as ``objs`` would raise a ``TypeError`` (:issue:`32863`)
+- :meth:`DataFrame.agg` now provides a more descriptive ``SpecificationError`` message when attempting to aggregate a non-existent column (:issue:`32755`)
+- Bug in :meth:`DataFrame.unstack` when :class:`MultiIndex` columns and :class:`MultiIndex` rows were used (:issue:`32624`, :issue:`24729` and :issue:`28306`)
+- Appending a dictionary to a :class:`DataFrame` without passing ``ignore_index=True`` will raise ``TypeError: Can only append a dict if ignore_index=True`` instead of ``TypeError: Can only append a Series if ignore_index=True or if the Series has a name`` (:issue:`30871`)
- Bug in :meth:`DataFrame.corrwith()`, :meth:`DataFrame.memory_usage()`, :meth:`DataFrame.dot()`, :meth:`DataFrame.idxmin()`, :meth:`DataFrame.idxmax()`, :meth:`DataFrame.duplicated()`, :meth:`DataFrame.isin()`, :meth:`DataFrame.count()`, :meth:`Series.explode()`, 
:meth:`Series.asof()` and :meth:`DataFrame.asof()` not returning subclassed types. (:issue:`31331`)
-- Bug in :func:`concat` was not allowing for concatenation of ``DataFrame`` and ``Series`` with duplicate keys (:issue:`33654`)
-- Bug in :func:`cut` raised an error when non-unique labels (:issue:`33141`)
+- Bug in :func:`concat` was not allowing for concatenation of :class:`DataFrame` and :class:`Series` with duplicate keys (:issue:`33654`)
+- Bug in :func:`cut` raised an error when the argument ``labels`` contained duplicates (:issue:`33141`)
- Ensure only named functions can be used in :func:`eval()` (:issue:`32460`)
-- Bug in :func:`Dataframe.aggregate` and :func:`Series.aggregate` was causing recursive loop in some cases (:issue:`34224`)
-- Fixed bug in :func:`melt` where melting MultiIndex columns with ``col_level`` > 0 would raise a ``KeyError`` on ``id_vars`` (:issue:`34129`)
-- Bug in :meth:`Series.where` with an empty Series and empty ``cond`` having non-bool dtype (:issue:`34592`)
-- Fixed regression where :meth:`DataFrame.apply` would raise ``ValueError`` for elements whth ``S`` dtype (:issue:`34529`)
+- Bug in :meth:`DataFrame.aggregate` and :meth:`Series.aggregate` was causing a recursive loop in some cases (:issue:`34224`)
+- Fixed bug in :func:`melt` where melting :class:`MultiIndex` columns with ``col_level > 0`` would raise a ``KeyError`` on ``id_vars`` (:issue:`34129`)
+- Bug in :meth:`Series.where` with an empty :class:`Series` and empty ``cond`` having non-bool dtype (:issue:`34592`)
+- Fixed regression where :meth:`DataFrame.apply` would raise ``ValueError`` for elements with ``S`` dtype (:issue:`34529`)

Sparse
^^^^^^
- Creating a :class:`SparseArray` from timezone-aware dtype will issue a warning before dropping timezone information, instead of doing so silently (:issue:`32501`)
- Bug in :meth:`arrays.SparseArray.from_spmatrix` wrongly read scipy sparse matrix (:issue:`31991`)
-- Bug in :meth:`Series.sum` with ``SparseArray`` raises ``TypeError`` (:issue:`25777`)
+- Bug in :meth:`Series.sum` with ``SparseArray`` raised a ``TypeError`` (:issue:`25777`)
- Bug where :class:`DataFrame` containing an all-sparse :class:`SparseArray` filled with ``NaN`` when indexed by a list-like (:issue:`27781`, :issue:`29563`)
- The repr of :class:`SparseDtype` now includes the repr of its ``fill_value`` attribute. Previously it used ``fill_value``'s string representation (:issue:`34352`)
- Bug where empty :class:`DataFrame` could not be cast to :class:`SparseDtype` (:issue:`33113`)
@@ -1182,23 +1181,23 @@ ExtensionArray
^^^^^^^^^^^^^^
- Fixed bug where :meth:`Series.value_counts` would raise on empty input of ``Int64`` dtype (:issue:`33317`)
-- Fixed bug in :func:`concat` when concatenating DataFrames with non-overlaping columns resulting in object-dtype columns rather than preserving the extension dtype (:issue:`27692`, :issue:`33027`)
+- Fixed bug in :func:`concat` when concatenating :class:`DataFrame` objects with non-overlapping columns resulting in object-dtype columns rather than preserving the extension dtype (:issue:`27692`, :issue:`33027`)
- Fixed bug where :meth:`StringArray.isna` would return ``False`` for NA values when ``pandas.options.mode.use_inf_as_na`` was set to ``True`` (:issue:`33655`)
- Fixed bug in :class:`Series` construction with EA dtype and index but no data or scalar data fails (:issue:`26469`)
- Fixed bug that caused :meth:`Series.__repr__()` to crash for extension types whose elements are multidimensional arrays (:issue:`33770`).
- Fixed bug where :meth:`Series.update` would raise a ``ValueError`` for ``ExtensionArray`` dtypes with missing values (:issue:`33980`) - Fixed bug where :meth:`StringArray.memory_usage` was not implemented (:issue:`33963`) -- Fixed bug where :meth:`DataFrameGroupBy` would ignore the ``min_count`` argument for aggregations on nullable boolean dtypes (:issue:`34051`) -- Fixed bug that `DataFrame(columns=.., dtype='string')` would fail (:issue:`27953`, :issue:`33623`) +- Fixed bug where :meth:`DataFrameGroupBy` would ignore the ``min_count`` argument for aggregations on nullable Boolean dtypes (:issue:`34051`) +- Fixed bug where the constructor of :class:`DataFrame` with ``dtype='string'`` would fail (:issue:`27953`, :issue:`33623`) - Bug where :class:`DataFrame` column set to scalar extension type was considered an object type rather than the extension type (:issue:`34832`) -- Fixed bug in ``IntegerArray.astype`` to correctly copy the mask as well (:issue:`34931`). +- Fixed bug in :meth:`IntegerArray.astype` to correctly copy the mask as well (:issue:`34931`). Other ^^^^^ - Set operations on an object-dtype :class:`Index` now always return object-dtype results (:issue:`31401`) -- Fixed :func:`pandas.testing.assert_series_equal` to correctly raise if left object is a different subclass with ``check_series_type=True`` (:issue:`32670`). -- Getting a missing attribute in a query/eval string raises the correct ``AttributeError`` (:issue:`32408`) +- Fixed :func:`pandas.testing.assert_series_equal` to correctly raise if the ``left`` argument is a different subclass with ``check_series_type=True`` (:issue:`32670`). +- Getting a missing attribute in a :meth:`DataFrame.query` or :meth:`DataFrame.eval` string raises the correct ``AttributeError`` (:issue:`32408`) - Fixed bug in :func:`pandas.testing.assert_series_equal` where dtypes were checked for ``Interval`` and ``ExtensionArray`` operands when ``check_dtype`` was ``False`` (:issue:`32747`) - Bug in :meth:`DataFrame.__dir__` caused a segfault when using unicode surrogates in a column name (:issue:`25509`) - Bug in :meth:`DataFrame.equals` and :meth:`Series.equals` in allowing subclasses to be equal (:issue:`34402`). From 6302f7b98ad24adda2d5a98fef3956f04f28039d Mon Sep 17 00:00:00 2001 From: Joris Van den Bossche Date: Mon, 27 Jul 2020 20:00:06 +0200 Subject: [PATCH 173/632] REGR: revert ExtensionBlock.set to be in-place (#35271) * REGR: revert ExtensionBlock.set to be in-place Co-authored-by: Simon Hawkins Co-authored-by: Tom Augspurger --- doc/source/whatsnew/v1.1.0.rst | 1 - pandas/core/internals/blocks.py | 2 +- pandas/tests/indexing/test_iloc.py | 1 + pandas/tests/indexing/test_indexing.py | 10 ++++++++++ 4 files changed, 12 insertions(+), 2 deletions(-) diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst index c2a4abbea107c..04a816b50103c 100644 --- a/doc/source/whatsnew/v1.1.0.rst +++ b/doc/source/whatsnew/v1.1.0.rst @@ -999,7 +999,6 @@ Indexing - Bug in :meth:`Series.__getitem__` indexing with non-standard scalars, e.g. 
``np.dtype`` (:issue:`32684`) - Bug in :class:`Index` constructor where an unhelpful error message was raised for NumPy scalars (:issue:`33017`) - Bug in :meth:`DataFrame.lookup` incorrectly raising an ``AttributeError`` when ``frame.index`` or ``frame.columns`` is not unique; this will now raise a ``ValueError`` with a helpful error message (:issue:`33041`) -- Bug in :meth:`DataFrame.iloc.__setitem__` creating a new array instead of overwriting ``Categorical`` values in-place (:issue:`32831`) - Bug in :class:`Interval` where a :class:`Timedelta` could not be added or subtracted from a :class:`Timestamp` interval (:issue:`32023`) - Bug in :meth:`DataFrame.copy` not invalidating _item_cache after copy caused post-copy value updates to not be reflected (:issue:`31784`) - Fixed regression in :meth:`DataFrame.loc` and :meth:`Series.loc` throwing an error when a ``datetime64[ns, tz]`` value is provided (:issue:`32395`) diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index cc0f09ced7399..6ca6eca1ff829 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -1589,7 +1589,7 @@ def should_store(self, value: ArrayLike) -> bool: def set(self, locs, values): assert locs.tolist() == [0] - self.values[:] = values + self.values = values def putmask( self, mask, new, inplace: bool = False, axis: int = 0, transpose: bool = False, diff --git a/pandas/tests/indexing/test_iloc.py b/pandas/tests/indexing/test_iloc.py index c5f40102874dd..4fae01ec710fd 100644 --- a/pandas/tests/indexing/test_iloc.py +++ b/pandas/tests/indexing/test_iloc.py @@ -694,6 +694,7 @@ def test_series_indexing_zerodim_np_array(self): result = s.iloc[np.array(0)] assert result == 1 + @pytest.mark.xfail(reason="https://github.com/pandas-dev/pandas/issues/33457") def test_iloc_setitem_categorical_updates_inplace(self): # Mixed dtype ensures we go through take_split_path in setitem_with_indexer cat = pd.Categorical(["A", "B", "C"]) diff --git a/pandas/tests/indexing/test_indexing.py b/pandas/tests/indexing/test_indexing.py index ced70069dd955..5b7f013d5de31 100644 --- a/pandas/tests/indexing/test_indexing.py +++ b/pandas/tests/indexing/test_indexing.py @@ -1100,3 +1100,13 @@ def test_long_text_missing_labels_inside_loc_error_message_limited(): error_message_regex = "long_missing_label_text_0.*\\\\n.*long_missing_label_text_1" with pytest.raises(KeyError, match=error_message_regex): s.loc[["a", "c"] + missing_labels] + + +def test_setitem_categorical(): + # https://github.com/pandas-dev/pandas/issues/35369 + df = pd.DataFrame({"h": pd.Series(list("mn")).astype("category")}) + df.h = df.h.cat.reorder_categories(["n", "m"]) + expected = pd.DataFrame( + {"h": pd.Categorical(["m", "n"]).reorder_categories(["n", "m"])} + ) + tm.assert_frame_equal(df, expected) From 7f3502d6ed9ec9f1e1a555185e357ce339a8f4bc Mon Sep 17 00:00:00 2001 From: Tom Augspurger Date: Tue, 28 Jul 2020 08:11:26 -0500 Subject: [PATCH 174/632] DOC: 1.1.0 release date (#35435) * DOC: 1.1.0 release date --- doc/source/whatsnew/v1.1.0.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst index 04a816b50103c..a49b29d691692 100644 --- a/doc/source/whatsnew/v1.1.0.rst +++ b/doc/source/whatsnew/v1.1.0.rst @@ -1,7 +1,7 @@ .. _whatsnew_110: -What's new in 1.1.0 (??) ------------------------- +What's new in 1.1.0 (July 28, 2020) +----------------------------------- These are the changes in pandas 1.1.0. 
See :ref:`release` for a full changelog including other versions of pandas. From d9fff2792bf16178d4e450fe7384244e50635733 Mon Sep 17 00:00:00 2001 From: Pandas Development Team Date: Tue, 28 Jul 2020 08:14:59 -0500 Subject: [PATCH 175/632] RLS: 1.1.0 From d1b1faa3f68dad6163000784c34a66b8f6c227e1 Mon Sep 17 00:00:00 2001 From: Tom Augspurger Date: Tue, 28 Jul 2020 14:28:37 -0500 Subject: [PATCH 176/632] Start 1.2.0 From 9b4d875ba3fe4535d937a56c5260d7f4c1710ab5 Mon Sep 17 00:00:00 2001 From: Florian Roscheck <9593883+flrs@users.noreply.github.com> Date: Wed, 29 Jul 2020 02:12:21 -0700 Subject: [PATCH 177/632] DOC: Fix small spelling mistake in style docs (#35355) --- doc/source/user_guide/style.ipynb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/user_guide/style.ipynb b/doc/source/user_guide/style.ipynb index fd8dda4fe365e..77a1fef28f373 100644 --- a/doc/source/user_guide/style.ipynb +++ b/doc/source/user_guide/style.ipynb @@ -141,7 +141,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "In this case, the cell's style depends only on it's own value.\n", + "In this case, the cell's style depends only on its own value.\n", "That means we should use the `Styler.applymap` method which works elementwise." ] }, From 725900f9e6eacb7f02f38f68c9ef497560515f16 Mon Sep 17 00:00:00 2001 From: Ty Mick Date: Wed, 29 Jul 2020 05:13:53 -0400 Subject: [PATCH 178/632] Improve dropna subset example (#35337) --- pandas/core/frame.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pandas/core/frame.py b/pandas/core/frame.py index f52341ed782d8..3f634c1e6e1ff 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -4966,9 +4966,10 @@ def dropna(self, axis=0, how="any", thresh=None, subset=None, inplace=False): Define in which columns to look for missing values. - >>> df.dropna(subset=['name', 'born']) + >>> df.dropna(subset=['name', 'toy']) name toy born 1 Batman Batmobile 1940-04-25 + 2 Catwoman Bullwhip NaT Keep the DataFrame with valid entries in the same variable. 
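The amended example in the patch above works because ``subset`` limits the columns that :meth:`DataFrame.dropna` inspects for missing values, so the ``Catwoman`` row survives its ``NaT`` in ``born``. A minimal sketch reproducing the docstring's data with the standard pandas API (the variable names are illustrative)::

    import numpy as np
    import pandas as pd

    df = pd.DataFrame(
        {
            "name": ["Alfred", "Batman", "Catwoman"],
            "toy": [np.nan, "Batmobile", "Bullwhip"],
            "born": [pd.NaT, pd.Timestamp("1940-04-25"), pd.NaT],
        }
    )

    # Only 'name' and 'toy' are checked for missing values, so row 2 is
    # kept despite born=NaT, while row 0 is dropped because 'toy' is NaN.
    print(df.dropna(subset=["name", "toy"]))
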
From 55d2c653d3cf44613d6d8a5e5f3a3e3169b06d44 Mon Sep 17 00:00:00 2001 From: Fangchen Li Date: Wed, 29 Jul 2020 04:15:17 -0500 Subject: [PATCH 179/632] CI: resolve conflict mypy and isort (#35339) --- pandas/tests/test_algos.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py index a080bf0feaebc..6c6bdb6b1b2bd 100644 --- a/pandas/tests/test_algos.py +++ b/pandas/tests/test_algos.py @@ -6,7 +6,8 @@ from numpy.random import RandomState import pytest -from pandas._libs import algos as libalgos, groupby as libgroupby, hashtable as ht +from pandas._libs import algos as libalgos, hashtable as ht +from pandas._libs.groupby import group_var_float32, group_var_float64 from pandas.compat.numpy import np_array_datetime64_compat import pandas.util._test_decorators as td @@ -1493,7 +1494,7 @@ def test_group_var_constant(self): class TestGroupVarFloat64(GroupVarTestMixin): __test__ = True - algo = staticmethod(libgroupby.group_var_float64) + algo = staticmethod(group_var_float64) dtype = np.float64 rtol = 1e-5 @@ -1516,7 +1517,7 @@ def test_group_var_large_inputs(self): class TestGroupVarFloat32(GroupVarTestMixin): __test__ = True - algo = staticmethod(libgroupby.group_var_float32) + algo = staticmethod(group_var_float32) dtype = np.float32 rtol = 1e-2 From a95e680e97e3b9a1aba86cbecbdf045133533b3b Mon Sep 17 00:00:00 2001 From: Simon Hawkins Date: Wed, 29 Jul 2020 10:16:24 +0100 Subject: [PATCH 180/632] TYP: remove # type: ignore for unpacking compression_args (#35344) --- pandas/io/common.py | 29 +++++++++++------------------ 1 file changed, 11 insertions(+), 18 deletions(-) diff --git a/pandas/io/common.py b/pandas/io/common.py index bd77a1e69c138..6ac8051f35b6f 100644 --- a/pandas/io/common.py +++ b/pandas/io/common.py @@ -267,8 +267,8 @@ def file_path_to_url(path: str) -> str: def get_compression_method( - compression: Optional[Union[str, Mapping[str, str]]] -) -> Tuple[Optional[str], Dict[str, str]]: + compression: Optional[Union[str, Mapping[str, Any]]] +) -> Tuple[Optional[str], Dict[str, Any]]: """ Simplifies a compression argument to a compression method string and a mapping containing additional arguments. @@ -282,21 +282,23 @@ def get_compression_method( Returns ------- tuple of ({compression method}, Optional[str] - {compression arguments}, Dict[str, str]) + {compression arguments}, Dict[str, Any]) Raises ------ ValueError on mapping missing 'method' key """ + compression_method: Optional[str] if isinstance(compression, Mapping): compression_args = dict(compression) try: - compression = compression_args.pop("method") + compression_method = compression_args.pop("method") except KeyError as err: raise ValueError("If mapping, compression must have key 'method'") from err else: compression_args = {} - return compression, compression_args + compression_method = compression + return compression_method, compression_args def infer_compression( @@ -434,28 +436,19 @@ def get_handle( if compression: - # GH33398 the type ignores here seem related to mypy issue #5382; - # it may be possible to remove them once that is resolved. 
- # GZ Compression if compression == "gzip": if is_path: - f = gzip.open( - path_or_buf, mode, **compression_args # type: ignore - ) + f = gzip.open(path_or_buf, mode, **compression_args) else: - f = gzip.GzipFile( - fileobj=path_or_buf, **compression_args # type: ignore - ) + f = gzip.GzipFile(fileobj=path_or_buf, **compression_args) # BZ Compression elif compression == "bz2": if is_path: - f = bz2.BZ2File( - path_or_buf, mode, **compression_args # type: ignore - ) + f = bz2.BZ2File(path_or_buf, mode, **compression_args) else: - f = bz2.BZ2File(path_or_buf, **compression_args) # type: ignore + f = bz2.BZ2File(path_or_buf, **compression_args) # ZIP Compression elif compression == "zip": From 2c3fa22c076ddf9ab6f67a6361a9fb88ffc5a689 Mon Sep 17 00:00:00 2001 From: Andrew Wieteska <48889395+arw2019@users.noreply.github.com> Date: Wed, 29 Jul 2020 05:20:32 -0400 Subject: [PATCH 181/632] DOC: whatsnew for 1.2 (#35315) --- doc/source/whatsnew/index.rst | 8 ++ doc/source/whatsnew/v1.2.0.rst | 168 +++++++++++++++++++++++++++++++++ 2 files changed, 176 insertions(+) create mode 100644 doc/source/whatsnew/v1.2.0.rst diff --git a/doc/source/whatsnew/index.rst b/doc/source/whatsnew/index.rst index ad5bb5a5b2d72..17043b83f03df 100644 --- a/doc/source/whatsnew/index.rst +++ b/doc/source/whatsnew/index.rst @@ -10,6 +10,14 @@ This is the list of changes to pandas between each release. For full details, see the `commit logs `_. For install and upgrade instructions, see :ref:`install`. +Version 1.2 +----------- + +.. toctree:: + :maxdepth: 2 + + v1.2.0 + Version 1.1 ----------- diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst new file mode 100644 index 0000000000000..2066858e5de86 --- /dev/null +++ b/doc/source/whatsnew/v1.2.0.rst @@ -0,0 +1,168 @@ +.. _whatsnew_120: + +What's new in 1.2.0 (??) +------------------------ + +These are the changes in pandas 1.2.0. See :ref:`release` for a full changelog +including other versions of pandas. + +{{ header }} + +.. --------------------------------------------------------------------------- + +Enhancements +~~~~~~~~~~~~ + +.. _whatsnew_120.enhancements.other: + +Other enhancements +^^^^^^^^^^^^^^^^^^ + +- +- + + +.. --------------------------------------------------------------------------- + +.. _whatsnew_120.deprecations: + +Deprecations +~~~~~~~~~~~~ + +- +- + +.. --------------------------------------------------------------------------- + + +.. _whatsnew_120.performance: + +Performance improvements +~~~~~~~~~~~~~~~~~~~~~~~~ + +- +- + +.. --------------------------------------------------------------------------- + +.. _whatsnew_120.bug_fixes: + +Bug fixes +~~~~~~~~~ + + +Categorical +^^^^^^^^^^^ + +- +- + +Datetimelike +^^^^^^^^^^^^ +- +- + +Timedelta +^^^^^^^^^ + +- +- + +Timezones +^^^^^^^^^ + +- +- + + +Numeric +^^^^^^^ +- +- + +Conversion +^^^^^^^^^^ + +- +- + +Strings +^^^^^^^ + +- +- + + +Interval +^^^^^^^^ + +- +- + +Indexing +^^^^^^^^ + +- +- + +Missing +^^^^^^^ + +- +- + +MultiIndex +^^^^^^^^^^ + +- +- + +I/O +^^^ + +- +- + +Plotting +^^^^^^^^ + +- +- + +Groupby/resample/rolling +^^^^^^^^^^^^^^^^^^^^^^^^ + +- +- + + +Reshaping +^^^^^^^^^ + +- +- + +Sparse +^^^^^^ + +- +- + +ExtensionArray +^^^^^^^^^^^^^^ + +- +- + + +Other +^^^^^ +- +- + +.. --------------------------------------------------------------------------- + +.. 
_whatsnew_120.contributors: + +Contributors +~~~~~~~~~~~~ \ No newline at end of file From 3ab77a90c9a68d6f9609cda6221f78decd905e36 Mon Sep 17 00:00:00 2001 From: Tom Augspurger Date: Wed, 29 Jul 2020 08:45:32 -0500 Subject: [PATCH 182/632] DOC: Start 1.1.1 (#35452) --- doc/source/whatsnew/index.rst | 1 + doc/source/whatsnew/v1.1.1.rst | 54 ++++++++++++++++++++++++++++++++++ 2 files changed, 55 insertions(+) create mode 100644 doc/source/whatsnew/v1.1.1.rst diff --git a/doc/source/whatsnew/index.rst b/doc/source/whatsnew/index.rst index 17043b83f03df..a280a981c789b 100644 --- a/doc/source/whatsnew/index.rst +++ b/doc/source/whatsnew/index.rst @@ -24,6 +24,7 @@ Version 1.1 .. toctree:: :maxdepth: 2 + v1.1.1 v1.1.0 Version 1.0 diff --git a/doc/source/whatsnew/v1.1.1.rst b/doc/source/whatsnew/v1.1.1.rst new file mode 100644 index 0000000000000..443589308ad4c --- /dev/null +++ b/doc/source/whatsnew/v1.1.1.rst @@ -0,0 +1,54 @@ +.. _whatsnew_111: + +What's new in 1.1.1 (?) +----------------------- + +These are the changes in pandas 1.1.1. See :ref:`release` for a full changelog +including other versions of pandas. + +{{ header }} + +.. --------------------------------------------------------------------------- + +.. _whatsnew_111.regressions: + +Fixed regressions +~~~~~~~~~~~~~~~~~ + +- +- +- + +.. --------------------------------------------------------------------------- + +.. _whatsnew_111.bug_fixes: + +Bug fixes +~~~~~~~~~ + +**Datetimelike** + +- +- + +**Numeric** + +- +- + +**Plotting** + +- + +**Indexing** + +- + +.. --------------------------------------------------------------------------- + +.. _whatsnew_111.contributors: + +Contributors +~~~~~~~~~~~~ + +.. contributors:: v1.1.0..v1.1.1|HEAD From bb2ff3330c8071ba7ceafa7be8badf717f3e03bb Mon Sep 17 00:00:00 2001 From: Fangchen Li Date: Wed, 29 Jul 2020 09:33:02 -0500 Subject: [PATCH 183/632] CLN: resolve isort mypy import conflict (#35134) (#35386) --- pandas/tests/groupby/transform/test_transform.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/pandas/tests/groupby/transform/test_transform.py b/pandas/tests/groupby/transform/test_transform.py index cdaf27e214d80..c09f35526a6bf 100644 --- a/pandas/tests/groupby/transform/test_transform.py +++ b/pandas/tests/groupby/transform/test_transform.py @@ -4,7 +4,7 @@ import numpy as np import pytest -from pandas._libs import groupby +from pandas._libs.groupby import group_cumprod_float64, group_cumsum from pandas.core.dtypes.common import ensure_platform_int, is_timedelta64_dtype @@ -545,14 +545,14 @@ def _check_cython_group_transform_cumulative(pd_op, np_op, dtype): def test_cython_group_transform_cumsum(any_real_dtype): # see gh-4095 dtype = np.dtype(any_real_dtype).type - pd_op, np_op = groupby.group_cumsum, np.cumsum + pd_op, np_op = group_cumsum, np.cumsum _check_cython_group_transform_cumulative(pd_op, np_op, dtype) def test_cython_group_transform_cumprod(): # see gh-4095 dtype = np.float64 - pd_op, np_op = groupby.group_cumprod_float64, np.cumproduct + pd_op, np_op = group_cumprod_float64, np.cumproduct _check_cython_group_transform_cumulative(pd_op, np_op, dtype) @@ -567,13 +567,13 @@ def test_cython_group_transform_algos(): data = np.array([[1], [2], [3], [np.nan], [4]], dtype="float64") actual = np.zeros_like(data) actual.fill(np.nan) - groupby.group_cumprod_float64(actual, data, labels, ngroups, is_datetimelike) + group_cumprod_float64(actual, data, labels, ngroups, is_datetimelike) expected = np.array([1, 2, 6, np.nan, 24], dtype="float64") 
tm.assert_numpy_array_equal(actual[:, 0], expected) actual = np.zeros_like(data) actual.fill(np.nan) - groupby.group_cumsum(actual, data, labels, ngroups, is_datetimelike) + group_cumsum(actual, data, labels, ngroups, is_datetimelike) expected = np.array([1, 3, 6, np.nan, 10], dtype="float64") tm.assert_numpy_array_equal(actual[:, 0], expected) @@ -581,7 +581,7 @@ def test_cython_group_transform_algos(): is_datetimelike = True data = np.array([np.timedelta64(1, "ns")] * 5, dtype="m8[ns]")[:, None] actual = np.zeros_like(data, dtype="int64") - groupby.group_cumsum(actual, data.view("int64"), labels, ngroups, is_datetimelike) + group_cumsum(actual, data.view("int64"), labels, ngroups, is_datetimelike) expected = np.array( [ np.timedelta64(1, "ns"), From 414430c78f8aab601feb4067d3731e70abfce8ec Mon Sep 17 00:00:00 2001 From: Fangchen Li Date: Wed, 29 Jul 2020 09:33:43 -0500 Subject: [PATCH 184/632] isort mypy import confilict (#35380) --- pandas/tests/reshape/merge/test_join.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/pandas/tests/reshape/merge/test_join.py b/pandas/tests/reshape/merge/test_join.py index c33443e24b268..d4d4c4190417e 100644 --- a/pandas/tests/reshape/merge/test_join.py +++ b/pandas/tests/reshape/merge/test_join.py @@ -2,7 +2,7 @@ from numpy.random import randn import pytest -from pandas._libs import join as libjoin +from pandas._libs.join import inner_join, left_outer_join import pandas as pd from pandas import DataFrame, Index, MultiIndex, Series, concat, merge @@ -48,7 +48,7 @@ def test_cython_left_outer_join(self): right = a_([1, 1, 0, 4, 2, 2, 1], dtype=np.int64) max_group = 5 - ls, rs = libjoin.left_outer_join(left, right, max_group) + ls, rs = left_outer_join(left, right, max_group) exp_ls = left.argsort(kind="mergesort") exp_rs = right.argsort(kind="mergesort") @@ -70,7 +70,7 @@ def test_cython_right_outer_join(self): right = a_([1, 1, 0, 4, 2, 2, 1], dtype=np.int64) max_group = 5 - rs, ls = libjoin.left_outer_join(right, left, max_group) + rs, ls = left_outer_join(right, left, max_group) exp_ls = left.argsort(kind="mergesort") exp_rs = right.argsort(kind="mergesort") @@ -116,7 +116,7 @@ def test_cython_inner_join(self): right = a_([1, 1, 0, 4, 2, 2, 1, 4], dtype=np.int64) max_group = 5 - ls, rs = libjoin.inner_join(left, right, max_group) + ls, rs = inner_join(left, right, max_group) exp_ls = left.argsort(kind="mergesort") exp_rs = right.argsort(kind="mergesort") From 08b3ee88aaf08923e659b12ecaf818ef70d88a06 Mon Sep 17 00:00:00 2001 From: Simon Hawkins Date: Wed, 29 Jul 2020 16:43:10 +0100 Subject: [PATCH 185/632] DOC: update Python support policy (#35454) --- doc/source/development/policies.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/development/policies.rst b/doc/source/development/policies.rst index 1031bbfc46457..a564afc408df9 100644 --- a/doc/source/development/policies.rst +++ b/doc/source/development/policies.rst @@ -52,6 +52,6 @@ Python support ~~~~~~~~~~~~~~ pandas will only drop support for specific Python versions (e.g. 3.6.x, 3.7.x) in -pandas **major** releases. +pandas **major** or **minor** releases. .. 
_SemVer: https://semver.org From 3b1d4f1eef46a45b7d32694bb263a2e40a933e74 Mon Sep 17 00:00:00 2001 From: Simon Hawkins Date: Wed, 29 Jul 2020 16:59:28 +0100 Subject: [PATCH 186/632] CI: Unpin pytest (#35272) --- ci/deps/azure-36-32bit.yaml | 2 +- ci/deps/azure-36-locale.yaml | 2 +- ci/deps/azure-36-locale_slow.yaml | 2 +- ci/deps/azure-36-slow.yaml | 2 +- ci/deps/azure-37-locale.yaml | 2 +- ci/deps/azure-37-numpydev.yaml | 2 +- ci/deps/azure-macos-36.yaml | 2 +- ci/deps/azure-windows-36.yaml | 2 +- ci/deps/azure-windows-37.yaml | 2 +- ci/deps/travis-36-cov.yaml | 2 +- ci/deps/travis-36-locale.yaml | 2 +- ci/deps/travis-37-arm64.yaml | 2 +- ci/deps/travis-37.yaml | 2 +- ci/deps/travis-38.yaml | 2 +- environment.yml | 2 +- pandas/_testing.py | 6 ++---- pandas/tests/groupby/test_categorical.py | 10 ++++------ pandas/util/_test_decorators.py | 18 ++++++++++-------- requirements-dev.txt | 2 +- setup.cfg | 2 +- 20 files changed, 33 insertions(+), 35 deletions(-) diff --git a/ci/deps/azure-36-32bit.yaml b/ci/deps/azure-36-32bit.yaml index 2dc53f8181ac4..15704cf0d5427 100644 --- a/ci/deps/azure-36-32bit.yaml +++ b/ci/deps/azure-36-32bit.yaml @@ -23,4 +23,4 @@ dependencies: - pip - pip: - cython>=0.29.16 - - pytest>=5.0.1,<6.0.0rc0 + - pytest>=5.0.1 diff --git a/ci/deps/azure-36-locale.yaml b/ci/deps/azure-36-locale.yaml index d31015fde4741..a9b9a5a47ccf5 100644 --- a/ci/deps/azure-36-locale.yaml +++ b/ci/deps/azure-36-locale.yaml @@ -7,7 +7,7 @@ dependencies: # tools - cython>=0.29.16 - - pytest>=5.0.1,<6.0.0rc0 + - pytest>=5.0.1 - pytest-xdist>=1.21 - pytest-asyncio - hypothesis>=3.58.0 diff --git a/ci/deps/azure-36-locale_slow.yaml b/ci/deps/azure-36-locale_slow.yaml index 23121b985492e..c086b3651afc3 100644 --- a/ci/deps/azure-36-locale_slow.yaml +++ b/ci/deps/azure-36-locale_slow.yaml @@ -7,7 +7,7 @@ dependencies: # tools - cython>=0.29.16 - - pytest>=5.0.1,<6.0.0rc0 + - pytest>=5.0.1 - pytest-xdist>=1.21 - hypothesis>=3.58.0 - pytest-azurepipelines diff --git a/ci/deps/azure-36-slow.yaml b/ci/deps/azure-36-slow.yaml index 0a6d1d13c8549..87bad59fa4873 100644 --- a/ci/deps/azure-36-slow.yaml +++ b/ci/deps/azure-36-slow.yaml @@ -7,7 +7,7 @@ dependencies: # tools - cython>=0.29.16 - - pytest>=5.0.1,<6.0.0rc0 + - pytest>=5.0.1 - pytest-xdist>=1.21 - hypothesis>=3.58.0 diff --git a/ci/deps/azure-37-locale.yaml b/ci/deps/azure-37-locale.yaml index 4dbb6a5344976..6f64c81f299d1 100644 --- a/ci/deps/azure-37-locale.yaml +++ b/ci/deps/azure-37-locale.yaml @@ -6,7 +6,7 @@ dependencies: # tools - cython>=0.29.16 - - pytest>=5.0.1,<6.0.0rc0 + - pytest>=5.0.1 - pytest-xdist>=1.21 - pytest-asyncio - hypothesis>=3.58.0 diff --git a/ci/deps/azure-37-numpydev.yaml b/ci/deps/azure-37-numpydev.yaml index 451fb5884a4af..5cb58756a6ac1 100644 --- a/ci/deps/azure-37-numpydev.yaml +++ b/ci/deps/azure-37-numpydev.yaml @@ -5,7 +5,7 @@ dependencies: - python=3.7.* # tools - - pytest>=5.0.1,<6.0.0rc0 + - pytest>=5.0.1 - pytest-xdist>=1.21 - hypothesis>=3.58.0 - pytest-azurepipelines diff --git a/ci/deps/azure-macos-36.yaml b/ci/deps/azure-macos-36.yaml index 81a27465f9e61..eeea249a19ca1 100644 --- a/ci/deps/azure-macos-36.yaml +++ b/ci/deps/azure-macos-36.yaml @@ -5,7 +5,7 @@ dependencies: - python=3.6.* # tools - - pytest>=5.0.1,<6.0.0rc0 + - pytest>=5.0.1 - pytest-xdist>=1.21 - hypothesis>=3.58.0 - pytest-azurepipelines diff --git a/ci/deps/azure-windows-36.yaml b/ci/deps/azure-windows-36.yaml index 4d7e1d821037b..548660cabaa67 100644 --- a/ci/deps/azure-windows-36.yaml +++ b/ci/deps/azure-windows-36.yaml @@ -7,7 +7,7 
@@ dependencies: # tools - cython>=0.29.16 - - pytest>=5.0.1,<6.0.0rc0 + - pytest>=5.0.1 - pytest-xdist>=1.21 - hypothesis>=3.58.0 - pytest-azurepipelines diff --git a/ci/deps/azure-windows-37.yaml b/ci/deps/azure-windows-37.yaml index 34fca631df6c1..5bbd0e2795d7e 100644 --- a/ci/deps/azure-windows-37.yaml +++ b/ci/deps/azure-windows-37.yaml @@ -7,7 +7,7 @@ dependencies: # tools - cython>=0.29.16 - - pytest>=5.0.1,<6.0.0rc0 + - pytest>=5.0.1 - pytest-xdist>=1.21 - hypothesis>=3.58.0 - pytest-azurepipelines diff --git a/ci/deps/travis-36-cov.yaml b/ci/deps/travis-36-cov.yaml index 5f5ea8034cddf..177e0d3f4c0af 100644 --- a/ci/deps/travis-36-cov.yaml +++ b/ci/deps/travis-36-cov.yaml @@ -7,7 +7,7 @@ dependencies: # tools - cython>=0.29.16 - - pytest>=5.0.1,<6.0.0rc0 + - pytest>=5.0.1 - pytest-xdist>=1.21 - hypothesis>=3.58.0 - pytest-cov # this is only needed in the coverage build diff --git a/ci/deps/travis-36-locale.yaml b/ci/deps/travis-36-locale.yaml index 6bc4aba733ee5..03a1e751b6a86 100644 --- a/ci/deps/travis-36-locale.yaml +++ b/ci/deps/travis-36-locale.yaml @@ -7,7 +7,7 @@ dependencies: # tools - cython>=0.29.16 - - pytest>=5.0.1,<6.0.0rc0 + - pytest>=5.0.1 - pytest-xdist>=1.21 - hypothesis>=3.58.0 diff --git a/ci/deps/travis-37-arm64.yaml b/ci/deps/travis-37-arm64.yaml index f434a03609b26..5cb53489be225 100644 --- a/ci/deps/travis-37-arm64.yaml +++ b/ci/deps/travis-37-arm64.yaml @@ -7,7 +7,7 @@ dependencies: # tools - cython>=0.29.13 - - pytest>=5.0.1,<6.0.0rc0 + - pytest>=5.0.1 - pytest-xdist>=1.21 - hypothesis>=3.58.0 diff --git a/ci/deps/travis-37.yaml b/ci/deps/travis-37.yaml index aaf706d61fe5c..e896233aac63c 100644 --- a/ci/deps/travis-37.yaml +++ b/ci/deps/travis-37.yaml @@ -7,7 +7,7 @@ dependencies: # tools - cython>=0.29.16 - - pytest>=5.0.1,<6.0.0rc0 + - pytest>=5.0.1 - pytest-xdist>=1.21 - hypothesis>=3.58.0 diff --git a/ci/deps/travis-38.yaml b/ci/deps/travis-38.yaml index ac39a223cd086..b879c0f81dab2 100644 --- a/ci/deps/travis-38.yaml +++ b/ci/deps/travis-38.yaml @@ -7,7 +7,7 @@ dependencies: # tools - cython>=0.29.16 - - pytest>=5.0.1,<6.0.0rc0 + - pytest>=5.0.1 - pytest-xdist>=1.21 - hypothesis>=3.58.0 diff --git a/environment.yml b/environment.yml index 53222624619de..3b088ca511be9 100644 --- a/environment.yml +++ b/environment.yml @@ -52,7 +52,7 @@ dependencies: - botocore>=1.11 - hypothesis>=3.82 - moto # mock S3 - - pytest>=5.0.1,<6.0.0rc0 + - pytest>=5.0.1 - pytest-cov - pytest-xdist>=1.21 - pytest-asyncio diff --git a/pandas/_testing.py b/pandas/_testing.py index fc6df7a95e348..1cf9304ed2715 100644 --- a/pandas/_testing.py +++ b/pandas/_testing.py @@ -9,7 +9,7 @@ from shutil import rmtree import string import tempfile -from typing import Any, Callable, List, Optional, Type, Union, cast +from typing import Any, Callable, ContextManager, List, Optional, Type, Union, cast import warnings import zipfile @@ -2880,9 +2880,7 @@ def convert_rows_list_to_csv_str(rows_list: List[str]): return expected -def external_error_raised( - expected_exception: Type[Exception], -) -> Callable[[Type[Exception], None], None]: +def external_error_raised(expected_exception: Type[Exception],) -> ContextManager: """ Helper function to mark pytest.raises that have an external error message. 
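The ``ContextManager`` annotation above matches how the helper is consumed: it wraps ``pytest.raises`` and asserts only the exception type, deliberately leaving the message unchecked. A minimal usage sketch (the test function is hypothetical)::

    import pandas._testing as tm

    def test_external_message_is_ignored():
        # Only the exception type is asserted; the message originates
        # outside pandas and is intentionally not matched.
        with tm.external_error_raised(ValueError):
            raise ValueError("arbitrary third-party message")
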
diff --git a/pandas/tests/groupby/test_categorical.py b/pandas/tests/groupby/test_categorical.py index 7e4513da37dc9..0d447a70b540d 100644 --- a/pandas/tests/groupby/test_categorical.py +++ b/pandas/tests/groupby/test_categorical.py @@ -1294,9 +1294,7 @@ def test_get_nonexistent_category(): ) -def test_series_groupby_on_2_categoricals_unobserved( - reduction_func: str, observed: bool, request -): +def test_series_groupby_on_2_categoricals_unobserved(reduction_func, observed, request): # GH 17605 if reduction_func == "ngroup": pytest.skip("ngroup is not truly a reduction") @@ -1326,7 +1324,7 @@ def test_series_groupby_on_2_categoricals_unobserved( def test_series_groupby_on_2_categoricals_unobserved_zeroes_or_nans( - reduction_func: str, request + reduction_func, request ): # GH 17605 # Tests whether the unobserved categories in the result contain 0 or NaN @@ -1374,7 +1372,7 @@ def test_series_groupby_on_2_categoricals_unobserved_zeroes_or_nans( assert np.issubdtype(result.dtype, np.integer) -def test_dataframe_groupby_on_2_categoricals_when_observed_is_true(reduction_func: str): +def test_dataframe_groupby_on_2_categoricals_when_observed_is_true(reduction_func): # GH 23865 # GH 27075 # Ensure that df.groupby, when 'by' is two pd.Categorical variables, @@ -1402,7 +1400,7 @@ def test_dataframe_groupby_on_2_categoricals_when_observed_is_true(reduction_fun @pytest.mark.parametrize("observed", [False, None]) def test_dataframe_groupby_on_2_categoricals_when_observed_is_false( - reduction_func: str, observed: bool, request + reduction_func, observed, request ): # GH 23865 # GH 27075 diff --git a/pandas/util/_test_decorators.py b/pandas/util/_test_decorators.py index a4a1d83177c50..bdf633839b2cd 100644 --- a/pandas/util/_test_decorators.py +++ b/pandas/util/_test_decorators.py @@ -120,7 +120,9 @@ def _skip_if_no_scipy() -> bool: ) -def skip_if_installed(package: str) -> Callable: +# TODO: return type, _pytest.mark.structures.MarkDecorator is not public +# https://github.com/pytest-dev/pytest/issues/7469 +def skip_if_installed(package: str): """ Skip a test if a package is installed. @@ -134,7 +136,9 @@ def skip_if_installed(package: str) -> Callable: ) -def skip_if_no(package: str, min_version: Optional[str] = None) -> Callable: +# TODO: return type, _pytest.mark.structures.MarkDecorator is not public +# https://github.com/pytest-dev/pytest/issues/7469 +def skip_if_no(package: str, min_version: Optional[str] = None): """ Generic function to help skip tests when required packages are not present on the testing system. 
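These helpers are applied as decorators, which is why only pytest's private ``MarkDecorator`` type could describe the return value precisely. A minimal sketch of typical usage (the test body is hypothetical)::

    import pandas.util._test_decorators as td

    @td.skip_if_no("scipy")
    def test_requires_scipy():
        # Runs only when scipy is importable; otherwise pytest skips it.
        from scipy import stats

        assert stats.norm.cdf(0) == 0.5
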
@@ -196,14 +200,12 @@ def skip_if_no(package: str, min_version: Optional[str] = None) -> Callable: ) -def skip_if_np_lt( - ver_str: str, reason: Optional[str] = None, *args, **kwds -) -> Callable: +# TODO: return type, _pytest.mark.structures.MarkDecorator is not public +# https://github.com/pytest-dev/pytest/issues/7469 +def skip_if_np_lt(ver_str: str, *args, reason: Optional[str] = None): if reason is None: reason = f"NumPy {ver_str} or greater required" - return pytest.mark.skipif( - _np_version < LooseVersion(ver_str), reason=reason, *args, **kwds - ) + return pytest.mark.skipif(_np_version < LooseVersion(ver_str), *args, reason=reason) def parametrize_fixture_doc(*args): diff --git a/requirements-dev.txt b/requirements-dev.txt index 0c024d1b54637..7bf3df176b378 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -32,7 +32,7 @@ boto3 botocore>=1.11 hypothesis>=3.82 moto -pytest>=5.0.1,<6.0.0rc0 +pytest>=5.0.1 pytest-cov pytest-xdist>=1.21 pytest-asyncio diff --git a/setup.cfg b/setup.cfg index 00af7f6f1b79a..ee5725e36d193 100644 --- a/setup.cfg +++ b/setup.cfg @@ -105,7 +105,7 @@ known_dtypes = pandas.core.dtypes known_post_core = pandas.tseries,pandas.io,pandas.plotting sections = FUTURE,STDLIB,THIRDPARTY,PRE_LIBS,PRE_CORE,DTYPES,FIRSTPARTY,POST_CORE,LOCALFOLDER known_first_party = pandas -known_third_party = _pytest,announce,dateutil,docutils,flake8,git,hypothesis,jinja2,lxml,matplotlib,numpy,numpydoc,pkg_resources,pyarrow,pytest,pytz,requests,scipy,setuptools,sphinx,sqlalchemy,validate_docstrings,validate_unwanted_patterns,yaml,odf +known_third_party = announce,dateutil,docutils,flake8,git,hypothesis,jinja2,lxml,matplotlib,numpy,numpydoc,pkg_resources,pyarrow,pytest,pytz,requests,scipy,setuptools,sphinx,sqlalchemy,validate_docstrings,validate_unwanted_patterns,yaml,odf multi_line_output = 3 include_trailing_comma = True force_grid_wrap = 0 From bdcc5bffaadb7488474b65554c4e8e96a00aa4af Mon Sep 17 00:00:00 2001 From: Kevin Sheppard Date: Thu, 30 Jul 2020 19:45:45 +0100 Subject: [PATCH 187/632] MAINT: Use float arange when required or intended (#35477) --- pandas/tests/window/test_base_indexer.py | 4 ++-- pandas/tests/window/test_ewm.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/pandas/tests/window/test_base_indexer.py b/pandas/tests/window/test_base_indexer.py index 4a0212e890d3a..2300d8dd5529b 100644 --- a/pandas/tests/window/test_base_indexer.py +++ b/pandas/tests/window/test_base_indexer.py @@ -140,7 +140,7 @@ def get_window_bounds(self, num_values, min_periods, center, closed): ) def test_rolling_forward_window(constructor, func, np_func, expected, np_kwargs): # GH 32865 - values = np.arange(10) + values = np.arange(10.0) values[5] = 100.0 indexer = FixedForwardWindowIndexer(window_size=3) @@ -177,7 +177,7 @@ def test_rolling_forward_window(constructor, func, np_func, expected, np_kwargs) @pytest.mark.parametrize("constructor", [Series, DataFrame]) def test_rolling_forward_skewness(constructor): - values = np.arange(10) + values = np.arange(10.0) values[5] = 100.0 indexer = FixedForwardWindowIndexer(window_size=5) diff --git a/pandas/tests/window/test_ewm.py b/pandas/tests/window/test_ewm.py index 12c314d5e9ec9..69cd1d1ba069c 100644 --- a/pandas/tests/window/test_ewm.py +++ b/pandas/tests/window/test_ewm.py @@ -108,7 +108,7 @@ def test_ewma_halflife_without_times(halflife_with_times): @pytest.mark.parametrize("min_periods", [0, 2]) def test_ewma_with_times_equal_spacing(halflife_with_times, times, min_periods): halflife = 
halflife_with_times - data = np.arange(10) + data = np.arange(10.0) data[::2] = np.nan df = DataFrame({"A": data, "time_col": date_range("2000", freq="D", periods=10)}) result = df.ewm(halflife=halflife, min_periods=min_periods, times=times).mean() From 6b2d0260c818e62052eaf535767f3a8c4b446c69 Mon Sep 17 00:00:00 2001 From: Fangchen Li Date: Sat, 1 Aug 2020 03:43:00 -0500 Subject: [PATCH 188/632] CI: unpin isort 5 (#35470) --- asv_bench/benchmarks/frame_ctor.py | 2 +- asv_bench/benchmarks/gil.py | 8 +- asv_bench/benchmarks/io/parsers.py | 2 +- asv_bench/benchmarks/tslibs/normalize.py | 2 +- ci/code_checks.sh | 2 +- doc/source/development/contributing.rst | 4 +- environment.yml | 2 +- pandas/_config/config.py | 4 +- pandas/_libs/algos.pyx | 10 +-- pandas/_libs/groupby.pyx | 48 +++++++++--- pandas/_libs/hashing.pyx | 7 +- pandas/_libs/hashtable.pyx | 75 +++++++++---------- pandas/_libs/index.pyx | 8 +- pandas/_libs/internals.pyx | 3 + pandas/_libs/interval.pyx | 16 ++-- pandas/_libs/join.pyx | 9 ++- pandas/_libs/lib.pyx | 48 ++++++------ pandas/_libs/missing.pyx | 16 ++-- pandas/_libs/ops.pyx | 9 +-- pandas/_libs/parsers.pyx | 65 ++++++++++------ pandas/_libs/reduction.pyx | 9 ++- pandas/_libs/reshape.pyx | 2 + pandas/_libs/sparse.pyx | 15 +++- pandas/_libs/testing.pyx | 7 +- pandas/_libs/tslib.pyx | 19 ++--- pandas/_libs/tslibs/ccalendar.pyx | 2 +- pandas/_libs/tslibs/conversion.pyx | 58 +++++++++----- pandas/_libs/tslibs/fields.pyx | 29 ++++--- pandas/_libs/tslibs/nattype.pyx | 22 +++--- pandas/_libs/tslibs/np_datetime.pyx | 6 +- pandas/_libs/tslibs/offsets.pyx | 43 +++++++---- pandas/_libs/tslibs/parsing.pyx | 41 +++++----- pandas/_libs/tslibs/period.pyx | 62 +++++++-------- pandas/_libs/tslibs/strptime.pyx | 17 +++-- pandas/_libs/tslibs/timedeltas.pyx | 38 ++++++---- pandas/_libs/tslibs/timestamps.pyx | 62 ++++++++------- pandas/_libs/tslibs/timezones.pyx | 12 ++- pandas/_libs/tslibs/tzconversion.pyx | 18 +++-- pandas/_libs/tslibs/vectorized.pyx | 9 ++- pandas/_libs/window/aggregations.pyx | 9 ++- pandas/_libs/window/indexers.pyx | 3 +- pandas/_libs/writers.pyx | 2 +- pandas/_testing.py | 2 +- pandas/_typing.py | 10 ++- pandas/compat/pickle_compat.py | 2 +- pandas/core/apply.py | 2 +- pandas/core/arrays/categorical.py | 4 +- pandas/core/arrays/datetimelike.py | 2 +- pandas/core/arrays/integer.py | 1 + pandas/core/arrays/interval.py | 1 + pandas/core/arrays/period.py | 1 + pandas/core/arrays/sparse/accessor.py | 7 +- pandas/core/config_init.py | 6 +- pandas/core/construction.py | 10 +-- pandas/core/dtypes/cast.py | 3 +- pandas/core/dtypes/dtypes.py | 12 ++- pandas/core/frame.py | 6 +- pandas/core/groupby/generic.py | 2 +- pandas/core/groupby/grouper.py | 1 - pandas/core/indexes/base.py | 2 +- pandas/core/internals/ops.py | 2 +- pandas/core/strings.py | 6 +- pandas/core/tools/datetimes.py | 5 +- pandas/core/util/hashing.py | 2 +- pandas/io/clipboard/__init__.py | 16 ++-- pandas/io/excel/_base.py | 2 +- pandas/io/excel/_odfreader.py | 4 +- pandas/io/excel/_openpyxl.py | 2 +- pandas/io/excel/_xlrd.py | 4 +- pandas/io/formats/format.py | 2 +- pandas/io/formats/style.py | 2 +- pandas/io/html.py | 2 +- pandas/io/pytables.py | 2 +- pandas/io/sas/sas.pyx | 2 +- pandas/io/sql.py | 16 ++-- pandas/plotting/_matplotlib/core.py | 2 +- pandas/plotting/_matplotlib/timeseries.py | 2 +- pandas/tests/api/test_api.py | 3 +- pandas/tests/arrays/interval/test_interval.py | 4 + pandas/tests/arrays/test_period.py | 3 + pandas/tests/frame/test_analytics.py | 4 +- .../tests/indexes/datetimes/test_datetime.py | 1 
+ .../indexing/multiindex/test_indexing_slow.py | 2 +- pandas/tests/io/test_fsspec.py | 2 +- pandas/tests/io/test_gcs.py | 9 +-- pandas/tests/io/test_sql.py | 4 +- pandas/tests/plotting/common.py | 4 +- pandas/tests/plotting/test_frame.py | 8 +- pandas/tests/plotting/test_hist_method.py | 3 +- pandas/tests/plotting/test_misc.py | 10 ++- pandas/tests/plotting/test_series.py | 3 +- pandas/tests/series/indexing/test_datetime.py | 2 +- pandas/tests/series/methods/test_asof.py | 2 +- pandas/tests/series/test_arithmetic.py | 2 +- pandas/tests/test_downstream.py | 2 +- pandas/util/_doctools.py | 2 +- requirements-dev.txt | 2 +- 97 files changed, 612 insertions(+), 432 deletions(-) diff --git a/asv_bench/benchmarks/frame_ctor.py b/asv_bench/benchmarks/frame_ctor.py index dc6f45f810f3d..e0a2257b0ca1f 100644 --- a/asv_bench/benchmarks/frame_ctor.py +++ b/asv_bench/benchmarks/frame_ctor.py @@ -6,7 +6,7 @@ from .pandas_vb_common import tm try: - from pandas.tseries.offsets import Nano, Hour + from pandas.tseries.offsets import Hour, Nano except ImportError: # For compatibility with older versions from pandas.core.datetools import * # noqa diff --git a/asv_bench/benchmarks/gil.py b/asv_bench/benchmarks/gil.py index e266d871f5bc6..5d9070de92ec7 100644 --- a/asv_bench/benchmarks/gil.py +++ b/asv_bench/benchmarks/gil.py @@ -7,14 +7,14 @@ try: from pandas import ( - rolling_median, + rolling_kurt, + rolling_max, rolling_mean, + rolling_median, rolling_min, - rolling_max, - rolling_var, rolling_skew, - rolling_kurt, rolling_std, + rolling_var, ) have_rolling_methods = True diff --git a/asv_bench/benchmarks/io/parsers.py b/asv_bench/benchmarks/io/parsers.py index ec3eddfff7184..5390056ba36f2 100644 --- a/asv_bench/benchmarks/io/parsers.py +++ b/asv_bench/benchmarks/io/parsers.py @@ -2,8 +2,8 @@ try: from pandas._libs.tslibs.parsing import ( - concat_date_cols, _does_string_look_like_datetime, + concat_date_cols, ) except ImportError: # Avoid whole benchmark suite import failure on asv (currently 0.4) diff --git a/asv_bench/benchmarks/tslibs/normalize.py b/asv_bench/benchmarks/tslibs/normalize.py index 7d4e0556f4d96..9a206410d8775 100644 --- a/asv_bench/benchmarks/tslibs/normalize.py +++ b/asv_bench/benchmarks/tslibs/normalize.py @@ -1,5 +1,5 @@ try: - from pandas._libs.tslibs import normalize_i8_timestamps, is_date_array_normalized + from pandas._libs.tslibs import is_date_array_normalized, normalize_i8_timestamps except ImportError: from pandas._libs.tslibs.conversion import ( normalize_i8_timestamps, diff --git a/ci/code_checks.sh b/ci/code_checks.sh index 7b12de387d648..69ce0f1adce22 100755 --- a/ci/code_checks.sh +++ b/ci/code_checks.sh @@ -121,7 +121,7 @@ if [[ -z "$CHECK" || "$CHECK" == "lint" ]]; then # Imports - Check formatting using isort see setup.cfg for settings MSG='Check import format using isort' ; echo $MSG - ISORT_CMD="isort --quiet --recursive --check-only pandas asv_bench scripts" + ISORT_CMD="isort --quiet --check-only pandas asv_bench scripts" if [[ "$GITHUB_ACTIONS" == "true" ]]; then eval $ISORT_CMD | awk '{print "##[error]" $0}'; RET=$(($RET + ${PIPESTATUS[0]})) else diff --git a/doc/source/development/contributing.rst b/doc/source/development/contributing.rst index b85e9403038ab..1b0e36e7b6933 100644 --- a/doc/source/development/contributing.rst +++ b/doc/source/development/contributing.rst @@ -751,7 +751,7 @@ Imports are alphabetically sorted within these sections. 
As part of :ref:`Continuous Integration ` checks we run:: - isort --recursive --check-only pandas + isort --check-only pandas to check that imports are correctly formatted as per the `setup.cfg`. @@ -770,8 +770,6 @@ You should run:: to automatically format imports correctly. This will modify your local copy of the files. -The `--recursive` flag can be passed to sort all files in a directory. - Alternatively, you can run a command similar to what was suggested for ``black`` and ``flake8`` :ref:`right above `:: git diff upstream/master --name-only -- "*.py" | xargs -r isort diff --git a/environment.yml b/environment.yml index 3b088ca511be9..9efb995e29497 100644 --- a/environment.yml +++ b/environment.yml @@ -21,7 +21,7 @@ dependencies: - flake8<3.8.0 # temporary pin, GH#34150 - flake8-comprehensions>=3.1.0 # used by flake8, linting of unnecessary comprehensions - flake8-rst>=0.6.0,<=0.7.0 # linting of code blocks in rst files - - isort=4.3.21 # check that imports are in the right order + - isort>=5.2.1 # check that imports are in the right order - mypy=0.730 - pycodestyle # used by flake8 diff --git a/pandas/_config/config.py b/pandas/_config/config.py index f5e16cddeb04c..d7b73a0a685d3 100644 --- a/pandas/_config/config.py +++ b/pandas/_config/config.py @@ -442,8 +442,8 @@ def register_option( ValueError if `validator` is specified and `defval` is not a valid value. """ - import tokenize import keyword + import tokenize key = key.lower() @@ -660,8 +660,8 @@ def _build_option_description(k: str) -> str: def pp_options_list(keys: Iterable[str], width=80, _print: bool = False): """ Builds a concise listing of available options, grouped by prefix """ - from textwrap import wrap from itertools import groupby + from textwrap import wrap def pp(name: str, ks: Iterable[str]) -> List[str]: pfx = "- " + name + ".[" if name else "" diff --git a/pandas/_libs/algos.pyx b/pandas/_libs/algos.pyx index 6b6ead795584f..7e90a8cc681ef 100644 --- a/pandas/_libs/algos.pyx +++ b/pandas/_libs/algos.pyx @@ -1,11 +1,12 @@ import cython from cython import Py_ssize_t -from libc.stdlib cimport malloc, free -from libc.string cimport memmove from libc.math cimport fabs, sqrt +from libc.stdlib cimport free, malloc +from libc.string cimport memmove import numpy as np + cimport numpy as cnp from numpy cimport ( NPY_FLOAT32, @@ -31,12 +32,11 @@ from numpy cimport ( uint32_t, uint64_t, ) + cnp.import_array() cimport pandas._libs.util as util -from pandas._libs.util cimport numeric, get_nat - from pandas._libs.khash cimport ( kh_destroy_int64, kh_get_int64, @@ -46,7 +46,7 @@ from pandas._libs.khash cimport ( kh_resize_int64, khiter_t, ) - +from pandas._libs.util cimport get_nat, numeric import pandas._libs.missing as missing diff --git a/pandas/_libs/groupby.pyx b/pandas/_libs/groupby.pyx index 7c57e6ee9dbfd..38cb973d6dde9 100644 --- a/pandas/_libs/groupby.pyx +++ b/pandas/_libs/groupby.pyx @@ -1,27 +1,51 @@ import cython from cython import Py_ssize_t -from cython cimport floating -from libc.stdlib cimport malloc, free +from cython cimport floating +from libc.stdlib cimport free, malloc import numpy as np + cimport numpy as cnp -from numpy cimport (ndarray, - int8_t, int16_t, int32_t, int64_t, uint8_t, uint16_t, - uint32_t, uint64_t, float32_t, float64_t, complex64_t, complex128_t) +from numpy cimport ( + complex64_t, + complex128_t, + float32_t, + float64_t, + int8_t, + int16_t, + int32_t, + int64_t, + ndarray, + uint8_t, + uint16_t, + uint32_t, + uint64_t, +) from numpy.math cimport NAN -cnp.import_array() -from 
pandas._libs.util cimport numeric, get_nat +cnp.import_array() -from pandas._libs.algos cimport (swap, TiebreakEnumType, TIEBREAK_AVERAGE, - TIEBREAK_MIN, TIEBREAK_MAX, TIEBREAK_FIRST, - TIEBREAK_DENSE) -from pandas._libs.algos import (take_2d_axis1_float64_float64, - groupsort_indexer, tiebreakers) +from pandas._libs.algos cimport ( + TIEBREAK_AVERAGE, + TIEBREAK_DENSE, + TIEBREAK_FIRST, + TIEBREAK_MAX, + TIEBREAK_MIN, + TiebreakEnumType, + swap, +) +from pandas._libs.util cimport get_nat, numeric + +from pandas._libs.algos import ( + groupsort_indexer, + take_2d_axis1_float64_float64, + tiebreakers, +) from pandas._libs.missing cimport checknull + cdef int64_t NPY_NAT = get_nat() _int64_max = np.iinfo(np.int64).max diff --git a/pandas/_libs/hashing.pyx b/pandas/_libs/hashing.pyx index a98820ca57895..f2af04d91a3e3 100644 --- a/pandas/_libs/hashing.pyx +++ b/pandas/_libs/hashing.pyx @@ -2,10 +2,13 @@ # at https://github.com/veorq/SipHash import cython -from libc.stdlib cimport malloc, free + +from libc.stdlib cimport free, malloc import numpy as np -from numpy cimport ndarray, uint8_t, uint32_t, uint64_t, import_array + +from numpy cimport import_array, ndarray, uint8_t, uint32_t, uint64_t + import_array() from pandas._libs.util cimport is_nan diff --git a/pandas/_libs/hashtable.pyx b/pandas/_libs/hashtable.pyx index c3dcbb942d7fe..ffaf6d6505955 100644 --- a/pandas/_libs/hashtable.pyx +++ b/pandas/_libs/hashtable.pyx @@ -1,60 +1,57 @@ cimport cython - -from cpython.ref cimport PyObject, Py_INCREF -from cpython.mem cimport PyMem_Malloc, PyMem_Free - -from libc.stdlib cimport malloc, free +from cpython.mem cimport PyMem_Free, PyMem_Malloc +from cpython.ref cimport Py_INCREF, PyObject +from libc.stdlib cimport free, malloc import numpy as np + cimport numpy as cnp -from numpy cimport ndarray, uint8_t, uint32_t, float64_t +from numpy cimport float64_t, ndarray, uint8_t, uint32_t from numpy.math cimport NAN + cnp.import_array() +from pandas._libs cimport util from pandas._libs.khash cimport ( - khiter_t, - kh_str_t, - kh_init_str, - kh_put_str, - kh_exist_str, - kh_get_str, - kh_destroy_str, - kh_resize_str, - kh_put_strbox, - kh_get_strbox, - kh_init_strbox, - kh_int64_t, - kh_init_int64, - kh_resize_int64, + kh_destroy_float64, kh_destroy_int64, - kh_get_int64, + kh_destroy_pymap, + kh_destroy_str, + kh_destroy_uint64, + kh_exist_float64, kh_exist_int64, - kh_put_int64, + kh_exist_pymap, + kh_exist_str, + kh_exist_uint64, kh_float64_t, - kh_exist_float64, - kh_put_float64, - kh_init_float64, kh_get_float64, - kh_destroy_float64, - kh_resize_float64, - kh_resize_uint64, - kh_exist_uint64, - kh_destroy_uint64, - kh_put_uint64, + kh_get_int64, + kh_get_pymap, + kh_get_str, + kh_get_strbox, kh_get_uint64, - kh_init_uint64, - kh_destroy_pymap, - kh_exist_pymap, + kh_init_float64, + kh_init_int64, kh_init_pymap, - kh_get_pymap, + kh_init_str, + kh_init_strbox, + kh_init_uint64, + kh_int64_t, + kh_put_float64, + kh_put_int64, kh_put_pymap, + kh_put_str, + kh_put_strbox, + kh_put_uint64, + kh_resize_float64, + kh_resize_int64, kh_resize_pymap, + kh_resize_str, + kh_resize_uint64, + kh_str_t, + khiter_t, ) - - -from pandas._libs cimport util - from pandas._libs.missing cimport checknull diff --git a/pandas/_libs/index.pyx b/pandas/_libs/index.pyx index 35c4b73b47695..d6659cc1895b1 100644 --- a/pandas/_libs/index.pyx +++ b/pandas/_libs/index.pyx @@ -1,6 +1,7 @@ import warnings import numpy as np + cimport numpy as cnp from numpy cimport ( float32_t, @@ -16,17 +17,16 @@ from numpy cimport ( 
     uint32_t,
     uint64_t,
 )
+
 cnp.import_array()
 from pandas._libs cimport util
-
+from pandas._libs.hashtable cimport HashTable
 from pandas._libs.tslibs.nattype cimport c_NaT as NaT
 from pandas._libs.tslibs.period cimport is_period_object
-from pandas._libs.tslibs.timestamps cimport _Timestamp
 from pandas._libs.tslibs.timedeltas cimport _Timedelta
-
-from pandas._libs.hashtable cimport HashTable
+from pandas._libs.tslibs.timestamps cimport _Timestamp
 from pandas._libs import algos, hashtable as _hash
 from pandas._libs.missing import checknull
diff --git a/pandas/_libs/internals.pyx b/pandas/_libs/internals.pyx
index 8b4b490f49b12..4f27fde52414a 100644
--- a/pandas/_libs/internals.pyx
+++ b/pandas/_libs/internals.pyx
@@ -5,12 +5,15 @@ from cython import Py_ssize_t
 from cpython.slice cimport PySlice_GetIndicesEx
+
 cdef extern from "Python.h":
     Py_ssize_t PY_SSIZE_T_MAX
 import numpy as np
+
 cimport numpy as cnp
 from numpy cimport NPY_INT64, int64_t
+
 cnp.import_array()
 from pandas._libs.algos import ensure_int64
diff --git a/pandas/_libs/interval.pyx b/pandas/_libs/interval.pyx
index 95881ebf1385c..6867e8aba7411 100644
--- a/pandas/_libs/interval.pyx
+++ b/pandas/_libs/interval.pyx
@@ -1,7 +1,8 @@
 import numbers
 from operator import le, lt
-from cpython.datetime cimport PyDelta_Check, PyDateTime_IMPORT
+from cpython.datetime cimport PyDateTime_IMPORT, PyDelta_Check
+
 PyDateTime_IMPORT
 from cpython.object cimport (
@@ -16,8 +17,8 @@ from cpython.object cimport (
 import cython
 from cython import Py_ssize_t
-
 import numpy as np
+
 cimport numpy as cnp
 from numpy cimport (
     NPY_QUICKSORT,
@@ -30,22 +31,21 @@ from numpy cimport (
     ndarray,
     uint64_t,
 )
+
 cnp.import_array()
 from pandas._libs cimport util
-
 from pandas._libs.hashtable cimport Int64Vector
+from pandas._libs.tslibs.timedeltas cimport _Timedelta
+from pandas._libs.tslibs.timestamps cimport _Timestamp
+from pandas._libs.tslibs.timezones cimport tz_compare
 from pandas._libs.tslibs.util cimport (
-    is_integer_object,
     is_float_object,
+    is_integer_object,
     is_timedelta64_object,
 )
-from pandas._libs.tslibs.timezones cimport tz_compare
-from pandas._libs.tslibs.timestamps cimport _Timestamp
-from pandas._libs.tslibs.timedeltas cimport _Timedelta
-
 _VALID_CLOSED = frozenset(['left', 'right', 'both', 'neither'])
diff --git a/pandas/_libs/join.pyx b/pandas/_libs/join.pyx
index 54892a7e4bc77..13c7187923473 100644
--- a/pandas/_libs/join.pyx
+++ b/pandas/_libs/join.pyx
@@ -1,7 +1,7 @@
 import cython
 from cython import Py_ssize_t
-
 import numpy as np
+
 cimport numpy as cnp
 from numpy cimport (
     float32_t,
@@ -16,6 +16,7 @@ from numpy cimport (
     uint32_t,
     uint64_t,
 )
+
 cnp.import_array()
 from pandas._libs.algos import (
@@ -640,7 +641,11 @@ def outer_join_indexer(ndarray[join_t] left, ndarray[join_t] right):
 # ----------------------------------------------------------------------
 from pandas._libs.hashtable cimport (
-    HashTable, PyObjectHashTable, UInt64HashTable, Int64HashTable)
+    HashTable,
+    Int64HashTable,
+    PyObjectHashTable,
+    UInt64HashTable,
+)
 ctypedef fused asof_t:
     uint8_t
diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx
index 5ecbb2c3ffd35..5fa91ffee8ea8 100644
--- a/pandas/_libs/lib.pyx
+++ b/pandas/_libs/lib.pyx
@@ -5,23 +5,24 @@ import warnings
 import cython
 from cython import Py_ssize_t
-from cpython.object cimport PyObject_RichCompareBool, Py_EQ
-from cpython.ref cimport Py_INCREF
-from cpython.tuple cimport PyTuple_SET_ITEM, PyTuple_New
-from cpython.iterator cimport PyIter_Check
-from cpython.sequence cimport PySequence_Check
-from cpython.number cimport PyNumber_Check
-
 from cpython.datetime cimport (
-    PyDateTime_Check,
     PyDate_Check,
-    PyTime_Check,
-    PyDelta_Check,
+    PyDateTime_Check,
     PyDateTime_IMPORT,
+    PyDelta_Check,
+    PyTime_Check,
 )
+from cpython.iterator cimport PyIter_Check
+from cpython.number cimport PyNumber_Check
+from cpython.object cimport Py_EQ, PyObject_RichCompareBool
+from cpython.ref cimport Py_INCREF
+from cpython.sequence cimport PySequence_Check
+from cpython.tuple cimport PyTuple_New, PyTuple_SET_ITEM
+
 PyDateTime_IMPORT
 import numpy as np
+
 cimport numpy as cnp
 from numpy cimport (
     NPY_OBJECT,
@@ -39,6 +40,7 @@ from numpy cimport (
     uint8_t,
     uint64_t,
 )
+
 cnp.import_array()
 cdef extern from "numpy/arrayobject.h":
@@ -63,28 +65,23 @@ cdef extern from "src/parse_helper.h":
     int floatify(object, float64_t *result, int *maybe_int) except -1
 from pandas._libs cimport util
-from pandas._libs.util cimport is_nan, UINT64_MAX, INT64_MAX, INT64_MIN
+from pandas._libs.util cimport INT64_MAX, INT64_MIN, UINT64_MAX, is_nan
 from pandas._libs.tslib import array_to_datetime
-from pandas._libs.tslibs.nattype cimport (
-    NPY_NAT,
-    c_NaT as NaT,
-    checknull_with_nat,
-)
-from pandas._libs.tslibs.conversion cimport convert_to_tsobject
-from pandas._libs.tslibs.timedeltas cimport convert_to_timedelta64
-from pandas._libs.tslibs.timezones cimport tz_compare
-from pandas._libs.tslibs.period cimport is_period_object
-from pandas._libs.tslibs.offsets cimport is_offset_object
 from pandas._libs.missing cimport (
+    C_NA,
     checknull,
-    isnaobj,
     is_null_datetime64,
     is_null_timedelta64,
-    C_NA,
+    isnaobj,
 )
-
+from pandas._libs.tslibs.conversion cimport convert_to_tsobject
+from pandas._libs.tslibs.nattype cimport NPY_NAT, c_NaT as NaT, checknull_with_nat
+from pandas._libs.tslibs.offsets cimport is_offset_object
+from pandas._libs.tslibs.period cimport is_period_object
+from pandas._libs.tslibs.timedeltas cimport convert_to_timedelta64
+from pandas._libs.tslibs.timezones cimport tz_compare
 # constants that will be compared to potentially arbitrarily large
 # python int
@@ -1317,8 +1314,7 @@ def infer_dtype(value: object, skipna: bool = True) -> str:
     if not isinstance(value, list):
         value = list(value)
-    from pandas.core.dtypes.cast import (
-        construct_1d_object_array_from_listlike)
+    from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
     values = construct_1d_object_array_from_listlike(value)
     # make contiguous
diff --git a/pandas/_libs/missing.pyx b/pandas/_libs/missing.pyx
index fdd06fe631b97..760fab3781fd4 100644
--- a/pandas/_libs/missing.pyx
+++ b/pandas/_libs/missing.pyx
@@ -1,27 +1,25 @@
-import cython
-from cython import Py_ssize_t
-
 import numbers
+import cython
+from cython import Py_ssize_t
 import numpy as np
+
 cimport numpy as cnp
-from numpy cimport ndarray, int64_t, uint8_t, float64_t
+from numpy cimport float64_t, int64_t, ndarray, uint8_t
+
 cnp.import_array()
 from pandas._libs cimport util
-
-
-from pandas._libs.tslibs.np_datetime cimport get_datetime64_value, get_timedelta64_value
 from pandas._libs.tslibs.nattype cimport (
     c_NaT as NaT,
     checknull_with_nat,
     is_null_datetimelike,
 )
-from pandas._libs.ops_dispatch import maybe_dispatch_ufunc_to_dunder_op
+from pandas._libs.tslibs.np_datetime cimport get_datetime64_value, get_timedelta64_value
+from pandas._libs.ops_dispatch import maybe_dispatch_ufunc_to_dunder_op
 from pandas.compat import is_platform_32bit
-
 cdef:
     float64_t INF = np.inf
     float64_t NEGINF = -INF
diff --git a/pandas/_libs/ops.pyx b/pandas/_libs/ops.pyx
index 658600cdfbe6c..d1f897d237c1b 100644
--- a/pandas/_libs/ops.pyx
+++ b/pandas/_libs/ops.pyx
@@ -10,18 +10,17 @@ from cpython.object cimport (
     PyObject_RichCompareBool,
 )
-
 import cython
 from cython import Py_ssize_t
-
 import numpy as np
-from numpy cimport ndarray, uint8_t, import_array
-import_array()
+from numpy cimport import_array, ndarray, uint8_t
+
+import_array()
-from pandas._libs.util cimport UINT8_MAX, is_nan
 from pandas._libs.missing cimport checknull
+from pandas._libs.util cimport UINT8_MAX, is_nan
 @cython.wraparound(False)
diff --git a/pandas/_libs/parsers.pyx b/pandas/_libs/parsers.pyx
index 6ffb036e01595..fa77af6bd5a25 100644
--- a/pandas/_libs/parsers.pyx
+++ b/pandas/_libs/parsers.pyx
@@ -1,6 +1,8 @@
 # Copyright (c) 2012, Lambda Foundry, Inc.
 # See LICENSE for the license
 import bz2
+from csv import QUOTE_MINIMAL, QUOTE_NONE, QUOTE_NONNUMERIC
+from errno import ENOENT
 import gzip
 import io
 import os
@@ -9,17 +11,14 @@ import time
 import warnings
 import zipfile
-from csv import QUOTE_MINIMAL, QUOTE_NONNUMERIC, QUOTE_NONE
-from errno import ENOENT
-
 from libc.stdlib cimport free
-from libc.string cimport strncpy, strlen, strcasecmp
+from libc.string cimport strcasecmp, strlen, strncpy
 import cython
 from cython import Py_ssize_t
 from cpython.bytes cimport PyBytes_AsString, PyBytes_FromString
-from cpython.exc cimport PyErr_Occurred, PyErr_Fetch
+from cpython.exc cimport PyErr_Fetch, PyErr_Occurred
 from cpython.object cimport PyObject
 from cpython.ref cimport Py_XDECREF
 from cpython.unicode cimport PyUnicode_AsUTF8String, PyUnicode_Decode
@@ -30,37 +29,59 @@ cdef extern from "Python.h":
 import numpy as np
+
 cimport numpy as cnp
-from numpy cimport ndarray, uint8_t, uint64_t, int64_t, float64_t
+from numpy cimport float64_t, int64_t, ndarray, uint8_t, uint64_t
+
 cnp.import_array()
 from pandas._libs cimport util
-from pandas._libs.util cimport UINT64_MAX, INT64_MAX, INT64_MIN
+from pandas._libs.util cimport INT64_MAX, INT64_MIN, UINT64_MAX
+
 import pandas._libs.lib as lib
 from pandas._libs.khash cimport (
-    khiter_t,
-    kh_str_t, kh_init_str, kh_put_str, kh_exist_str,
-    kh_get_str, kh_destroy_str,
-    kh_float64_t, kh_get_float64, kh_destroy_float64,
-    kh_put_float64, kh_init_float64, kh_resize_float64,
-    kh_strbox_t, kh_put_strbox, kh_get_strbox, kh_init_strbox,
+    kh_destroy_float64,
+    kh_destroy_str,
+    kh_destroy_str_starts,
     kh_destroy_strbox,
-    kh_str_starts_t, kh_put_str_starts_item, kh_init_str_starts,
-    kh_get_str_starts_item, kh_destroy_str_starts, kh_resize_str_starts)
+    kh_exist_str,
+    kh_float64_t,
+    kh_get_float64,
+    kh_get_str,
+    kh_get_str_starts_item,
+    kh_get_strbox,
+    kh_init_float64,
+    kh_init_str,
+    kh_init_str_starts,
+    kh_init_strbox,
+    kh_put_float64,
+    kh_put_str,
+    kh_put_str_starts_item,
+    kh_put_strbox,
+    kh_resize_float64,
+    kh_resize_str_starts,
+    kh_str_starts_t,
+    kh_str_t,
+    kh_strbox_t,
+    khiter_t,
+)
+
+from pandas.compat import _get_lzma_file, _import_lzma
+from pandas.errors import DtypeWarning, EmptyDataError, ParserError, ParserWarning
 from pandas.core.dtypes.common import (
+    is_bool_dtype,
     is_categorical_dtype,
-    is_integer_dtype, is_float_dtype,
-    is_bool_dtype, is_object_dtype,
     is_datetime64_dtype,
-    pandas_dtype, is_extension_array_dtype)
+    is_extension_array_dtype,
+    is_float_dtype,
+    is_integer_dtype,
+    is_object_dtype,
+    pandas_dtype,
+)
 from pandas.core.dtypes.concat import union_categoricals
-from pandas.compat import _import_lzma, _get_lzma_file
-from pandas.errors import (ParserError, DtypeWarning,
-                           EmptyDataError, ParserWarning)
-
 lzma = _import_lzma()
 cdef:
diff --git a/pandas/_libs/reduction.pyx b/pandas/_libs/reduction.pyx
index a01e0c5705dcf..7b36bc8baf891 100644
--- a/pandas/_libs/reduction.pyx
+++ b/pandas/_libs/reduction.pyx
@@ -2,15 +2,18 @@ from copy import copy
 from cython import Py_ssize_t
-from libc.stdlib cimport malloc, free
+from libc.stdlib cimport free, malloc
 import numpy as np
+
 cimport numpy as cnp
-from numpy cimport ndarray, int64_t
+from numpy cimport int64_t, ndarray
+
 cnp.import_array()
 from pandas._libs cimport util
-from pandas._libs.lib import maybe_convert_objects, is_scalar
+
+from pandas._libs.lib import is_scalar, maybe_convert_objects
 cdef _check_result_array(object obj, Py_ssize_t cnt):
diff --git a/pandas/_libs/reshape.pyx b/pandas/_libs/reshape.pyx
index da4dd00027395..5c6c15fb50fed 100644
--- a/pandas/_libs/reshape.pyx
+++ b/pandas/_libs/reshape.pyx
@@ -16,7 +16,9 @@ from numpy cimport (
 )
 import numpy as np
+
 cimport numpy as cnp
+
 cnp.import_array()
 from pandas._libs.lib cimport c_is_list_like
diff --git a/pandas/_libs/sparse.pyx b/pandas/_libs/sparse.pyx
index 7c9575d921dc9..321d7c374d8ec 100644
--- a/pandas/_libs/sparse.pyx
+++ b/pandas/_libs/sparse.pyx
@@ -1,9 +1,18 @@
 import cython
-
 import numpy as np
+
 cimport numpy as cnp
-from numpy cimport (ndarray, uint8_t, int64_t, int32_t, int16_t, int8_t,
-                    float64_t, float32_t)
+from numpy cimport (
+    float32_t,
+    float64_t,
+    int8_t,
+    int16_t,
+    int32_t,
+    int64_t,
+    ndarray,
+    uint8_t,
+)
+
 cnp.import_array()
diff --git a/pandas/_libs/testing.pyx b/pandas/_libs/testing.pyx
index 785a4d1f8b923..64fc8d615ea9c 100644
--- a/pandas/_libs/testing.pyx
+++ b/pandas/_libs/testing.pyx
@@ -1,13 +1,16 @@
 import math
 import numpy as np
+
 from numpy cimport import_array
+
 import_array()
 from pandas._libs.util cimport is_array
-from pandas.core.dtypes.missing import isna, array_equivalent
 from pandas.core.dtypes.common import is_dtype_equal
+from pandas.core.dtypes.missing import array_equivalent, isna
+
 cdef NUMERIC_TYPES = (
     bool,
@@ -129,6 +132,7 @@ cpdef assert_almost_equal(a, b,
     if not isiterable(b):
         from pandas._testing import assert_class_equal
+
         # classes can't be the same, to raise error
         assert_class_equal(a, b, obj=obj)
@@ -181,6 +185,7 @@ cpdef assert_almost_equal(a, b,
     elif isiterable(b):
         from pandas._testing import assert_class_equal
+
         # classes can't be the same, to raise error
         assert_class_equal(a, b, obj=obj)
diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx
index 35d5cd8f1e275..e4128af62d06d 100644
--- a/pandas/_libs/tslib.pyx
+++ b/pandas/_libs/tslib.pyx
@@ -7,23 +7,20 @@ from cpython.datetime cimport (
     datetime,
     tzinfo,
 )
+
 # import datetime C API
 PyDateTime_IMPORT
 cimport numpy as cnp
 from numpy cimport float64_t, int64_t, ndarray
+
 import numpy as np
+
 cnp.import_array()
 import pytz
-from pandas._libs.util cimport (
-    is_datetime64_object,
-    is_float_object,
-    is_integer_object,
-)
-
 from pandas._libs.tslibs.np_datetime cimport (
     _string_to_dts,
     check_dts_bounds,
@@ -34,9 +31,9 @@ from pandas._libs.tslibs.np_datetime cimport (
     pydate_to_dt64,
     pydatetime_to_dt64,
 )
+from pandas._libs.util cimport is_datetime64_object, is_float_object, is_integer_object
 from pandas._libs.tslibs.np_datetime import OutOfBoundsDatetime
-
 from pandas._libs.tslibs.parsing import parse_datetime_string
 from pandas._libs.tslibs.conversion cimport (
@@ -45,22 +42,18 @@ from pandas._libs.tslibs.conversion cimport (
     convert_datetime_to_tsobject,
     get_datetime64_nanos,
 )
-
 from pandas._libs.tslibs.nattype cimport (
     NPY_NAT,
     c_NaT as NaT,
     c_nat_strings as nat_strings,
 )
-
 from pandas._libs.tslibs.timestamps cimport _Timestamp
-from pandas._libs.tslibs.timestamps import Timestamp
-from pandas._libs.tslibs.tzconversion cimport (
-    tz_localize_to_utc_single,
-)
+from pandas._libs.tslibs.timestamps import Timestamp
 # Note: this is the only non-tslibs intra-pandas dependency here
 from pandas._libs.missing cimport checknull_with_nat_and_na
+from pandas._libs.tslibs.tzconversion cimport tz_localize_to_utc_single
 def _test_parse_iso8601(ts: str):
diff --git a/pandas/_libs/tslibs/ccalendar.pyx b/pandas/_libs/tslibs/ccalendar.pyx
index 00cecd25e5225..6cce2f5e1fd95 100644
--- a/pandas/_libs/tslibs/ccalendar.pyx
+++ b/pandas/_libs/tslibs/ccalendar.pyx
@@ -5,7 +5,7 @@ Cython implementations of functions resembling the stdlib calendar module
 import cython
-from numpy cimport int64_t, int32_t
+from numpy cimport int32_t, int64_t
 # ----------------------------------------------------------------------
 # Constants
diff --git a/pandas/_libs/tslibs/conversion.pyx b/pandas/_libs/tslibs/conversion.pyx
index 8cc3d25e86340..adf1dfbc1ac72 100644
--- a/pandas/_libs/tslibs/conversion.pyx
+++ b/pandas/_libs/tslibs/conversion.pyx
@@ -1,44 +1,68 @@
 import cython
-
 import numpy as np
+
 cimport numpy as cnp
-from numpy cimport int64_t, int32_t, intp_t, ndarray
+from numpy cimport int32_t, int64_t, intp_t, ndarray
+
 cnp.import_array()
 import pytz
 # stdlib datetime imports
-
-from cpython.datetime cimport (datetime, time, tzinfo,
-                               PyDateTime_Check, PyDate_Check,
-                               PyDateTime_IMPORT)
+
+from cpython.datetime cimport (
+    PyDate_Check,
+    PyDateTime_Check,
+    PyDateTime_IMPORT,
+    datetime,
+    time,
+    tzinfo,
+)
+
 PyDateTime_IMPORT
 from pandas._libs.tslibs.base cimport ABCTimestamp
-
 from pandas._libs.tslibs.np_datetime cimport (
-    check_dts_bounds, npy_datetimestruct, pandas_datetime_to_datetimestruct,
-    _string_to_dts, npy_datetime, dt64_to_dtstruct, dtstruct_to_dt64,
-    get_datetime64_unit, get_datetime64_value, pydatetime_to_dt64,
-    NPY_DATETIMEUNIT, NPY_FR_ns)
-from pandas._libs.tslibs.np_datetime import OutOfBoundsDatetime
+    NPY_DATETIMEUNIT,
+    NPY_FR_ns,
+    _string_to_dts,
+    check_dts_bounds,
+    dt64_to_dtstruct,
+    dtstruct_to_dt64,
+    get_datetime64_unit,
+    get_datetime64_value,
+    npy_datetime,
+    npy_datetimestruct,
+    pandas_datetime_to_datetimestruct,
+    pydatetime_to_dt64,
+)
-from pandas._libs.tslibs.util cimport (
-    is_datetime64_object, is_integer_object, is_float_object)
+from pandas._libs.tslibs.np_datetime import OutOfBoundsDatetime
 from pandas._libs.tslibs.timezones cimport (
-    is_utc, is_tzlocal, is_fixed_offset, get_utcoffset, get_dst_info,
-    maybe_get_tz, tz_compare,
+    get_dst_info,
+    get_utcoffset,
+    is_fixed_offset,
+    is_tzlocal,
+    is_utc,
+    maybe_get_tz,
+    tz_compare,
     utc_pytz as UTC,
 )
+from pandas._libs.tslibs.util cimport (
+    is_datetime64_object,
+    is_float_object,
+    is_integer_object,
+)
+
 from pandas._libs.tslibs.parsing import parse_datetime_string
 from pandas._libs.tslibs.nattype cimport (
     NPY_NAT,
-    checknull_with_nat,
     c_NaT as NaT,
     c_nat_strings as nat_strings,
+    checknull_with_nat,
 )
-
 from pandas._libs.tslibs.tzconversion cimport (
     tz_convert_utc_to_tzlocal,
     tz_localize_to_utc_single,
diff --git a/pandas/_libs/tslibs/fields.pyx b/pandas/_libs/tslibs/fields.pyx
index 1d1f900bc18b3..16fa05c3801c6 100644
--- a/pandas/_libs/tslibs/fields.pyx
+++ b/pandas/_libs/tslibs/fields.pyx
@@ -6,26 +6,37 @@ from locale import LC_TIME
 import cython
 from cython import Py_ssize_t
-
 import numpy as np
+
 cimport numpy as cnp
-from numpy cimport ndarray, int64_t, int32_t, int8_t, uint32_t
+from numpy cimport int8_t, int32_t, int64_t, ndarray, uint32_t
+
 cnp.import_array()
 from pandas._config.localization import set_locale
-from pandas._libs.tslibs.ccalendar import MONTHS_FULL, DAYS_FULL
+from pandas._libs.tslibs.ccalendar import DAYS_FULL, MONTHS_FULL
+
 from pandas._libs.tslibs.ccalendar cimport (
-    get_days_in_month, is_leapyear, dayofweek, get_week_of_year,
-    get_day_of_year, get_iso_calendar, iso_calendar_t,
-    month_offset,
+    dayofweek,
+    get_day_of_year,
+    get_days_in_month,
     get_firstbday,
+    get_iso_calendar,
     get_lastbday,
+    get_week_of_year,
+    is_leapyear,
+    iso_calendar_t,
+    month_offset,
 )
-from pandas._libs.tslibs.np_datetime cimport (
-    npy_datetimestruct, pandas_timedeltastruct, dt64_to_dtstruct,
-    td64_to_tdstruct)
 from pandas._libs.tslibs.nattype cimport NPY_NAT
+from pandas._libs.tslibs.np_datetime cimport (
+    dt64_to_dtstruct,
+    npy_datetimestruct,
+    pandas_timedeltastruct,
+    td64_to_tdstruct,
+)
+
 from pandas._libs.tslibs.strptime import LocaleTime
diff --git a/pandas/_libs/tslibs/nattype.pyx b/pandas/_libs/tslibs/nattype.pyx
index 264013f928d22..73df51832d700 100644
--- a/pandas/_libs/tslibs/nattype.pyx
+++ b/pandas/_libs/tslibs/nattype.pyx
@@ -1,3 +1,10 @@
+from cpython.datetime cimport (
+    PyDateTime_Check,
+    PyDateTime_IMPORT,
+    PyDelta_Check,
+    datetime,
+    timedelta,
+)
 from cpython.object cimport (
     Py_EQ,
     Py_GE,
@@ -8,28 +15,19 @@ from cpython.object cimport (
     PyObject_RichCompare,
 )
-from cpython.datetime cimport (
-    PyDateTime_Check,
-    PyDateTime_IMPORT,
-    PyDelta_Check,
-    datetime,
-    timedelta,
-)
 PyDateTime_IMPORT
 from cpython.version cimport PY_MINOR_VERSION
 import numpy as np
+
 cimport numpy as cnp
 from numpy cimport int64_t
+
 cnp.import_array()
-from pandas._libs.tslibs.np_datetime cimport (
-    get_datetime64_value,
-    get_timedelta64_value,
-)
 cimport pandas._libs.tslibs.util as util
-
+from pandas._libs.tslibs.np_datetime cimport get_datetime64_value, get_timedelta64_value
 # ----------------------------------------------------------------------
 # Constants
diff --git a/pandas/_libs/tslibs/np_datetime.pyx b/pandas/_libs/tslibs/np_datetime.pyx
index 31cc55ad981bb..12aaaf4ce3977 100644
--- a/pandas/_libs/tslibs/np_datetime.pyx
+++ b/pandas/_libs/tslibs/np_datetime.pyx
@@ -1,5 +1,3 @@
-from cpython.object cimport Py_EQ, Py_NE, Py_GE, Py_GT, Py_LT, Py_LE
-
 from cpython.datetime cimport (
     PyDateTime_DATE_GET_HOUR,
     PyDateTime_DATE_GET_MICROSECOND,
@@ -10,11 +8,15 @@ from cpython.datetime cimport (
     PyDateTime_GET_YEAR,
     PyDateTime_IMPORT,
 )
+from cpython.object cimport Py_EQ, Py_GE, Py_GT, Py_LE, Py_LT, Py_NE
+
 PyDateTime_IMPORT
 from numpy cimport int64_t
+
 from pandas._libs.tslibs.util cimport get_c_string_buf_and_size
+
 cdef extern from "src/datetime/np_datetime.h":
     int cmp_npy_datetimestruct(npy_datetimestruct *a,
                                npy_datetimestruct *b)
diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx
index 9a7ca15a2a1c2..ac2725fc58aee 100644
--- a/pandas/_libs/tslibs/offsets.pyx
+++ b/pandas/_libs/tslibs/offsets.pyx
@@ -1,39 +1,51 @@
-import cython
-
 import operator
 import re
 import time
 from typing import Any
 import warnings
-from cpython.datetime cimport (PyDateTime_IMPORT,
-                               PyDateTime_Check,
-                               PyDate_Check,
-                               PyDelta_Check,
-                               datetime, timedelta, date,
-                               time as dt_time)
+
+import cython
+
+from cpython.datetime cimport (
+    PyDate_Check,
+    PyDateTime_Check,
+    PyDateTime_IMPORT,
+    PyDelta_Check,
+    date,
+    datetime,
+    time as dt_time,
+    timedelta,
+)
+
 PyDateTime_IMPORT
-from dateutil.relativedelta import relativedelta
 from dateutil.easter import easter
-
+from dateutil.relativedelta import relativedelta
 import numpy as np
+
 cimport numpy as cnp
 from numpy cimport int64_t, ndarray
+
 cnp.import_array()
 # TODO: formalize having _libs.properties "above" tslibs in the dependency structure
+
 from pandas._libs.properties import cache_readonly
 from pandas._libs.tslibs cimport util
 from pandas._libs.tslibs.util cimport (
-    is_integer_object,
     is_datetime64_object,
     is_float_object,
+    is_integer_object,
 )
 from pandas._libs.tslibs.ccalendar import (
-    MONTH_ALIASES, MONTH_TO_CAL_NUM, weekday_to_int, int_to_weekday,
+    MONTH_ALIASES,
+    MONTH_TO_CAL_NUM,
+    int_to_weekday,
+    weekday_to_int,
 )
+
 from pandas._libs.tslibs.ccalendar cimport (
     DAY_NANOS,
     dayofweek,
@@ -47,17 +59,20 @@ from pandas._libs.tslibs.conversion cimport (
 )
 from pandas._libs.tslibs.nattype cimport NPY_NAT, c_NaT as NaT
 from pandas._libs.tslibs.np_datetime cimport (
-    npy_datetimestruct,
-    dtstruct_to_dt64,
     dt64_to_dtstruct,
+    dtstruct_to_dt64,
+    npy_datetimestruct,
     pydate_to_dtstruct,
 )
 from pandas._libs.tslibs.tzconversion cimport tz_convert_from_utc_single
 from .dtypes cimport PeriodDtypeCode
 from .timedeltas cimport delta_to_nanoseconds
+
 from .timedeltas import Timedelta
+
 from .timestamps cimport _Timestamp
+
 from .timestamps import Timestamp
 # ---------------------------------------------------------------------
diff --git a/pandas/_libs/tslibs/parsing.pyx b/pandas/_libs/tslibs/parsing.pyx
index c4f369d0d3b3f..8429aebbd85b8 100644
--- a/pandas/_libs/tslibs/parsing.pyx
+++ b/pandas/_libs/tslibs/parsing.pyx
@@ -9,39 +9,44 @@ from libc.string cimport strchr
 import cython
 from cython import Py_ssize_t
-from cpython.object cimport PyObject_Str
-
 from cpython.datetime cimport datetime, datetime_new, import_datetime, tzinfo
+from cpython.object cimport PyObject_Str
 from cpython.version cimport PY_VERSION_HEX
+
 import_datetime()
 import numpy as np
+
 cimport numpy as cnp
-from numpy cimport (PyArray_GETITEM, PyArray_ITER_DATA, PyArray_ITER_NEXT,
-                    PyArray_IterNew, flatiter, float64_t)
+from numpy cimport (
+    PyArray_GETITEM,
+    PyArray_ITER_DATA,
+    PyArray_ITER_NEXT,
+    PyArray_IterNew,
+    flatiter,
+    float64_t,
+)
+
 cnp.import_array()
 # dateutil compat
-
-from dateutil.tz import (tzoffset,
-                         tzlocal as _dateutil_tzlocal,
-                         tzutc as _dateutil_tzutc,
-                         tzstr as _dateutil_tzstr)
+
+from dateutil.parser import DEFAULTPARSER, parse as du_parse
 from dateutil.relativedelta import relativedelta
-from dateutil.parser import DEFAULTPARSER
-from dateutil.parser import parse as du_parse
+from dateutil.tz import (
+    tzlocal as _dateutil_tzlocal,
+    tzoffset,
+    tzstr as _dateutil_tzstr,
+    tzutc as _dateutil_tzutc,
+)
 from pandas._config import get_option
 from pandas._libs.tslibs.ccalendar cimport c_MONTH_NUMBERS
-from pandas._libs.tslibs.nattype cimport (
-    c_nat_strings as nat_strings,
-    c_NaT as NaT,
-)
-from pandas._libs.tslibs.util cimport (
-    is_array,
-    get_c_string_buf_and_size,
-)
+from pandas._libs.tslibs.nattype cimport c_NaT as NaT, c_nat_strings as nat_strings
 from pandas._libs.tslibs.offsets cimport is_offset_object
+from pandas._libs.tslibs.util cimport get_c_string_buf_and_size, is_array
+
 cdef extern from "../src/headers/portable.h":
     int getdigit_ascii(char c, int default) nogil
diff --git a/pandas/_libs/tslibs/period.pyx b/pandas/_libs/tslibs/period.pyx
index 20961c6da56bd..86b6533f5caf5 100644
--- a/pandas/_libs/tslibs/period.pyx
+++ b/pandas/_libs/tslibs/period.pyx
@@ -1,96 +1,98 @@
 import warnings
-from cpython.object cimport PyObject_RichCompareBool, Py_EQ, Py_NE
+from cpython.object cimport Py_EQ, Py_NE, PyObject_RichCompareBool
+from numpy cimport import_array, int64_t, ndarray
-from numpy cimport int64_t, import_array, ndarray
 import numpy as np
+
 import_array()
 from libc.stdlib cimport free, malloc
+from libc.string cimport memset, strlen
 from libc.time cimport strftime, tm
-from libc.string cimport strlen, memset
 import cython
 from cpython.datetime cimport (
-    datetime,
     PyDate_Check,
     PyDateTime_Check,
     PyDateTime_IMPORT,
     PyDelta_Check,
+    datetime,
 )
+
 # import datetime C API
 PyDateTime_IMPORT
 from pandas._libs.tslibs.np_datetime cimport (
-    npy_datetimestruct,
-    dtstruct_to_dt64,
-    dt64_to_dtstruct,
-    pandas_datetime_to_datetimestruct,
-    check_dts_bounds,
     NPY_DATETIMEUNIT,
     NPY_FR_D,
     NPY_FR_us,
+    check_dts_bounds,
+    dt64_to_dtstruct,
+    dtstruct_to_dt64,
+    npy_datetimestruct,
+    pandas_datetime_to_datetimestruct,
 )
+
 cdef extern from "src/datetime/np_datetime.h":
     int64_t npy_datetimestruct_to_datetime(NPY_DATETIMEUNIT fr,
                                            npy_datetimestruct *d) nogil
 cimport pandas._libs.tslibs.util as util
-from pandas._libs.tslibs.timestamps import Timestamp
 from pandas._libs.tslibs.timedeltas import Timedelta
-from pandas._libs.tslibs.timedeltas cimport (
-    delta_to_nanoseconds,
-    is_any_td_scalar,
-)
+from pandas._libs.tslibs.timestamps import Timestamp
 from pandas._libs.tslibs.ccalendar cimport (
+    c_MONTH_NUMBERS,
     dayofweek,
     get_day_of_year,
-    is_leapyear,
-    get_week_of_year,
     get_days_in_month,
+    get_week_of_year,
+    is_leapyear,
 )
-from pandas._libs.tslibs.ccalendar cimport c_MONTH_NUMBERS
+from pandas._libs.tslibs.timedeltas cimport delta_to_nanoseconds, is_any_td_scalar
+
 from pandas._libs.tslibs.conversion import ensure_datetime64ns
 from pandas._libs.tslibs.dtypes cimport (
-    PeriodDtypeBase,
-    FR_UND,
     FR_ANN,
-    FR_QTR,
-    FR_MTH,
-    FR_WK,
     FR_BUS,
     FR_DAY,
     FR_HR,
     FR_MIN,
-    FR_SEC,
     FR_MS,
-    FR_US,
+    FR_MTH,
     FR_NS,
+    FR_QTR,
+    FR_SEC,
+    FR_UND,
+    FR_US,
+    FR_WK,
+    PeriodDtypeBase,
     attrname_to_abbrevs,
 )
-
 from pandas._libs.tslibs.parsing cimport get_rule_month
+
 from pandas._libs.tslibs.parsing import parse_time_string
+
 from pandas._libs.tslibs.nattype cimport (
-    _nat_scalar_rules,
     NPY_NAT,
-    is_null_datetimelike,
+    _nat_scalar_rules,
     c_NaT as NaT,
     c_nat_strings as nat_strings,
+    is_null_datetimelike,
 )
 from pandas._libs.tslibs.offsets cimport (
     BaseOffset,
-    to_offset,
-    is_tick_object,
     is_offset_object,
+    is_tick_object,
+    to_offset,
 )
-from pandas._libs.tslibs.offsets import INVALID_FREQ_ERR_MSG
+from pandas._libs.tslibs.offsets import INVALID_FREQ_ERR_MSG
 cdef:
     enum:
diff --git a/pandas/_libs/tslibs/strptime.pyx b/pandas/_libs/tslibs/strptime.pyx
index 660b582f73e6e..d2690be905a68 100644
--- a/pandas/_libs/tslibs/strptime.pyx
+++ b/pandas/_libs/tslibs/strptime.pyx
@@ -1,27 +1,30 @@
 """Strptime-related classes and functions.
""" -import time -import locale import calendar +import locale import re +import time from cpython.datetime cimport date, tzinfo from _thread import allocate_lock as _thread_allocate_lock +import numpy as np import pytz -import numpy as np from numpy cimport int64_t -from pandas._libs.tslibs.np_datetime cimport ( - check_dts_bounds, dtstruct_to_dt64, npy_datetimestruct) - from pandas._libs.tslibs.nattype cimport ( - checknull_with_nat, NPY_NAT, c_nat_strings as nat_strings, + checknull_with_nat, ) +from pandas._libs.tslibs.np_datetime cimport ( + check_dts_bounds, + dtstruct_to_dt64, + npy_datetimestruct, +) + cdef dict _parse_code_table = {'y': 0, 'Y': 1, diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx index 8f3a599bf107c..ee32ed53a908b 100644 --- a/pandas/_libs/tslibs/timedeltas.pyx +++ b/pandas/_libs/tslibs/timedeltas.pyx @@ -2,39 +2,47 @@ import collections import cython -from cpython.object cimport Py_NE, Py_EQ, PyObject_RichCompare +from cpython.object cimport Py_EQ, Py_NE, PyObject_RichCompare import numpy as np + cimport numpy as cnp from numpy cimport int64_t, ndarray + cnp.import_array() -from cpython.datetime cimport (timedelta, - PyDateTime_Check, PyDelta_Check, - PyDateTime_IMPORT) +from cpython.datetime cimport ( + PyDateTime_Check, + PyDateTime_IMPORT, + PyDelta_Check, + timedelta, +) + PyDateTime_IMPORT cimport pandas._libs.tslibs.util as util -from pandas._libs.tslibs.util cimport ( - is_timedelta64_object, is_datetime64_object, is_integer_object, - is_float_object, is_array -) - from pandas._libs.tslibs.base cimport ABCTimestamp - from pandas._libs.tslibs.conversion cimport cast_from_unit - -from pandas._libs.tslibs.np_datetime cimport ( - cmp_scalar, td64_to_tdstruct, pandas_timedeltastruct) - from pandas._libs.tslibs.nattype cimport ( - checknull_with_nat, NPY_NAT, c_NaT as NaT, c_nat_strings as nat_strings, + checknull_with_nat, +) +from pandas._libs.tslibs.np_datetime cimport ( + cmp_scalar, + pandas_timedeltastruct, + td64_to_tdstruct, ) from pandas._libs.tslibs.offsets cimport is_tick_object +from pandas._libs.tslibs.util cimport ( + is_array, + is_datetime64_object, + is_float_object, + is_integer_object, + is_timedelta64_object, +) # ---------------------------------------------------------------------- # Constants diff --git a/pandas/_libs/tslibs/timestamps.pyx b/pandas/_libs/tslibs/timestamps.pyx index 8cef685933863..bddfc30d86a53 100644 --- a/pandas/_libs/tslibs/timestamps.pyx +++ b/pandas/_libs/tslibs/timestamps.pyx @@ -9,54 +9,66 @@ shadows the python class, where we do any heavy lifting. 
 import warnings
 import numpy as np
+
 cimport numpy as cnp
-from numpy cimport int64_t, int8_t, uint8_t, ndarray
-cnp.import_array()
+from numpy cimport int8_t, int64_t, ndarray, uint8_t
-from cpython.object cimport (PyObject_RichCompareBool, PyObject_RichCompare,
-                             Py_EQ, Py_NE)
+cnp.import_array()
-from cpython.datetime cimport (
-    datetime,
-    time,
-    tzinfo,
-    tzinfo as tzinfo_type,  # alias bc `tzinfo` is a kwarg below
+from cpython.datetime cimport (  # alias bc `tzinfo` is a kwarg below
     PyDateTime_Check,
+    PyDateTime_IMPORT,
     PyDelta_Check,
     PyTZInfo_Check,
-    PyDateTime_IMPORT,
-)
-PyDateTime_IMPORT
-
-from pandas._libs.tslibs.util cimport (
-    is_datetime64_object, is_float_object, is_integer_object,
-    is_timedelta64_object, is_array,
+    datetime,
+    time,
+    tzinfo as tzinfo_type,
 )
+from cpython.object cimport Py_EQ, Py_NE, PyObject_RichCompare, PyObject_RichCompareBool
-from pandas._libs.tslibs.base cimport ABCTimestamp
+PyDateTime_IMPORT
 from pandas._libs.tslibs cimport ccalendar
-
+from pandas._libs.tslibs.base cimport ABCTimestamp
 from pandas._libs.tslibs.conversion cimport (
     _TSObject,
-    convert_to_tsobject,
     convert_datetime_to_tsobject,
+    convert_to_tsobject,
     normalize_i8_stamp,
 )
-from pandas._libs.tslibs.fields import get_start_end_field, get_date_name_field
+from pandas._libs.tslibs.util cimport (
+    is_array,
+    is_datetime64_object,
+    is_float_object,
+    is_integer_object,
+    is_timedelta64_object,
+)
+
+from pandas._libs.tslibs.fields import get_date_name_field, get_start_end_field
+
 from pandas._libs.tslibs.nattype cimport NPY_NAT, c_NaT as NaT
 from pandas._libs.tslibs.np_datetime cimport (
-    check_dts_bounds, npy_datetimestruct, dt64_to_dtstruct,
+    check_dts_bounds,
     cmp_scalar,
+    dt64_to_dtstruct,
+    npy_datetimestruct,
     pydatetime_to_dt64,
 )
+
 from pandas._libs.tslibs.np_datetime import OutOfBoundsDatetime
-from pandas._libs.tslibs.offsets cimport to_offset, is_offset_object
-from pandas._libs.tslibs.timedeltas cimport is_any_td_scalar, delta_to_nanoseconds
+
+from pandas._libs.tslibs.offsets cimport is_offset_object, to_offset
+from pandas._libs.tslibs.timedeltas cimport delta_to_nanoseconds, is_any_td_scalar
+
 from pandas._libs.tslibs.timedeltas import Timedelta
+
 from pandas._libs.tslibs.timezones cimport (
-    is_utc, maybe_get_tz, treat_tz_as_pytz, utc_pytz as UTC,
-    get_timezone, tz_compare,
+    get_timezone,
+    is_utc,
+    maybe_get_tz,
+    treat_tz_as_pytz,
+    tz_compare,
+    utc_pytz as UTC,
 )
 from pandas._libs.tslibs.tzconversion cimport (
     tz_convert_from_utc_single,
diff --git a/pandas/_libs/tslibs/timezones.pyx b/pandas/_libs/tslibs/timezones.pyx
index a8c785704d8e8..b82291a71057e 100644
--- a/pandas/_libs/tslibs/timezones.pyx
+++ b/pandas/_libs/tslibs/timezones.pyx
@@ -1,27 +1,31 @@
 from datetime import timezone
+
 from cpython.datetime cimport datetime, timedelta, tzinfo
 # dateutil compat
+
 from dateutil.tz import (
     gettz as dateutil_gettz,
     tzfile as _dateutil_tzfile,
     tzlocal as _dateutil_tzlocal,
     tzutc as _dateutil_tzutc,
 )
-
-
-from pytz.tzinfo import BaseTzInfo as _pytz_BaseTzInfo
 import pytz
+from pytz.tzinfo import BaseTzInfo as _pytz_BaseTzInfo
+
 UTC = pytz.utc
 import numpy as np
+
 cimport numpy as cnp
 from numpy cimport int64_t
+
 cnp.import_array()
 # ----------------------------------------------------------------------
-from pandas._libs.tslibs.util cimport is_integer_object, get_nat
+from pandas._libs.tslibs.util cimport get_nat, is_integer_object
+
 cdef int64_t NPY_NAT = get_nat()
 cdef tzinfo utc_stdlib = timezone.utc
diff --git a/pandas/_libs/tslibs/tzconversion.pyx b/pandas/_libs/tslibs/tzconversion.pyx
index 606639af16a18..2b148cd8849f1 100644
--- a/pandas/_libs/tslibs/tzconversion.pyx
+++ b/pandas/_libs/tslibs/tzconversion.pyx
@@ -5,21 +5,27 @@ import cython
 from cython import Py_ssize_t
 from cpython.datetime cimport (
-    PyDateTime_IMPORT, PyDelta_Check, datetime, timedelta, tzinfo)
+    PyDateTime_IMPORT,
+    PyDelta_Check,
+    datetime,
+    timedelta,
+    tzinfo,
+)
+
 PyDateTime_IMPORT
-import pytz
 from dateutil.tz import tzutc
-
 import numpy as np
+import pytz
+
 cimport numpy as cnp
-from numpy cimport ndarray, int64_t, uint8_t, intp_t
+from numpy cimport int64_t, intp_t, ndarray, uint8_t
+
 cnp.import_array()
 from pandas._libs.tslibs.ccalendar cimport DAY_NANOS, HOUR_NANOS
 from pandas._libs.tslibs.nattype cimport NPY_NAT
-from pandas._libs.tslibs.np_datetime cimport (
-    npy_datetimestruct, dt64_to_dtstruct)
+from pandas._libs.tslibs.np_datetime cimport dt64_to_dtstruct, npy_datetimestruct
 from pandas._libs.tslibs.timezones cimport (
     get_dst_info,
     get_utcoffset,
diff --git a/pandas/_libs/tslibs/vectorized.pyx b/pandas/_libs/tslibs/vectorized.pyx
index c8f8daf6724c2..bdc00f6c6e21a 100644
--- a/pandas/_libs/tslibs/vectorized.pyx
+++ b/pandas/_libs/tslibs/vectorized.pyx
@@ -1,18 +1,21 @@
 import cython
-from cpython.datetime cimport datetime, date, time, tzinfo
+from cpython.datetime cimport date, datetime, time, tzinfo
 import numpy as np
+
 from numpy cimport int64_t, intp_t, ndarray
 from .conversion cimport normalize_i8_stamp
+
 from .dtypes import Resolution
+
 from .nattype cimport NPY_NAT, c_NaT as NaT
-from .np_datetime cimport npy_datetimestruct, dt64_to_dtstruct
+from .np_datetime cimport dt64_to_dtstruct, npy_datetimestruct
 from .offsets cimport to_offset
 from .period cimport get_period_ordinal
 from .timestamps cimport create_timestamp_from_ts
-from .timezones cimport is_utc, is_tzlocal, get_dst_info
+from .timezones cimport get_dst_info, is_tzlocal, is_utc
 from .tzconversion cimport tz_convert_utc_to_tzlocal
 # -------------------------------------------------------------------------
diff --git a/pandas/_libs/window/aggregations.pyx b/pandas/_libs/window/aggregations.pyx
index 362d0e6263697..3ec4547d223ce 100644
--- a/pandas/_libs/window/aggregations.pyx
+++ b/pandas/_libs/window/aggregations.pyx
@@ -2,13 +2,15 @@ import cython
 from cython import Py_ssize_t
-from libcpp.deque cimport deque
-from libc.stdlib cimport malloc, free
+from libc.stdlib cimport free, malloc
+from libcpp.deque cimport deque
 import numpy as np
+
 cimport numpy as cnp
-from numpy cimport ndarray, int64_t, float64_t, float32_t, uint8_t
+from numpy cimport float32_t, float64_t, int64_t, ndarray, uint8_t
+
 cnp.import_array()
@@ -22,6 +24,7 @@ from pandas._libs.algos import is_monotonic
 from pandas._libs.util cimport numeric
+
 cdef extern from "../src/skiplist.h":
     ctypedef struct node_t:
         node_t **next
diff --git a/pandas/_libs/window/indexers.pyx b/pandas/_libs/window/indexers.pyx
index 8a1e7feb57ace..9af1159a805ec 100644
--- a/pandas/_libs/window/indexers.pyx
+++ b/pandas/_libs/window/indexers.pyx
@@ -1,7 +1,8 @@
 # cython: boundscheck=False, wraparound=False, cdivision=True
 import numpy as np
-from numpy cimport ndarray, int64_t
+
+from numpy cimport int64_t, ndarray
 # Cython routines for window indexers
diff --git a/pandas/_libs/writers.pyx b/pandas/_libs/writers.pyx
index 2d5b31d7ccbcf..40c39aabb7a7a 100644
--- a/pandas/_libs/writers.pyx
+++ b/pandas/_libs/writers.pyx
@@ -5,8 +5,8 @@
 from cpython.bytes cimport PyBytes_GET_SIZE
 from cpython.unicode cimport PyUnicode_GET_SIZE
 import numpy as np
-from numpy cimport ndarray, uint8_t
+from numpy cimport ndarray, uint8_t
 ctypedef fused pandas_string:
     str
diff --git a/pandas/_testing.py b/pandas/_testing.py
index 1cf9304ed2715..a020fbff3553a 100644
--- a/pandas/_testing.py
+++ b/pandas/_testing.py
@@ -535,7 +535,7 @@ def rands(nchars):
 def close(fignum=None):
-    from matplotlib.pyplot import get_fignums, close as _close
+    from matplotlib.pyplot import close as _close, get_fignums
     if fignum is None:
         for fignum in get_fignums():
diff --git a/pandas/_typing.py b/pandas/_typing.py
index 8e98833ad37f7..76ec527e6e258 100644
--- a/pandas/_typing.py
+++ b/pandas/_typing.py
@@ -24,13 +24,15 @@
 # https://mypy.readthedocs.io/en/latest/common_issues.html#import-cycles
 if TYPE_CHECKING:
     from pandas._libs import Period, Timedelta, Timestamp  # noqa: F401
-    from pandas.core.arrays.base import ExtensionArray  # noqa: F401
+
     from pandas.core.dtypes.dtypes import ExtensionDtype  # noqa: F401
-    from pandas.core.indexes.base import Index  # noqa: F401
-    from pandas.core.generic import NDFrame  # noqa: F401
+
     from pandas import Interval  # noqa: F401
-    from pandas.core.series import Series  # noqa: F401
+    from pandas.core.arrays.base import ExtensionArray  # noqa: F401
     from pandas.core.frame import DataFrame  # noqa: F401
+    from pandas.core.generic import NDFrame  # noqa: F401
+    from pandas.core.indexes.base import Index  # noqa: F401
+    from pandas.core.series import Series  # noqa: F401
 # array-like
diff --git a/pandas/compat/pickle_compat.py b/pandas/compat/pickle_compat.py
index 0484de3fa165d..015b203a60256 100644
--- a/pandas/compat/pickle_compat.py
+++ b/pandas/compat/pickle_compat.py
@@ -14,7 +14,7 @@
 from pandas import Index
 if TYPE_CHECKING:
-    from pandas import Series, DataFrame
+    from pandas import DataFrame, Series
 def load_reduce(self):
diff --git a/pandas/core/apply.py b/pandas/core/apply.py
index 733dbeed34b72..6b8d7dc35fe95 100644
--- a/pandas/core/apply.py
+++ b/pandas/core/apply.py
@@ -15,7 +15,7 @@
 from pandas.core.construction import create_series_with_explicit_dtype
 if TYPE_CHECKING:
-    from pandas import DataFrame, Series, Index
+    from pandas import DataFrame, Index, Series
 ResType = Dict[int, Any]
diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py
index db9cfd9d7fc59..6e5c7bc699962 100644
--- a/pandas/core/arrays/categorical.py
+++ b/pandas/core/arrays/categorical.py
@@ -520,7 +520,7 @@ def _from_inferred_categories(
         -------
         Categorical
         """
-        from pandas import Index, to_numeric, to_datetime, to_timedelta
+        from pandas import Index, to_datetime, to_numeric, to_timedelta
         cats = Index(inferred_categories)
         known_categories = (
@@ -1403,7 +1403,7 @@ def value_counts(self, dropna=True):
         --------
         Series.value_counts
         """
-        from pandas import Series, CategoricalIndex
+        from pandas import CategoricalIndex, Series
         code, cat = self._codes, self.categories
         ncat, mask = len(cat), 0 <= code
diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py
index ee4d43fdb3bc2..c6945e2f78b5a 100644
--- a/pandas/core/arrays/datetimelike.py
+++ b/pandas/core/arrays/datetimelike.py
@@ -959,7 +959,7 @@ def value_counts(self, dropna=False):
         -------
         Series
         """
-        from pandas import Series, Index
+        from pandas import Index, Series
         if dropna:
             values = self[~self.isna()]._data
diff --git a/pandas/core/arrays/integer.py b/pandas/core/arrays/integer.py
index b0958af41158c..57df067c7b16e 100644
--- a/pandas/core/arrays/integer.py
+++ b/pandas/core/arrays/integer.py
@@ -116,6 +116,7 @@ def __from_arrow__(
         Construct IntegerArray from pyarrow Array/ChunkedArray.
         """
         import pyarrow  # noqa: F811
+
         from pandas.core.arrays._arrow_utils import pyarrow_array_to_numpy_and_mask
         pyarrow_type = pyarrow.from_numpy_dtype(self.type)
diff --git a/pandas/core/arrays/interval.py b/pandas/core/arrays/interval.py
index c861d25afd13f..ed2437cc061bd 100644
--- a/pandas/core/arrays/interval.py
+++ b/pandas/core/arrays/interval.py
@@ -1105,6 +1105,7 @@ def __arrow_array__(self, type=None):
         Convert myself into a pyarrow Array.
         """
         import pyarrow
+
         from pandas.core.arrays._arrow_utils import ArrowIntervalType
         try:
diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py
index 8d5cb12d60e4d..fe78481d99d30 100644
--- a/pandas/core/arrays/period.py
+++ b/pandas/core/arrays/period.py
@@ -300,6 +300,7 @@ def __arrow_array__(self, type=None):
         Convert myself into a pyarrow Array.
         """
         import pyarrow
+
         from pandas.core.arrays._arrow_utils import ArrowPeriodType
         if type is not None:
diff --git a/pandas/core/arrays/sparse/accessor.py b/pandas/core/arrays/sparse/accessor.py
index 8a30d2b954b55..da8d695c59b9e 100644
--- a/pandas/core/arrays/sparse/accessor.py
+++ b/pandas/core/arrays/sparse/accessor.py
@@ -87,8 +87,8 @@ def from_coo(cls, A, dense_index=False):
         1    0    3.0
         dtype: Sparse[float64, nan]
         """
-        from pandas.core.arrays.sparse.scipy_sparse import _coo_to_sparse_series
         from pandas import Series
+        from pandas.core.arrays.sparse.scipy_sparse import _coo_to_sparse_series
         result = _coo_to_sparse_series(A, dense_index=dense_index)
         result = Series(result.array, index=result.index, copy=False)
@@ -253,9 +253,10 @@ def from_spmatrix(cls, data, index=None, columns=None):
         1    0.0  1.0  0.0
         2    0.0  0.0  1.0
         """
-        from pandas import DataFrame
         from pandas._libs.sparse import IntIndex
+
+        from pandas import DataFrame
+
         data = data.tocsc()
         index, columns = cls._prep_index(data, index, columns)
         n_rows, n_columns = data.shape
@@ -354,8 +355,8 @@ def density(self) -> float:
     @staticmethod
     def _prep_index(data, index, columns):
-        import pandas.core.indexes.base as ibase
         from pandas.core.indexes.api import ensure_index
+        import pandas.core.indexes.base as ibase
         N, K = data.shape
         if index is None:
diff --git a/pandas/core/config_init.py b/pandas/core/config_init.py
index 86f6be77bc505..2b2431149e230 100644
--- a/pandas/core/config_init.py
+++ b/pandas/core/config_init.py
@@ -662,8 +662,10 @@ def register_plotting_backend_cb(key):
 def register_converter_cb(key):
-    from pandas.plotting import register_matplotlib_converters
-    from pandas.plotting import deregister_matplotlib_converters
+    from pandas.plotting import (
+        deregister_matplotlib_converters,
+        register_matplotlib_converters,
+    )
     if cf.get_option(key):
         register_matplotlib_converters()
diff --git a/pandas/core/construction.py b/pandas/core/construction.py
index 6c58698989e96..47f10f1f65f4a 100644
--- a/pandas/core/construction.py
+++ b/pandas/core/construction.py
@@ -48,9 +48,9 @@
 import pandas.core.common as com
 if TYPE_CHECKING:
-    from pandas.core.series import Series  # noqa: F401
-    from pandas.core.indexes.api import Index  # noqa: F401
     from pandas.core.arrays import ExtensionArray  # noqa: F401
+    from pandas.core.indexes.api import Index  # noqa: F401
+    from pandas.core.series import Series  # noqa: F401
 def array(
@@ -255,14 +255,14 @@ def array(
     ValueError: Cannot pass scalar '1' to 'pandas.array'.
""" from pandas.core.arrays import ( - period_array, BooleanArray, + DatetimeArray, IntegerArray, IntervalArray, PandasArray, - DatetimeArray, - TimedeltaArray, StringArray, + TimedeltaArray, + period_array, ) if lib.is_scalar(data): diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index 6b84f0e81f48b..228329898b6a4 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -1244,6 +1244,7 @@ def try_datetime(v): # if so coerce to a DatetimeIndex; if they are not the same, # then these stay as object dtype, xref GH19671 from pandas._libs.tslibs import conversion + from pandas import DatetimeIndex try: @@ -1303,8 +1304,8 @@ def maybe_cast_to_datetime(value, dtype, errors: str = "raise"): try to cast the array/value to a datetimelike dtype, converting float nan to iNaT """ - from pandas.core.tools.timedeltas import to_timedelta from pandas.core.tools.datetimes import to_datetime + from pandas.core.tools.timedeltas import to_timedelta if dtype is not None: if isinstance(dtype, str): diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py index 22480fbc47508..8350e136417b1 100644 --- a/pandas/core/dtypes/dtypes.py +++ b/pandas/core/dtypes/dtypes.py @@ -30,12 +30,13 @@ if TYPE_CHECKING: import pyarrow # noqa: F401 + + from pandas import Categorical # noqa: F401 from pandas.core.arrays import ( # noqa: F401 + DatetimeArray, IntervalArray, PeriodArray, - DatetimeArray, ) - from pandas import Categorical # noqa: F401 str_type = str @@ -391,12 +392,13 @@ def __repr__(self) -> str_type: @staticmethod def _hash_categories(categories, ordered: Ordered = True) -> int: + from pandas.core.dtypes.common import DT64NS_DTYPE, is_datetime64tz_dtype + from pandas.core.util.hashing import ( - hash_array, _combine_hash_arrays, + hash_array, hash_tuples, ) - from pandas.core.dtypes.common import is_datetime64tz_dtype, DT64NS_DTYPE if len(categories) and isinstance(categories[0], tuple): # assumes if any individual category is a tuple, then all our. ATM @@ -939,6 +941,7 @@ def __from_arrow__( Construct PeriodArray from pyarrow Array/ChunkedArray. """ import pyarrow # noqa: F811 + from pandas.core.arrays import PeriodArray from pandas.core.arrays._arrow_utils import pyarrow_array_to_numpy_and_mask @@ -1136,6 +1139,7 @@ def __from_arrow__( Construct IntervalArray from pyarrow Array/ChunkedArray. 
""" import pyarrow # noqa: F811 + from pandas.core.arrays import IntervalArray if isinstance(array, pyarrow.Array): diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 3f634c1e6e1ff..79627e43d78c2 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -150,6 +150,7 @@ if TYPE_CHECKING: from pandas.core.groupby.generic import DataFrameGroupBy + from pandas.io.formats.style import Styler # --------------------------------------------------------------------- @@ -5205,8 +5206,9 @@ def duplicated( 4 True dtype: bool """ + from pandas._libs.hashtable import _SIZE_HINT_LIMIT, duplicated_int64 + from pandas.core.sorting import get_group_index - from pandas._libs.hashtable import duplicated_int64, _SIZE_HINT_LIMIT if self.empty: return self._constructor_sliced(dtype=bool) @@ -7868,8 +7870,8 @@ def join( def _join_compat( self, other, on=None, how="left", lsuffix="", rsuffix="", sort=False ): - from pandas.core.reshape.merge import merge from pandas.core.reshape.concat import concat + from pandas.core.reshape.merge import merge if isinstance(other, Series): if other.name is None: diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index ec7b14f27c5a1..c50b753cf3293 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -681,8 +681,8 @@ def value_counts( self, normalize=False, sort=True, ascending=False, bins=None, dropna=True ): - from pandas.core.reshape.tile import cut from pandas.core.reshape.merge import _get_join_indexers + from pandas.core.reshape.tile import cut if bins is not None and not np.iterable(bins): # scalar bins cannot be done at top level diff --git a/pandas/core/groupby/grouper.py b/pandas/core/groupby/grouper.py index 67003dffb90bb..8239a792c65dd 100644 --- a/pandas/core/groupby/grouper.py +++ b/pandas/core/groupby/grouper.py @@ -237,7 +237,6 @@ def __new__(cls, *args, **kwargs): # core/groupby/grouper.py::Grouper # raising these warnings from TimeGrouper directly would fail the test: # tests/resample/test_deprecated.py::test_deprecating_on_loffset_and_base - # hacky way to set the stacklevel: if cls is TimeGrouper it means # that the call comes from a pandas internal call of resample, # otherwise it comes from pd.Grouper diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 986d6323e704e..1be381e38b157 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -5731,9 +5731,9 @@ def _maybe_cast_data_without_dtype(subarr): """ # Runtime import needed bc IntervalArray imports Index from pandas.core.arrays import ( + DatetimeArray, IntervalArray, PeriodArray, - DatetimeArray, TimedeltaArray, ) diff --git a/pandas/core/internals/ops.py b/pandas/core/internals/ops.py index fd9a9a5ef6c93..6eedf72726acb 100644 --- a/pandas/core/internals/ops.py +++ b/pandas/core/internals/ops.py @@ -5,8 +5,8 @@ from pandas._typing import ArrayLike if TYPE_CHECKING: - from pandas.core.internals.managers import BlockManager # noqa:F401 from pandas.core.internals.blocks import Block # noqa:F401 + from pandas.core.internals.managers import BlockManager # noqa:F401 def operate_blockwise( diff --git a/pandas/core/strings.py b/pandas/core/strings.py index a1db7742916de..6702bf519c52e 100644 --- a/pandas/core/strings.py +++ b/pandas/core/strings.py @@ -155,7 +155,7 @@ def _map_stringarray( an ndarray. 
""" - from pandas.arrays import IntegerArray, StringArray, BooleanArray + from pandas.arrays import BooleanArray, IntegerArray, StringArray mask = isna(arr) @@ -2186,7 +2186,7 @@ def _wrap_result( returns_string=True, ): - from pandas import Index, Series, MultiIndex + from pandas import Index, MultiIndex, Series # for category, we do the stuff on the categories, so blow it up # to the full series again @@ -2292,7 +2292,7 @@ def _get_series_list(self, others): list of Series Others transformed into list of Series. """ - from pandas import Series, DataFrame + from pandas import DataFrame, Series # self._orig is either Series or Index idx = self._orig if isinstance(self._orig, ABCIndexClass) else self._orig.index diff --git a/pandas/core/tools/datetimes.py b/pandas/core/tools/datetimes.py index 0adab143f6052..7aac2f793f61a 100644 --- a/pandas/core/tools/datetimes.py +++ b/pandas/core/tools/datetimes.py @@ -53,9 +53,10 @@ from pandas.core.indexes.datetimes import DatetimeIndex if TYPE_CHECKING: - from pandas import Series # noqa:F401 from pandas._libs.tslibs.nattype import NaTType # noqa:F401 + from pandas import Series # noqa:F401 + # --------------------------------------------------------------------- # types used in annotations @@ -876,7 +877,7 @@ def _assemble_from_unit_mappings(arg, errors, tz): ------- Series """ - from pandas import to_timedelta, to_numeric, DataFrame + from pandas import DataFrame, to_numeric, to_timedelta arg = DataFrame(arg) if not arg.columns.is_unique: diff --git a/pandas/core/util/hashing.py b/pandas/core/util/hashing.py index 1b56b6d5a46fa..d79b9f4092325 100644 --- a/pandas/core/util/hashing.py +++ b/pandas/core/util/hashing.py @@ -275,7 +275,7 @@ def hash_array( # then hash and rename categories. We allow skipping the categorization # when the values are known/likely to be unique. 
     if categorize:
-        from pandas import factorize, Categorical, Index
+        from pandas import Categorical, Index, factorize
         codes, categories = factorize(vals, sort=False)
         cat = Categorical(codes, Index(categories), ordered=False, fastpath=True)
diff --git a/pandas/io/clipboard/__init__.py b/pandas/io/clipboard/__init__.py
index 40bff5a75709b..d16955a98b62f 100644
--- a/pandas/io/clipboard/__init__.py
+++ b/pandas/io/clipboard/__init__.py
@@ -311,17 +311,17 @@ def init_windows_clipboard():
     global HGLOBAL, LPVOID, DWORD, LPCSTR, INT
     global HWND, HINSTANCE, HMENU, BOOL, UINT, HANDLE
     from ctypes.wintypes import (
-        HGLOBAL,
-        LPVOID,
+        BOOL,
         DWORD,
-        LPCSTR,
-        INT,
-        HWND,
+        HANDLE,
+        HGLOBAL,
         HINSTANCE,
         HMENU,
-        BOOL,
+        HWND,
+        INT,
+        LPCSTR,
+        LPVOID,
         UINT,
-        HANDLE,
     )
     windll = ctypes.windll
@@ -528,8 +528,8 @@ def determine_clipboard():
     # Setup for the MAC OS X platform:
     if os.name == "mac" or platform.system() == "Darwin":
        try:
-            import Foundation  # check if pyobjc is installed
             import AppKit
+            import Foundation  # check if pyobjc is installed
         except ImportError:
             return init_osx_pbcopy_clipboard()
         else:
diff --git a/pandas/io/excel/_base.py b/pandas/io/excel/_base.py
index 2a12f779230b2..b1bbda4a4b7e0 100644
--- a/pandas/io/excel/_base.py
+++ b/pandas/io/excel/_base.py
@@ -834,8 +834,8 @@ class ExcelFile:
     from pandas.io.excel._odfreader import _ODFReader
     from pandas.io.excel._openpyxl import _OpenpyxlReader
-    from pandas.io.excel._xlrd import _XlrdReader
     from pandas.io.excel._pyxlsb import _PyxlsbReader
+    from pandas.io.excel._xlrd import _XlrdReader
     _engines = {
         "xlrd": _XlrdReader,
diff --git a/pandas/io/excel/_odfreader.py b/pandas/io/excel/_odfreader.py
index 85ec9afaaec25..44abaf5d3b3c9 100644
--- a/pandas/io/excel/_odfreader.py
+++ b/pandas/io/excel/_odfreader.py
@@ -191,9 +191,9 @@ def _get_cell_string_value(self, cell) -> str:
         Find and decode OpenDocument text:s tags that represent a run
         length encoded sequence of space characters.
""" - from odf.element import Text, Element - from odf.text import S, P + from odf.element import Element, Text from odf.namespaces import TEXTNS + from odf.text import P, S text_p = P().qname text_s = S().qname diff --git a/pandas/io/excel/_openpyxl.py b/pandas/io/excel/_openpyxl.py index 0696d82e51f34..03a30cbd62f9a 100644 --- a/pandas/io/excel/_openpyxl.py +++ b/pandas/io/excel/_openpyxl.py @@ -225,7 +225,7 @@ def _convert_to_fill(cls, fill_dict): ------- fill : openpyxl.styles.Fill """ - from openpyxl.styles import PatternFill, GradientFill + from openpyxl.styles import GradientFill, PatternFill _pattern_fill_key_map = { "patternType": "fill_type", diff --git a/pandas/io/excel/_xlrd.py b/pandas/io/excel/_xlrd.py index 8f7d3b1368fc7..af82c15fd6b66 100644 --- a/pandas/io/excel/_xlrd.py +++ b/pandas/io/excel/_xlrd.py @@ -48,11 +48,11 @@ def get_sheet_by_index(self, index): def get_sheet_data(self, sheet, convert_float): from xlrd import ( - xldate, + XL_CELL_BOOLEAN, XL_CELL_DATE, XL_CELL_ERROR, - XL_CELL_BOOLEAN, XL_CELL_NUMBER, + xldate, ) epoch1904 = self.book.datemode diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py index fe85eab4bfbf5..c05f79f935548 100644 --- a/pandas/io/formats/format.py +++ b/pandas/io/formats/format.py @@ -72,7 +72,7 @@ from pandas.io.formats.printing import adjoin, justify, pprint_thing if TYPE_CHECKING: - from pandas import Series, DataFrame, Categorical + from pandas import Categorical, DataFrame, Series FormattersType = Union[ List[Callable], Tuple[Callable, ...], Mapping[Union[str, int], Callable] diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py index d11144938eb26..fd1efa2d1b668 100644 --- a/pandas/io/formats/style.py +++ b/pandas/io/formats/style.py @@ -42,8 +42,8 @@ try: - import matplotlib.pyplot as plt from matplotlib import colors + import matplotlib.pyplot as plt has_mpl = True except ImportError: diff --git a/pandas/io/html.py b/pandas/io/html.py index 3193f52d239f1..8354cf413814e 100644 --- a/pandas/io/html.py +++ b/pandas/io/html.py @@ -707,8 +707,8 @@ def _build_doc(self): -------- pandas.io.html._HtmlFrameParser._build_doc """ - from lxml.html import parse, fromstring, HTMLParser from lxml.etree import XMLSyntaxError + from lxml.html import HTMLParser, fromstring, parse parser = HTMLParser(recover=True, encoding=self.encoding) diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index b67a1c5781d91..e0df4c29e543e 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -57,7 +57,7 @@ from pandas.io.formats.printing import adjoin, pprint_thing if TYPE_CHECKING: - from tables import File, Node, Col # noqa:F401 + from tables import Col, File, Node # noqa:F401 # versioning attribute diff --git a/pandas/io/sas/sas.pyx b/pandas/io/sas/sas.pyx index 0038e39e2ffcc..17b41fd2b4379 100644 --- a/pandas/io/sas/sas.pyx +++ b/pandas/io/sas/sas.pyx @@ -1,8 +1,8 @@ # cython: profile=False # cython: boundscheck=False, initializedcheck=False from cython import Py_ssize_t - import numpy as np + import pandas.io.sas.sas_constants as const ctypedef signed long long int64_t diff --git a/pandas/io/sql.py b/pandas/io/sql.py index 9177696ca13d6..c87391eaa62b1 100644 --- a/pandas/io/sql.py +++ b/pandas/io/sql.py @@ -937,7 +937,7 @@ def _get_column_names_and_types(self, dtype_mapper): return column_names_and_types def _create_table_setup(self): - from sqlalchemy import Table, Column, PrimaryKeyConstraint + from sqlalchemy import Column, PrimaryKeyConstraint, Table column_names_and_types = 
self._get_column_names_and_types(self._sqlalchemy_type) @@ -1026,15 +1026,15 @@ def _sqlalchemy_type(self, col): col_type = lib.infer_dtype(col, skipna=True) from sqlalchemy.types import ( + TIMESTAMP, BigInteger, - Integer, - Float, - Text, Boolean, - DateTime, Date, + DateTime, + Float, + Integer, + Text, Time, - TIMESTAMP, ) if col_type == "datetime64" or col_type == "datetime": @@ -1079,7 +1079,7 @@ def _sqlalchemy_type(self, col): return Text def _get_dtype(self, sqltype): - from sqlalchemy.types import Integer, Float, Boolean, DateTime, Date, TIMESTAMP + from sqlalchemy.types import TIMESTAMP, Boolean, Date, DateTime, Float, Integer if isinstance(sqltype, Float): return float @@ -1374,7 +1374,7 @@ def to_sql( dtype = {col_name: dtype for col_name in frame} if dtype is not None: - from sqlalchemy.types import to_instance, TypeEngine + from sqlalchemy.types import TypeEngine, to_instance for col, my_type in dtype.items(): if not isinstance(to_instance(my_type), TypeEngine): diff --git a/pandas/plotting/_matplotlib/core.py b/pandas/plotting/_matplotlib/core.py index 353bc8a8936a5..b490e07e43753 100644 --- a/pandas/plotting/_matplotlib/core.py +++ b/pandas/plotting/_matplotlib/core.py @@ -1149,8 +1149,8 @@ def _plot(cls, ax, x, y, style=None, column_num=None, stacking_id=None, **kwds): @classmethod def _ts_plot(cls, ax, x, data, style=None, **kwds): from pandas.plotting._matplotlib.timeseries import ( - _maybe_resample, _decorate_axes, + _maybe_resample, format_dateaxis, ) diff --git a/pandas/plotting/_matplotlib/timeseries.py b/pandas/plotting/_matplotlib/timeseries.py index 8f3571cf13cbc..95f9fbf3995ed 100644 --- a/pandas/plotting/_matplotlib/timeseries.py +++ b/pandas/plotting/_matplotlib/timeseries.py @@ -24,7 +24,7 @@ from pandas.tseries.frequencies import get_period_alias, is_subperiod, is_superperiod if TYPE_CHECKING: - from pandas import Series, Index # noqa:F401 + from pandas import Index, Series # noqa:F401 # --------------------------------------------------------------------- diff --git a/pandas/tests/api/test_api.py b/pandas/tests/api/test_api.py index ecd20796b6f21..caa348d3a1fb9 100644 --- a/pandas/tests/api/test_api.py +++ b/pandas/tests/api/test_api.py @@ -267,9 +267,10 @@ def test_sparsearray(): def test_np(): - import numpy as np import warnings + import numpy as np + with warnings.catch_warnings(): warnings.simplefilter("ignore", FutureWarning) assert (pd.np.arange(0, 10) == np.arange(0, 10)).all() diff --git a/pandas/tests/arrays/interval/test_interval.py b/pandas/tests/arrays/interval/test_interval.py index d517eaaec68d2..0176755b54dd1 100644 --- a/pandas/tests/arrays/interval/test_interval.py +++ b/pandas/tests/arrays/interval/test_interval.py @@ -142,6 +142,7 @@ def test_repr(): @pyarrow_skip def test_arrow_extension_type(): import pyarrow as pa + from pandas.core.arrays._arrow_utils import ArrowIntervalType p1 = ArrowIntervalType(pa.int64(), "left") @@ -158,6 +159,7 @@ def test_arrow_extension_type(): @pyarrow_skip def test_arrow_array(): import pyarrow as pa + from pandas.core.arrays._arrow_utils import ArrowIntervalType intervals = pd.interval_range(1, 5, freq=1).array @@ -187,6 +189,7 @@ def test_arrow_array(): @pyarrow_skip def test_arrow_array_missing(): import pyarrow as pa + from pandas.core.arrays._arrow_utils import ArrowIntervalType arr = IntervalArray.from_breaks([0.0, 1.0, 2.0, 3.0]) @@ -221,6 +224,7 @@ def test_arrow_array_missing(): ) def test_arrow_table_roundtrip(breaks): import pyarrow as pa + from pandas.core.arrays._arrow_utils import 
ArrowIntervalType arr = IntervalArray.from_breaks(breaks) diff --git a/pandas/tests/arrays/test_period.py b/pandas/tests/arrays/test_period.py index 8887dd0278afe..0d81e8e733842 100644 --- a/pandas/tests/arrays/test_period.py +++ b/pandas/tests/arrays/test_period.py @@ -359,6 +359,7 @@ def test_arrow_extension_type(): ) def test_arrow_array(data, freq): import pyarrow as pa + from pandas.core.arrays._arrow_utils import ArrowPeriodType periods = period_array(data, freq=freq) @@ -384,6 +385,7 @@ def test_arrow_array(data, freq): @pyarrow_skip def test_arrow_array_missing(): import pyarrow as pa + from pandas.core.arrays._arrow_utils import ArrowPeriodType arr = PeriodArray([1, 2, 3], freq="D") @@ -399,6 +401,7 @@ def test_arrow_array_missing(): @pyarrow_skip def test_arrow_table_roundtrip(): import pyarrow as pa + from pandas.core.arrays._arrow_utils import ArrowPeriodType arr = PeriodArray([1, 2, 3], freq="D") diff --git a/pandas/tests/frame/test_analytics.py b/pandas/tests/frame/test_analytics.py index 9d6b9f39a0578..52a1e3aae9058 100644 --- a/pandas/tests/frame/test_analytics.py +++ b/pandas/tests/frame/test_analytics.py @@ -287,7 +287,7 @@ def test_stat_op_api(self, float_frame, float_string_frame): assert_stat_op_api("median", float_frame, float_string_frame) try: - from scipy.stats import skew, kurtosis # noqa:F401 + from scipy.stats import kurtosis, skew # noqa:F401 assert_stat_op_api("skew", float_frame, float_string_frame) assert_stat_op_api("kurt", float_frame, float_string_frame) @@ -370,7 +370,7 @@ def kurt(x): ) try: - from scipy import skew, kurtosis # noqa:F401 + from scipy import kurtosis, skew # noqa:F401 assert_stat_op_calc("skew", skewness, float_frame_with_na) assert_stat_op_calc("kurt", kurt, float_frame_with_na) diff --git a/pandas/tests/indexes/datetimes/test_datetime.py b/pandas/tests/indexes/datetimes/test_datetime.py index ec4162f87010f..7bb1d98086a91 100644 --- a/pandas/tests/indexes/datetimes/test_datetime.py +++ b/pandas/tests/indexes/datetimes/test_datetime.py @@ -59,6 +59,7 @@ def test_reindex_with_same_tz(self): def test_time_loc(self): # GH8667 from datetime import time + from pandas._libs.index import _SIZE_CUTOFF ns = _SIZE_CUTOFF + np.array([-100, 100], dtype=np.int64) diff --git a/pandas/tests/indexing/multiindex/test_indexing_slow.py b/pandas/tests/indexing/multiindex/test_indexing_slow.py index be193e0854d8d..d8e56661b7d61 100644 --- a/pandas/tests/indexing/multiindex/test_indexing_slow.py +++ b/pandas/tests/indexing/multiindex/test_indexing_slow.py @@ -15,7 +15,7 @@ def test_multiindex_get_loc(): # GH7724, GH2646 with warnings.catch_warnings(record=True): # test indexing into a multi-index before & past the lexsort depth - from numpy.random import randint, choice, randn + from numpy.random import choice, randint, randn cols = ["jim", "joe", "jolie", "joline", "jolia"] diff --git a/pandas/tests/io/test_fsspec.py b/pandas/tests/io/test_fsspec.py index c397a61616c1c..d64e2d1933ace 100644 --- a/pandas/tests/io/test_fsspec.py +++ b/pandas/tests/io/test_fsspec.py @@ -37,8 +37,8 @@ def test_read_csv(cleared_fs): def test_reasonable_error(monkeypatch, cleared_fs): - from fsspec.registry import known_implementations from fsspec import registry + from fsspec.registry import known_implementations registry.target.clear() with pytest.raises(ValueError) as e: diff --git a/pandas/tests/io/test_gcs.py b/pandas/tests/io/test_gcs.py index 4d93119ffa3f5..eacf4fa08545d 100644 --- a/pandas/tests/io/test_gcs.py +++ b/pandas/tests/io/test_gcs.py @@ -11,8 +11,7 @@ 
@td.skip_if_no("gcsfs") def test_read_csv_gcs(monkeypatch): - from fsspec import AbstractFileSystem - from fsspec import registry + from fsspec import AbstractFileSystem, registry registry.target.clear() # noqa # remove state @@ -37,8 +36,7 @@ def open(*args, **kwargs): @td.skip_if_no("gcsfs") def test_to_csv_gcs(monkeypatch): - from fsspec import AbstractFileSystem - from fsspec import registry + from fsspec import AbstractFileSystem, registry registry.target.clear() # noqa # remove state df1 = DataFrame( @@ -76,8 +74,7 @@ def mock_get_filepath_or_buffer(*args, **kwargs): @td.skip_if_no("gcsfs") def test_to_parquet_gcs_new_file(monkeypatch, tmpdir): """Regression test for writing to a not-yet-existent GCS Parquet file.""" - from fsspec import AbstractFileSystem - from fsspec import registry + from fsspec import AbstractFileSystem, registry registry.target.clear() # noqa # remove state df1 = DataFrame( diff --git a/pandas/tests/io/test_sql.py b/pandas/tests/io/test_sql.py index 0991fae39138e..29b787d39c09d 100644 --- a/pandas/tests/io/test_sql.py +++ b/pandas/tests/io/test_sql.py @@ -48,10 +48,10 @@ try: import sqlalchemy - import sqlalchemy.schema - import sqlalchemy.sql.sqltypes as sqltypes from sqlalchemy.ext import declarative from sqlalchemy.orm import session as sa_session + import sqlalchemy.schema + import sqlalchemy.sql.sqltypes as sqltypes SQLALCHEMY_INSTALLED = True except ImportError: diff --git a/pandas/tests/plotting/common.py b/pandas/tests/plotting/common.py index 896d3278cdde1..3b1ff233c5ec1 100644 --- a/pandas/tests/plotting/common.py +++ b/pandas/tests/plotting/common.py @@ -13,7 +13,6 @@ from pandas import DataFrame, Series import pandas._testing as tm - """ This is a common base class used for various plotting tests """ @@ -24,6 +23,7 @@ class TestPlotBase: def setup_method(self, method): import matplotlib as mpl + from pandas.plotting._matplotlib import compat mpl.rcdefaults() @@ -187,8 +187,8 @@ def _check_colors( Series used for color grouping key used for andrew_curves, parallel_coordinates, radviz test """ + from matplotlib.collections import Collection, LineCollection, PolyCollection from matplotlib.lines import Line2D - from matplotlib.collections import Collection, PolyCollection, LineCollection conv = self.colorconverter if linecolors is not None: diff --git a/pandas/tests/plotting/test_frame.py b/pandas/tests/plotting/test_frame.py index 317a994bd9a32..ee43e5d7072fe 100644 --- a/pandas/tests/plotting/test_frame.py +++ b/pandas/tests/plotting/test_frame.py @@ -2408,8 +2408,8 @@ def test_specified_props_kwd_plot_box(self, props, expected): assert result[expected][0].get_color() == "C1" def test_default_color_cycle(self): - import matplotlib.pyplot as plt import cycler + import matplotlib.pyplot as plt colors = list("rgbk") plt.rcParams["axes.prop_cycle"] = cycler.cycler("color", colors) @@ -2953,8 +2953,8 @@ def _check(axes): @td.skip_if_no_scipy def test_memory_leak(self): """ Check that every plot type gets properly collected. 
""" - import weakref import gc + import weakref results = {} for kind in plotting.PlotAccessor._all_kinds: @@ -3032,8 +3032,8 @@ def test_df_subplots_patterns_minorticks(self): @pytest.mark.slow def test_df_gridspec_patterns(self): # GH 10819 - import matplotlib.pyplot as plt import matplotlib.gridspec as gridspec + import matplotlib.pyplot as plt ts = Series(np.random.randn(10), index=date_range("1/1/2000", periods=10)) @@ -3422,9 +3422,9 @@ def test_xlabel_ylabel_dataframe_subplots( def _generate_4_axes_via_gridspec(): - import matplotlib.pyplot as plt import matplotlib as mpl import matplotlib.gridspec # noqa + import matplotlib.pyplot as plt gs = mpl.gridspec.GridSpec(2, 2) ax_tl = plt.subplot(gs[0, 0]) diff --git a/pandas/tests/plotting/test_hist_method.py b/pandas/tests/plotting/test_hist_method.py index b6a6c326c3df3..34c881855d16a 100644 --- a/pandas/tests/plotting/test_hist_method.py +++ b/pandas/tests/plotting/test_hist_method.py @@ -101,7 +101,7 @@ def test_hist_layout_with_by(self): @pytest.mark.slow def test_hist_no_overlap(self): - from matplotlib.pyplot import subplot, gcf + from matplotlib.pyplot import gcf, subplot x = Series(randn(2)) y = Series(randn(2)) @@ -352,6 +352,7 @@ class TestDataFrameGroupByPlots(TestPlotBase): @pytest.mark.slow def test_grouped_hist_legacy(self): from matplotlib.patches import Rectangle + from pandas.plotting._matplotlib.hist import _grouped_hist df = DataFrame(randn(500, 2), columns=["A", "B"]) diff --git a/pandas/tests/plotting/test_misc.py b/pandas/tests/plotting/test_misc.py index 75eeede472fe9..f5c1c58f3f7ed 100644 --- a/pandas/tests/plotting/test_misc.py +++ b/pandas/tests/plotting/test_misc.py @@ -131,9 +131,10 @@ def test_scatter_matrix_axis(self): @pytest.mark.slow def test_andrews_curves(self, iris): - from pandas.plotting import andrews_curves from matplotlib import cm + from pandas.plotting import andrews_curves + df = iris _check_plot_works(andrews_curves, frame=df, class_column="Name") @@ -206,9 +207,10 @@ def test_andrews_curves(self, iris): @pytest.mark.slow def test_parallel_coordinates(self, iris): - from pandas.plotting import parallel_coordinates from matplotlib import cm + from pandas.plotting import parallel_coordinates + df = iris ax = _check_plot_works(parallel_coordinates, frame=df, class_column="Name") @@ -279,9 +281,10 @@ def test_parallel_coordinates_with_sorted_labels(self): @pytest.mark.slow def test_radviz(self, iris): - from pandas.plotting import radviz from matplotlib import cm + from pandas.plotting import radviz + df = iris _check_plot_works(radviz, frame=df, class_column="Name") @@ -397,6 +400,7 @@ def test_get_standard_colors_no_appending(self): # Make sure not to add more colors so that matplotlib can cycle # correctly. 
from matplotlib import cm + from pandas.plotting._matplotlib.style import _get_standard_colors color_before = cm.gnuplot(range(5)) diff --git a/pandas/tests/plotting/test_series.py b/pandas/tests/plotting/test_series.py index 151bb3bed7207..cc00626e992f3 100644 --- a/pandas/tests/plotting/test_series.py +++ b/pandas/tests/plotting/test_series.py @@ -452,7 +452,7 @@ def test_hist_layout_with_by(self): @pytest.mark.slow def test_hist_no_overlap(self): - from matplotlib.pyplot import subplot, gcf + from matplotlib.pyplot import gcf, subplot x = Series(randn(2)) y = Series(randn(2)) @@ -827,6 +827,7 @@ def test_standard_colors(self): @pytest.mark.slow def test_standard_colors_all(self): import matplotlib.colors as colors + from pandas.plotting._matplotlib.style import _get_standard_colors # multiple colors like mediumaquamarine diff --git a/pandas/tests/series/indexing/test_datetime.py b/pandas/tests/series/indexing/test_datetime.py index 0b34fab7b80b1..088f8681feb99 100644 --- a/pandas/tests/series/indexing/test_datetime.py +++ b/pandas/tests/series/indexing/test_datetime.py @@ -11,7 +11,6 @@ from pandas import DataFrame, DatetimeIndex, NaT, Series, Timestamp, date_range import pandas._testing as tm - """ Also test support for datetime64[ns] in Series / DataFrame """ @@ -166,6 +165,7 @@ def test_getitem_setitem_datetime_tz_pytz(): def test_getitem_setitem_datetime_tz_dateutil(): from dateutil.tz import tzutc + from pandas._libs.tslibs.timezones import dateutil_gettz as gettz tz = ( diff --git a/pandas/tests/series/methods/test_asof.py b/pandas/tests/series/methods/test_asof.py index 19caf4eccf748..4b4ef5ea046be 100644 --- a/pandas/tests/series/methods/test_asof.py +++ b/pandas/tests/series/methods/test_asof.py @@ -90,7 +90,7 @@ def test_with_nan(self): tm.assert_series_equal(result, expected) def test_periodindex(self): - from pandas import period_range, PeriodIndex + from pandas import PeriodIndex, period_range # array or list or dates N = 50 diff --git a/pandas/tests/series/test_arithmetic.py b/pandas/tests/series/test_arithmetic.py index 5c8a0d224c4f9..ef2bafd4ea2ad 100644 --- a/pandas/tests/series/test_arithmetic.py +++ b/pandas/tests/series/test_arithmetic.py @@ -195,8 +195,8 @@ def test_add_with_duplicate_index(self): tm.assert_series_equal(result, expected) def test_add_na_handling(self): - from decimal import Decimal from datetime import date + from decimal import Decimal s = Series( [Decimal("1.3"), Decimal("2.3")], index=[date(2012, 1, 1), date(2012, 1, 2)] diff --git a/pandas/tests/test_downstream.py b/pandas/tests/test_downstream.py index e718a6b759963..b32c5e91af295 100644 --- a/pandas/tests/test_downstream.py +++ b/pandas/tests/test_downstream.py @@ -90,7 +90,7 @@ def test_statsmodels(): def test_scikit_learn(df): sklearn = import_module("sklearn") # noqa - from sklearn import svm, datasets + from sklearn import datasets, svm digits = datasets.load_digits() clf = svm.SVC(gamma=0.001, C=100.0) diff --git a/pandas/util/_doctools.py b/pandas/util/_doctools.py index f413490764124..3a8a1a3144269 100644 --- a/pandas/util/_doctools.py +++ b/pandas/util/_doctools.py @@ -53,8 +53,8 @@ def plot(self, left, right, labels=None, vertical: bool = True): vertical : bool, default True If True, use vertical layout. If False, use horizontal layout. 
""" - import matplotlib.pyplot as plt import matplotlib.gridspec as gridspec + import matplotlib.pyplot as plt if not isinstance(left, list): left = [left] diff --git a/requirements-dev.txt b/requirements-dev.txt index 7bf3df176b378..c0dd77cd73ddc 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -11,7 +11,7 @@ cpplint flake8<3.8.0 flake8-comprehensions>=3.1.0 flake8-rst>=0.6.0,<=0.7.0 -isort==4.3.21 +isort>=5.2.1 mypy==0.730 pycodestyle gitpython From ea92c454cdc1f89f02c6efb2a86c95d5d3529bd0 Mon Sep 17 00:00:00 2001 From: Simon Hawkins Date: Sat, 1 Aug 2020 13:46:27 +0100 Subject: [PATCH 189/632] CI: xfail numpy-dev (#35502) --- pandas/tests/indexes/common.py | 12 +++++++++++- pandas/tests/indexing/test_loc.py | 3 +++ 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/pandas/tests/indexes/common.py b/pandas/tests/indexes/common.py index c8b780455f862..f5b9f4a401e60 100644 --- a/pandas/tests/indexes/common.py +++ b/pandas/tests/indexes/common.py @@ -5,6 +5,7 @@ import pytest from pandas._libs import iNaT +from pandas.compat.numpy import _is_numpy_dev from pandas.errors import InvalidIndexError from pandas.core.dtypes.common import is_datetime64tz_dtype @@ -417,7 +418,7 @@ def test_set_ops_error_cases(self, case, method, index): with pytest.raises(TypeError, match=msg): getattr(index, method)(case) - def test_intersection_base(self, index): + def test_intersection_base(self, index, request): if isinstance(index, CategoricalIndex): return @@ -434,6 +435,15 @@ def test_intersection_base(self, index): # GH 10149 cases = [klass(second.values) for klass in [np.array, Series, list]] for case in cases: + # https://github.com/pandas-dev/pandas/issues/35481 + if ( + _is_numpy_dev + and isinstance(case, Series) + and isinstance(index, UInt64Index) + ): + mark = pytest.mark.xfail(reason="gh-35481") + request.node.add_marker(mark) + result = first.intersection(case) assert tm.equalContents(result, second) diff --git a/pandas/tests/indexing/test_loc.py b/pandas/tests/indexing/test_loc.py index 30b13b6ea9fce..193800fae751f 100644 --- a/pandas/tests/indexing/test_loc.py +++ b/pandas/tests/indexing/test_loc.py @@ -5,6 +5,8 @@ import numpy as np import pytest +from pandas.compat.numpy import _is_numpy_dev + import pandas as pd from pandas import DataFrame, Series, Timestamp, date_range import pandas._testing as tm @@ -945,6 +947,7 @@ def test_loc_setitem_empty_append(self): df.loc[0, "x"] = expected.loc[0, "x"] tm.assert_frame_equal(df, expected) + @pytest.mark.xfail(_is_numpy_dev, reason="gh-35481") def test_loc_setitem_empty_append_raises(self): # GH6173, various appends to an empty dataframe From 5223cd17944e90dd67a5e58773ab665c37e57ad4 Mon Sep 17 00:00:00 2001 From: Alex Lim Date: Sat, 1 Aug 2020 09:10:24 -0400 Subject: [PATCH 190/632] DOC: Expanded Using a Docker Container section (#35345) (#35379) --- doc/source/development/contributing.rst | 32 +++++++++++++++++++++---- 1 file changed, 28 insertions(+), 4 deletions(-) diff --git a/doc/source/development/contributing.rst b/doc/source/development/contributing.rst index 1b0e36e7b6933..4ffd1d586a99a 100644 --- a/doc/source/development/contributing.rst +++ b/doc/source/development/contributing.rst @@ -153,14 +153,38 @@ to build the documentation locally before pushing your changes. Using a Docker container ~~~~~~~~~~~~~~~~~~~~~~~~ -Instead of manually setting up a development environment, you can use Docker to -automatically create the environment with just several commands. 
Pandas provides a `DockerFile` -in the root directory to build a Docker image with a full pandas development environment. +Instead of manually setting up a development environment, you can use `Docker +`_ to automatically create the environment with just several +commands. Pandas provides a `DockerFile` in the root directory to build a Docker image +with a full pandas development environment. -Even easier, you can use the DockerFile to launch a remote session with Visual Studio Code, +**Docker Commands** + +Pass your GitHub username in the `DockerFile` to use your own fork:: + + # Build the image pandas-yourname-env + docker build --tag pandas-yourname-env . + # Run a container and bind your local forked repo, pandas-yourname, to the container + docker run -it --rm -v path-to-pandas-yourname:/home/pandas-yourname pandas-yourname-env + +Even easier, you can integrate Docker with the following IDEs: + +**Visual Studio Code** + +You can use the DockerFile to launch a remote session with Visual Studio Code, a popular free IDE, using the `.devcontainer.json` file. See https://code.visualstudio.com/docs/remote/containers for details. +**PyCharm (Professional)** + +Enable Docker support and use the Services tool window to build and manage images as well as +run and interact with containers. +See https://www.jetbrains.com/help/pycharm/docker.html for details. + +Note that you might need to rebuild the C extensions if/when you merge with upstream/master using:: + + python setup.py build_ext --inplace -j 4 + .. _contributing.dev_c: Installing a C compiler From 52e815f59a4504ed81c0c1c2b79714c30fdade08 Mon Sep 17 00:00:00 2001 From: Adam Spannbauer Date: Sat, 1 Aug 2020 13:05:54 -0400 Subject: [PATCH 191/632] DOC: add note on str cons to read_sql (#35503) --- pandas/io/sql.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pandas/io/sql.py b/pandas/io/sql.py index c87391eaa62b1..51888e5021d80 100644 --- a/pandas/io/sql.py +++ b/pandas/io/sql.py @@ -439,7 +439,8 @@ def read_sql( con : SQLAlchemy connectable, str, or sqlite3 connection Using SQLAlchemy makes it possible to use any DB supported by that library. If a DBAPI2 object, only sqlite3 is supported. The user is responsible - for engine disposal and connection closure for the SQLAlchemy connectable. See + for engine disposal and connection closure for the SQLAlchemy connectable; str + connections are closed automatically. See `here `_. index_col : str or list of str, optional, default: None Column(s) to set as index(MultiIndex). 
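
The docstring change above has a practical consequence worth illustrating: with a plain
connection string, pandas creates the SQLAlchemy engine itself and disposes of it once
the query has run, whereas an explicit engine or DBAPI connection is left open for the
caller to manage. A minimal sketch of both cases, assuming SQLAlchemy is installed and
a local SQLite file ``example.db`` containing a table ``t`` (both names are hypothetical)::

    import sqlite3

    import pandas as pd

    # String connection: pandas builds the SQLAlchemy engine internally and
    # closes/disposes of it automatically after the query completes.
    df = pd.read_sql("SELECT * FROM t", "sqlite:///example.db")

    # DBAPI connection: pandas only borrows the connection; closing it is
    # the caller's responsibility.
    con = sqlite3.connect("example.db")
    try:
        df = pd.read_sql("SELECT * FROM t", con)
    finally:
        con.close()
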
From 017bde1ad41bdd37d87b0ad651ecab1b3d3da1d5 Mon Sep 17 00:00:00 2001 From: Andrew Wieteska <48889395+arw2019@users.noreply.github.com> Date: Sat, 1 Aug 2020 13:10:09 -0400 Subject: [PATCH 192/632] BUG: date_range doesn't propagate ambigous=False to tz_localize (#35302) --- doc/source/whatsnew/v1.2.0.rst | 2 +- pandas/core/arrays/datetimes.py | 4 +- .../indexes/datetimes/test_constructors.py | 59 +++++++++++++++++++ 3 files changed, 62 insertions(+), 3 deletions(-) diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index 2066858e5de86..b16ca0a80c5b4 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -71,7 +71,7 @@ Timedelta Timezones ^^^^^^^^^ -- +- Bug in :func:`date_range` was raising AmbiguousTimeError for valid input with `ambiguous=False` (:issue:`35297`) - diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index d674b1c476d2c..8b2bb7832b5d0 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -418,9 +418,9 @@ def _generate_range( # index is localized datetime64 array -> have to convert # start/end as well to compare if start is not None: - start = start.tz_localize(tz).asm8 + start = start.tz_localize(tz, ambiguous, nonexistent).asm8 if end is not None: - end = end.tz_localize(tz).asm8 + end = end.tz_localize(tz, ambiguous, nonexistent).asm8 else: # Create a linearly spaced date_range in local time # Nanosecond-granularity timestamps aren't always correctly diff --git a/pandas/tests/indexes/datetimes/test_constructors.py b/pandas/tests/indexes/datetimes/test_constructors.py index c150e7901c86a..9a855a1624520 100644 --- a/pandas/tests/indexes/datetimes/test_constructors.py +++ b/pandas/tests/indexes/datetimes/test_constructors.py @@ -787,6 +787,65 @@ def test_construction_with_nat_and_tzlocal(self): expected = DatetimeIndex([Timestamp("2018", tz=tz), pd.NaT]) tm.assert_index_equal(result, expected) + def test_constructor_with_ambiguous_keyword_arg(self): + # GH 35297 + + expected = DatetimeIndex( + ["2020-11-01 01:00:00", "2020-11-02 01:00:00"], + dtype="datetime64[ns, America/New_York]", + freq="D", + ambiguous=False, + ) + + # ambiguous keyword in start + timezone = "America/New_York" + start = pd.Timestamp(year=2020, month=11, day=1, hour=1).tz_localize( + timezone, ambiguous=False + ) + result = pd.date_range(start=start, periods=2, ambiguous=False) + tm.assert_index_equal(result, expected) + + # ambiguous keyword in end + timezone = "America/New_York" + end = pd.Timestamp(year=2020, month=11, day=2, hour=1).tz_localize( + timezone, ambiguous=False + ) + result = pd.date_range(end=end, periods=2, ambiguous=False) + tm.assert_index_equal(result, expected) + + def test_constructor_with_nonexistent_keyword_arg(self): + # GH 35297 + + timezone = "Europe/Warsaw" + + # nonexistent keyword in start + start = pd.Timestamp("2015-03-29 02:30:00").tz_localize( + timezone, nonexistent="shift_forward" + ) + result = pd.date_range(start=start, periods=2, freq="H") + expected = DatetimeIndex( + [ + pd.Timestamp("2015-03-29 03:00:00+02:00", tz=timezone), + pd.Timestamp("2015-03-29 04:00:00+02:00", tz=timezone), + ] + ) + + tm.assert_index_equal(result, expected) + + # nonexistent keyword in end + end = pd.Timestamp("2015-03-29 02:30:00").tz_localize( + timezone, nonexistent="shift_forward" + ) + result = pd.date_range(end=end, periods=2, freq="H") + expected = DatetimeIndex( + [ + pd.Timestamp("2015-03-29 01:00:00+01:00", tz=timezone), + pd.Timestamp("2015-03-29 
03:00:00+02:00", tz=timezone), + ] + ) + + tm.assert_index_equal(result, expected) + def test_constructor_no_precision_raises(self): # GH-24753, GH-24739 From a0c8425a5f2b74e8a716defd799c4a3716f66eff Mon Sep 17 00:00:00 2001 From: Mohammad Jafar Mashhadi Date: Sun, 2 Aug 2020 04:18:01 -0600 Subject: [PATCH 193/632] Added alignable boolean series and its example to `.loc` docs. (#35506) --- pandas/core/indexing.py | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index 04d1dbceb3342..dd81823055390 100644 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -255,6 +255,8 @@ def loc(self) -> "_LocIndexer": - A boolean array of the same length as the axis being sliced, e.g. ``[True, False, True]``. + - An alignable boolean Series. The index of the key will be aligned before + masking. - A ``callable`` function with one argument (the calling Series or DataFrame) and that returns valid output for indexing (one of the above) @@ -264,6 +266,8 @@ def loc(self) -> "_LocIndexer": ------ KeyError If any items are not found. + IndexingError + If an indexed key is passed and its index is unalignable to the frame index. See Also -------- @@ -319,6 +323,13 @@ def loc(self) -> "_LocIndexer": max_speed shield sidewinder 7 8 + Alignable boolean Series: + + >>> df.loc[pd.Series([False, True, False], + ... index=['viper', 'sidewinder', 'cobra'])] + max_speed shield + sidewinder 7 8 + Conditional that returns a boolean Series >>> df.loc[df['shield'] > 6] From 969945e6fb838873e7beb1e2f5940906d1af67aa Mon Sep 17 00:00:00 2001 From: Thomas Smith Date: Tue, 4 Aug 2020 00:13:37 +0100 Subject: [PATCH 194/632] TST: adding test for .describe() with duplicate columns (#35424) --- pandas/tests/groupby/test_function.py | 62 +++++++++++++++++++++++++++ 1 file changed, 62 insertions(+) diff --git a/pandas/tests/groupby/test_function.py b/pandas/tests/groupby/test_function.py index e693962e57ac3..cbfba16223f74 100644 --- a/pandas/tests/groupby/test_function.py +++ b/pandas/tests/groupby/test_function.py @@ -992,6 +992,68 @@ def test_frame_describe_unstacked_format(): tm.assert_frame_equal(result, expected) +@pytest.mark.filterwarnings( + "ignore:" + "indexing past lexsort depth may impact performance:" + "pandas.errors.PerformanceWarning" +) +@pytest.mark.parametrize("as_index", [True, False]) +def test_describe_with_duplicate_output_column_names(as_index): + # GH 35314 + df = pd.DataFrame( + { + "a": [99, 99, 99, 88, 88, 88], + "b": [1, 2, 3, 4, 5, 6], + "c": [10, 20, 30, 40, 50, 60], + }, + columns=["a", "b", "b"], + ) + + expected = ( + pd.DataFrame.from_records( + [ + ("a", "count", 3.0, 3.0), + ("a", "mean", 88.0, 99.0), + ("a", "std", 0.0, 0.0), + ("a", "min", 88.0, 99.0), + ("a", "25%", 88.0, 99.0), + ("a", "50%", 88.0, 99.0), + ("a", "75%", 88.0, 99.0), + ("a", "max", 88.0, 99.0), + ("b", "count", 3.0, 3.0), + ("b", "mean", 5.0, 2.0), + ("b", "std", 1.0, 1.0), + ("b", "min", 4.0, 1.0), + ("b", "25%", 4.5, 1.5), + ("b", "50%", 5.0, 2.0), + ("b", "75%", 5.5, 2.5), + ("b", "max", 6.0, 3.0), + ("b", "count", 3.0, 3.0), + ("b", "mean", 5.0, 2.0), + ("b", "std", 1.0, 1.0), + ("b", "min", 4.0, 1.0), + ("b", "25%", 4.5, 1.5), + ("b", "50%", 5.0, 2.0), + ("b", "75%", 5.5, 2.5), + ("b", "max", 6.0, 3.0), + ], + ) + .set_index([0, 1]) + .T + ) + expected.columns.names = [None, None] + expected.index = pd.Index([88, 99], name="a") + + if as_index: + expected = expected.drop(columns=["a"], level=0) + else: + expected = expected.reset_index(drop=True) + + 
result = df.groupby("a", as_index=as_index).describe() + + tm.assert_frame_equal(result, expected) + + def test_groupby_mean_no_overflow(): # Regression test for (#22487) df = pd.DataFrame( From d367c8ab515324ea4f1b9066e66b6d4a7daf2f6e Mon Sep 17 00:00:00 2001 From: Thomas Smith Date: Tue, 4 Aug 2020 00:19:26 +0100 Subject: [PATCH 195/632] TST: ensure that DataFrameGroupBy.apply does not convert datetime.date to pd.Timestamp (#35504) --- pandas/tests/groupby/test_apply.py | 32 +++++++++++++++++++++++++++++- 1 file changed, 31 insertions(+), 1 deletion(-) diff --git a/pandas/tests/groupby/test_apply.py b/pandas/tests/groupby/test_apply.py index 5a1268bfb03db..525a6fe2637c3 100644 --- a/pandas/tests/groupby/test_apply.py +++ b/pandas/tests/groupby/test_apply.py @@ -1,4 +1,4 @@ -from datetime import datetime +from datetime import date, datetime from io import StringIO import numpy as np @@ -1014,3 +1014,33 @@ def test_apply_with_timezones_aware(): result2 = df2.groupby("x", group_keys=False).apply(lambda df: df[["x", "y"]].copy()) tm.assert_frame_equal(result1, result2) + + +def test_apply_with_date_in_multiindex_does_not_convert_to_timestamp(): + # GH 29617 + + df = pd.DataFrame( + { + "A": ["a", "a", "a", "b"], + "B": [ + date(2020, 1, 10), + date(2020, 1, 10), + date(2020, 2, 10), + date(2020, 2, 10), + ], + "C": [1, 2, 3, 4], + }, + index=pd.Index([100, 101, 102, 103], name="idx"), + ) + + grp = df.groupby(["A", "B"]) + result = grp.apply(lambda x: x.head(1)) + + expected = df.iloc[[0, 2, 3]] + expected = expected.reset_index() + expected.index = pd.MultiIndex.from_frame(expected[["A", "B", "idx"]]) + expected = expected.drop(columns="idx") + + tm.assert_frame_equal(result, expected) + for val in result.index.levels[1]: + assert type(val) is date From 5413b6b7eae0f591502c351629ffc858bf53dd2e Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Mon, 3 Aug 2020 16:22:03 -0700 Subject: [PATCH 196/632] REF: de-duplicate get_resolution (#35245) --- pandas/_libs/tslibs/vectorized.pyx | 57 +++++++++++++----------------- 1 file changed, 24 insertions(+), 33 deletions(-) diff --git a/pandas/_libs/tslibs/vectorized.pyx b/pandas/_libs/tslibs/vectorized.pyx index bdc00f6c6e21a..b23f8255a76ac 100644 --- a/pandas/_libs/tslibs/vectorized.pyx +++ b/pandas/_libs/tslibs/vectorized.pyx @@ -211,49 +211,40 @@ def get_resolution(const int64_t[:] stamps, tzinfo tz=None): int reso = RESO_DAY, curr_reso ndarray[int64_t] trans int64_t[:] deltas - Py_ssize_t[:] pos - int64_t local_val, delta + intp_t[:] pos + int64_t local_val, delta = NPY_NAT + bint use_utc = False, use_tzlocal = False, use_fixed = False if is_utc(tz) or tz is None: - for i in range(n): - if stamps[i] == NPY_NAT: - continue - dt64_to_dtstruct(stamps[i], &dts) - curr_reso = _reso_stamp(&dts) - if curr_reso < reso: - reso = curr_reso + use_utc = True elif is_tzlocal(tz): - for i in range(n): - if stamps[i] == NPY_NAT: - continue - local_val = tz_convert_utc_to_tzlocal(stamps[i], tz) - dt64_to_dtstruct(local_val, &dts) - curr_reso = _reso_stamp(&dts) - if curr_reso < reso: - reso = curr_reso + use_tzlocal = True else: - # Adjust datetime64 timestamp, recompute datetimestruct trans, deltas, typ = get_dst_info(tz) - if typ not in ["pytz", "dateutil"]: # static/fixed; in this case we know that len(delta) == 1 + use_fixed = True delta = deltas[0] - for i in range(n): - if stamps[i] == NPY_NAT: - continue - dt64_to_dtstruct(stamps[i] + delta, &dts) - curr_reso = _reso_stamp(&dts) - if curr_reso < reso: - reso = curr_reso else: pos = 
trans.searchsorted(stamps, side="right") - 1 - for i in range(n): - if stamps[i] == NPY_NAT: - continue - dt64_to_dtstruct(stamps[i] + deltas[pos[i]], &dts) - curr_reso = _reso_stamp(&dts) - if curr_reso < reso: - reso = curr_reso + + for i in range(n): + if stamps[i] == NPY_NAT: + continue + + if use_utc: + local_val = stamps[i] + elif use_tzlocal: + local_val = tz_convert_utc_to_tzlocal(stamps[i], tz) + elif use_fixed: + local_val = stamps[i] + delta + else: + local_val = stamps[i] + deltas[pos[i]] + + dt64_to_dtstruct(local_val, &dts) + curr_reso = _reso_stamp(&dts) + if curr_reso < reso: + reso = curr_reso return Resolution(reso) From b68b604c6000c6d9e2e1da62ab9160acf91b4a4e Mon Sep 17 00:00:00 2001 From: Daniel Saxton <2658661+dsaxton@users.noreply.github.com> Date: Mon, 3 Aug 2020 18:29:31 -0500 Subject: [PATCH 197/632] REGR: Check for float in isnaobj_old (#35510) --- doc/source/whatsnew/v1.1.1.rst | 2 +- pandas/_libs/missing.pyx | 5 ++++- pandas/tests/io/parser/test_common.py | 12 +++++++++++- 3 files changed, 16 insertions(+), 3 deletions(-) diff --git a/doc/source/whatsnew/v1.1.1.rst b/doc/source/whatsnew/v1.1.1.rst index 443589308ad4c..f8f655ce7b866 100644 --- a/doc/source/whatsnew/v1.1.1.rst +++ b/doc/source/whatsnew/v1.1.1.rst @@ -15,7 +15,7 @@ including other versions of pandas. Fixed regressions ~~~~~~~~~~~~~~~~~ -- +- Fixed regression where :func:`read_csv` would raise a ``ValueError`` when ``pandas.options.mode.use_inf_as_na`` was set to ``True`` (:issue:`35493`). - - diff --git a/pandas/_libs/missing.pyx b/pandas/_libs/missing.pyx index 760fab3781fd4..771e8053ac9be 100644 --- a/pandas/_libs/missing.pyx +++ b/pandas/_libs/missing.pyx @@ -155,7 +155,10 @@ def isnaobj_old(arr: ndarray) -> ndarray: result = np.zeros(n, dtype=np.uint8) for i in range(n): val = arr[i] - result[i] = checknull(val) or val == INF or val == NEGINF + result[i] = ( + checknull(val) + or util.is_float_object(val) and (val == INF or val == NEGINF) + ) return result.view(np.bool_) diff --git a/pandas/tests/io/parser/test_common.py b/pandas/tests/io/parser/test_common.py index 12e73bae40eac..5154a9ba6fdf0 100644 --- a/pandas/tests/io/parser/test_common.py +++ b/pandas/tests/io/parser/test_common.py @@ -18,7 +18,7 @@ from pandas.errors import DtypeWarning, EmptyDataError, ParserError import pandas.util._test_decorators as td -from pandas import DataFrame, Index, MultiIndex, Series, compat, concat +from pandas import DataFrame, Index, MultiIndex, Series, compat, concat, option_context import pandas._testing as tm from pandas.io.parsers import CParserWrapper, TextFileReader, TextParser @@ -2179,3 +2179,13 @@ def test_read_csv_names_not_accepting_sets(all_parsers): parser = all_parsers with pytest.raises(ValueError, match="Names should be an ordered collection."): parser.read_csv(StringIO(data), names=set("QAZ")) + + +def test_read_csv_with_use_inf_as_na(all_parsers): + # https://github.com/pandas-dev/pandas/issues/35493 + parser = all_parsers + data = "1.0\nNaN\n3.0" + with option_context("use_inf_as_na", True): + result = parser.read_csv(StringIO(data), header=None) + expected = DataFrame([1.0, np.nan, 3.0]) + tm.assert_frame_equal(result, expected) From 0b90685f4df2024754748b992d5eaaa352e7caa5 Mon Sep 17 00:00:00 2001 From: Simon Hawkins Date: Tue, 4 Aug 2020 00:30:24 +0100 Subject: [PATCH 198/632] REF: remove special casing from Index.equals (always dispatch to subclass) (#35330) --- pandas/core/indexes/base.py | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git 
a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 1be381e38b157..7ba94c76d0037 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -4252,16 +4252,15 @@ def equals(self, other: Any) -> bool: if not isinstance(other, Index): return False - if is_object_dtype(self.dtype) and not is_object_dtype(other.dtype): - # if other is not object, use other's logic for coercion - return other.equals(self) - - if isinstance(other, ABCMultiIndex): - # d-level MultiIndex can equal d-tuple Index - return other.equals(self) - - if is_extension_array_dtype(other.dtype): - # All EA-backed Index subclasses override equals + # If other is a subclass of self and defines it's own equals method, we + # dispatch to the subclass method. For instance for a MultiIndex, + # a d-level MultiIndex can equal d-tuple Index. + # Note: All EA-backed Index subclasses override equals + if ( + isinstance(other, type(self)) + and type(other) is not type(self) + and other.equals is not self.equals + ): return other.equals(self) return array_equivalent(self._values, other._values) From 2a319238e47d3d7362188f7da4eb911199c423b8 Mon Sep 17 00:00:00 2001 From: Simon Hawkins Date: Tue, 4 Aug 2020 00:40:27 +0100 Subject: [PATCH 199/632] CI: activate azure pipelines on 1.1.x - DO NOT MERGE (#35468) --- azure-pipelines.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/azure-pipelines.yml b/azure-pipelines.yml index e45cafc02cb61..113ad3e338952 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -1,9 +1,11 @@ # Adapted from https://github.com/numba/numba/blob/master/azure-pipelines.yml trigger: - master +- 1.1.x pr: - master +- 1.1.x variables: PYTEST_WORKERS: auto From 87c1761c3bc95baf8981789ff38f4dec465fe630 Mon Sep 17 00:00:00 2001 From: Thomas Smith Date: Tue, 4 Aug 2020 00:46:22 +0100 Subject: [PATCH 200/632] adding test for #18451 (#35494) --- pandas/tests/groupby/test_groupby.py | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py index ebce5b0ef0a66..8c51ebf89f5c0 100644 --- a/pandas/tests/groupby/test_groupby.py +++ b/pandas/tests/groupby/test_groupby.py @@ -2055,3 +2055,17 @@ def test_groups_repr_truncates(max_seq_items, expected): result = df.groupby(np.array(df.a)).groups.__repr__() assert result == expected + + +def test_group_on_two_row_multiindex_returns_one_tuple_key(): + # GH 18451 + df = pd.DataFrame([{"a": 1, "b": 2, "c": 99}, {"a": 1, "b": 2, "c": 88}]) + df = df.set_index(["a", "b"]) + + grp = df.groupby(["a", "b"]) + result = grp.indices + expected = {(1, 2): np.array([0, 1], dtype=np.int64)} + + assert len(result) == 1 + key = (1, 2) + assert (result[key] == expected[key]).all() From cda82847b1336b07c6a11727449139d19918fcf2 Mon Sep 17 00:00:00 2001 From: Terji Petersen Date: Tue, 4 Aug 2020 00:48:54 +0100 Subject: [PATCH 201/632] BUG: CategoricalIndex.format (#35440) --- doc/source/whatsnew/v1.1.1.rst | 7 +++++++ pandas/core/indexes/category.py | 12 ++++++------ pandas/core/indexes/range.py | 7 +------ pandas/tests/indexes/categorical/test_category.py | 6 ++++++ pandas/tests/indexes/common.py | 6 ++++++ pandas/tests/indexes/datetimes/test_datetimelike.py | 6 ++++++ pandas/tests/indexes/test_base.py | 7 +++++-- pandas/tests/indexes/test_numeric.py | 7 +++++++ pandas/tests/io/formats/test_format.py | 9 +++++++++ 9 files changed, 53 insertions(+), 14 deletions(-) diff --git a/doc/source/whatsnew/v1.1.1.rst b/doc/source/whatsnew/v1.1.1.rst index 
f8f655ce7b866..6a327a4fc732f 100644 --- a/doc/source/whatsnew/v1.1.1.rst +++ b/doc/source/whatsnew/v1.1.1.rst @@ -26,6 +26,13 @@ Fixed regressions Bug fixes ~~~~~~~~~ + +Categorical +^^^^^^^^^^^ + +- Bug in :meth:`CategoricalIndex.format` where, when stringified scalars had different lengths, the shorter string would be right-filled with spaces, so it had the same length as the longest string (:issue:`35439`) + + **Datetimelike** - diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py index b0b008de69a94..74b235655e345 100644 --- a/pandas/core/indexes/category.py +++ b/pandas/core/indexes/category.py @@ -20,7 +20,7 @@ pandas_dtype, ) from pandas.core.dtypes.dtypes import CategoricalDtype -from pandas.core.dtypes.missing import is_valid_nat_for_dtype, isna +from pandas.core.dtypes.missing import is_valid_nat_for_dtype, isna, notna from pandas.core import accessor from pandas.core.algorithms import take_1d @@ -348,12 +348,12 @@ def _format_attrs(self): return attrs def _format_with_header(self, header, na_rep="NaN") -> List[str]: - from pandas.io.formats.format import format_array + from pandas.io.formats.printing import pprint_thing - formatted_values = format_array( - self._values, formatter=None, na_rep=na_rep, justify="left" - ) - result = ibase.trim_front(formatted_values) + result = [ + pprint_thing(x, escape_chars=("\t", "\r", "\n")) if notna(x) else na_rep + for x in self._values + ] return header + result # -------------------------------------------------------------------- diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py index e5e98039ff77b..eee610681087d 100644 --- a/pandas/core/indexes/range.py +++ b/pandas/core/indexes/range.py @@ -1,7 +1,7 @@ from datetime import timedelta import operator from sys import getsizeof -from typing import Any, List, Optional +from typing import Any, Optional import warnings import numpy as np @@ -33,8 +33,6 @@ from pandas.core.indexes.numeric import Int64Index from pandas.core.ops.common import unpack_zerodim_and_defer -from pandas.io.formats.printing import pprint_thing - _empty_range = range(0) @@ -197,9 +195,6 @@ def _format_data(self, name=None): # we are formatting thru the attributes return None - def _format_with_header(self, header, na_rep="NaN") -> List[str]: - return header + [pprint_thing(x) for x in self._range] - # -------------------------------------------------------------------- _deprecation_message = ( "RangeIndex.{} is deprecated and will be " diff --git a/pandas/tests/indexes/categorical/test_category.py b/pandas/tests/indexes/categorical/test_category.py index 7f30a77872bc1..8af26eef504fc 100644 --- a/pandas/tests/indexes/categorical/test_category.py +++ b/pandas/tests/indexes/categorical/test_category.py @@ -478,3 +478,9 @@ def test_reindex_base(self): def test_map_str(self): # See test_map.py pass + + def test_format_different_scalar_lengths(self): + # GH35439 + idx = CategoricalIndex(["aaaaaaaaa", "b"]) + expected = ["aaaaaaaaa", "b"] + assert idx.format() == expected diff --git a/pandas/tests/indexes/common.py b/pandas/tests/indexes/common.py index f5b9f4a401e60..3b41c4bfacf73 100644 --- a/pandas/tests/indexes/common.py +++ b/pandas/tests/indexes/common.py @@ -642,6 +642,12 @@ def test_equals_op(self): tm.assert_numpy_array_equal(index_a == item, expected3) tm.assert_series_equal(series_a == item, Series(expected3)) + def test_format(self): + # GH35439 + idx = self.create_index() + expected = [str(x) for x in idx] + assert idx.format() == expected + def 
test_hasnans_isnans(self, index): # GH 11343, added tests for hasnans / isnans if isinstance(index, MultiIndex): diff --git a/pandas/tests/indexes/datetimes/test_datetimelike.py b/pandas/tests/indexes/datetimes/test_datetimelike.py index 7345ae3032463..a5abf2946feda 100644 --- a/pandas/tests/indexes/datetimes/test_datetimelike.py +++ b/pandas/tests/indexes/datetimes/test_datetimelike.py @@ -20,6 +20,12 @@ def index(self, request): def create_index(self) -> DatetimeIndex: return date_range("20130101", periods=5) + def test_format(self): + # GH35439 + idx = self.create_index() + expected = [f"{x:%Y-%m-%d}" for x in idx] + assert idx.format() == expected + def test_shift(self): pass # handled in test_ops diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py index eaf48421dc071..59ee88117a984 100644 --- a/pandas/tests/indexes/test_base.py +++ b/pandas/tests/indexes/test_base.py @@ -1171,8 +1171,11 @@ def test_summary_bug(self): assert "~:{range}:0" in result assert "{other}%s" in result - def test_format(self, index): - self._check_method_works(Index.format, index) + def test_format_different_scalar_lengths(self): + # GH35439 + idx = Index(["aaaaaaaaa", "b"]) + expected = ["aaaaaaaaa", "b"] + assert idx.format() == expected def test_format_bug(self): # GH 14626 diff --git a/pandas/tests/indexes/test_numeric.py b/pandas/tests/indexes/test_numeric.py index a7c5734ef9b02..bfcac5d433d2c 100644 --- a/pandas/tests/indexes/test_numeric.py +++ b/pandas/tests/indexes/test_numeric.py @@ -21,6 +21,13 @@ def test_can_hold_identifiers(self): key = idx[0] assert idx._can_hold_identifiers_and_holds_name(key) is False + def test_format(self): + # GH35439 + idx = self.create_index() + max_width = max(len(str(x)) for x in idx) + expected = [str(x).ljust(max_width) for x in idx] + assert idx.format() == expected + def test_numeric_compat(self): pass # override Base method diff --git a/pandas/tests/io/formats/test_format.py b/pandas/tests/io/formats/test_format.py index e236b3da73c69..84805d06df4a8 100644 --- a/pandas/tests/io/formats/test_format.py +++ b/pandas/tests/io/formats/test_format.py @@ -2141,6 +2141,15 @@ def test_dict_entries(self): assert "'a': 1" in val assert "'b': 2" in val + def test_categorical_columns(self): + # GH35439 + data = [[4, 2], [3, 2], [4, 3]] + cols = ["aaaaaaaaa", "b"] + df = pd.DataFrame(data, columns=cols) + df_cat_cols = pd.DataFrame(data, columns=pd.CategoricalIndex(cols)) + + assert df.to_string() == df_cat_cols.to_string() + def test_period(self): # GH 12615 df = pd.DataFrame( From d9d34aee15cbec12fb9ee9a03591e9516d05c30a Mon Sep 17 00:00:00 2001 From: Terji Petersen Date: Tue, 4 Aug 2020 08:51:51 +0100 Subject: [PATCH 202/632] CLN/PERF: move RangeIndex._cached_data to RangeIndex._cache (#35432) * CLN: move cached_data to _cache['_data'] * add GH number * flake8 cleanup --- pandas/core/indexes/range.py | 16 ++------ pandas/tests/indexes/ranges/test_range.py | 45 +++++++++++++---------- 2 files changed, 29 insertions(+), 32 deletions(-) diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py index eee610681087d..1dc4fc1e91462 100644 --- a/pandas/core/indexes/range.py +++ b/pandas/core/indexes/range.py @@ -1,7 +1,7 @@ from datetime import timedelta import operator from sys import getsizeof -from typing import Any, Optional +from typing import Any import warnings import numpy as np @@ -78,8 +78,6 @@ class RangeIndex(Int64Index): _engine_type = libindex.Int64Engine _range: range - # check whether self._data has been called - 
_cached_data: Optional[np.ndarray] = None # -------------------------------------------------------------------- # Constructors @@ -150,20 +148,14 @@ def _constructor(self): """ return the class to use for construction """ return Int64Index - @property + @cache_readonly def _data(self): """ An int array that for performance reasons is created only when needed. - The constructed array is saved in ``_cached_data``. This allows us to - check if the array has been created without accessing ``_data`` and - triggering the construction. + The constructed array is saved in ``_cache``. """ - if self._cached_data is None: - self._cached_data = np.arange( - self.start, self.stop, self.step, dtype=np.int64 - ) - return self._cached_data + return np.arange(self.start, self.stop, self.step, dtype=np.int64) @cache_readonly def _int64index(self) -> Int64Index: diff --git a/pandas/tests/indexes/ranges/test_range.py b/pandas/tests/indexes/ranges/test_range.py index 5b6f9cb358b7d..ef4bb9a0869b0 100644 --- a/pandas/tests/indexes/ranges/test_range.py +++ b/pandas/tests/indexes/ranges/test_range.py @@ -137,53 +137,58 @@ def test_dtype(self): index = self.create_index() assert index.dtype == np.int64 - def test_cached_data(self): - # GH 26565, GH26617 - # Calling RangeIndex._data caches an int64 array of the same length at - # self._cached_data. This test checks whether _cached_data has been set + def test_cache(self): + # GH 26565, GH26617, GH35432 + # This test checks whether _cache has been set. + # Calling RangeIndex._cache["_data"] creates an int64 array of the same length + # as the RangeIndex and stores it in _cache. idx = RangeIndex(0, 100, 10) - assert idx._cached_data is None + assert idx._cache == {} repr(idx) - assert idx._cached_data is None + assert idx._cache == {} str(idx) - assert idx._cached_data is None + assert idx._cache == {} idx.get_loc(20) - assert idx._cached_data is None + assert idx._cache == {} - 90 in idx - assert idx._cached_data is None + 90 in idx # True + assert idx._cache == {} - 91 in idx - assert idx._cached_data is None + 91 in idx # False + assert idx._cache == {} idx.all() - assert idx._cached_data is None + assert idx._cache == {} idx.any() - assert idx._cached_data is None + assert idx._cache == {} df = pd.DataFrame({"a": range(10)}, index=idx) df.loc[50] - assert idx._cached_data is None + assert idx._cache == {} with pytest.raises(KeyError, match="51"): df.loc[51] - assert idx._cached_data is None + assert idx._cache == {} df.loc[10:50] - assert idx._cached_data is None + assert idx._cache == {} df.iloc[5:10] - assert idx._cached_data is None + assert idx._cache == {} - # actually calling idx._data + # idx._cache should contain a _data entry after call to idx._data + idx._data assert isinstance(idx._data, np.ndarray) - assert isinstance(idx._cached_data, np.ndarray) + assert idx._data is idx._data # check cached value is reused + assert len(idx._cache) == 4 + expected = np.arange(0, 100, 10, dtype="int64") + tm.assert_numpy_array_equal(idx._cache["_data"], expected) def test_is_monotonic(self): index = RangeIndex(0, 20, 2) From 53f6b4711092ebfdf9059bd8224714037d4d5794 Mon Sep 17 00:00:00 2001 From: Simon Hawkins Date: Tue, 4 Aug 2020 09:24:28 +0100 Subject: [PATCH 203/632] CI: activate github actions on 1.1.x (PR only) (#35467) --- .github/workflows/ci.yml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index db1fc30111a2d..149acef72db26 100644 --- a/.github/workflows/ci.yml +++ 
b/.github/workflows/ci.yml @@ -4,7 +4,9 @@ on: push: branches: master pull_request: - branches: master + branches: + - master + - 1.1.x env: ENV_FILE: environment.yml From 3b6915ef9435b13ac5d8c19634270ae231f8e47c Mon Sep 17 00:00:00 2001 From: Marc Garcia Date: Tue, 4 Aug 2020 16:20:56 +0100 Subject: [PATCH 204/632] WEB: Fixing whatsnew link in the home page (version was hardcoded) (#35451) --- web/pandas/index.html | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/web/pandas/index.html b/web/pandas/index.html index 83d0f48197033..75c797d6dd93d 100644 --- a/web/pandas/index.html +++ b/web/pandas/index.html @@ -63,7 +63,7 @@
With the support of:
{% if releases %}

Latest version: {{ releases[0].name }}

    -
  • What's new in {{ releases[0].name }}
  • +
  • What's new in {{ releases[0].name }}
  • Release date:
    {{ releases[0].published.strftime("%b %d, %Y") }}
  • Documentation (web)
  • Documentation (pdf)
  • From a4203cf8d440f33d50076166fad3b0577a3ef8fa Mon Sep 17 00:00:00 2001 From: Deepyaman Datta Date: Tue, 4 Aug 2020 19:00:15 -0400 Subject: [PATCH 205/632] Fix unnecessary pluralization (#35553) --- pandas/core/frame.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 79627e43d78c2..d229cd5a9d7ec 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -4193,7 +4193,7 @@ def rename( Parameters ---------- mapper : dict-like or function - Dict-like or functions transformations to apply to + Dict-like or function transformations to apply to that axis' values. Use either ``mapper`` and ``axis`` to specify the axis to target with ``mapper``, or ``index`` and ``columns``. From 3701a9b1bfc3ad3890c2fb1fe1974a4768f6d5f8 Mon Sep 17 00:00:00 2001 From: Sander Date: Wed, 5 Aug 2020 04:27:22 +0200 Subject: [PATCH 206/632] Added paragraph on creating DataFrame from list of namedtuples (#35507) --- doc/source/user_guide/dsintro.rst | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/doc/source/user_guide/dsintro.rst b/doc/source/user_guide/dsintro.rst index 360a14998b227..23bd44c1969a5 100644 --- a/doc/source/user_guide/dsintro.rst +++ b/doc/source/user_guide/dsintro.rst @@ -397,6 +397,32 @@ The result will be a DataFrame with the same index as the input Series, and with one column whose name is the original name of the Series (only if no other column name provided). + +.. _basics.dataframe.from_list_namedtuples: + +From a list of namedtuples +~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The field names of the first ``namedtuple`` in the list determine the columns +of the ``DataFrame``. The remaining namedtuples (or tuples) are simply unpacked +and their values are fed into the rows of the ``DataFrame``. If any of those +tuples is shorter than the first ``namedtuple`` then the later columns in the +corresponding row are marked as missing values. If any are longer than the +first ``namedtuple``, a ``ValueError`` is raised. + +.. ipython:: python + + from collections import namedtuple + + Point = namedtuple('Point', 'x y') + + pd.DataFrame([Point(0, 0), Point(0, 3), (2, 3)]) + + Point3D = namedtuple('Point3D', 'x y z') + + pd.DataFrame([Point3D(0, 0, 0), Point3D(0, 3, 5), Point(2, 3)]) + + .. 
_basics.dataframe.from_list_dataclasses: From a list of dataclasses From b827fe2cd007fd8b70ea01e4b0309fd586f5d7e0 Mon Sep 17 00:00:00 2001 From: Kevin Sheppard Date: Thu, 6 Aug 2020 13:42:56 +0100 Subject: [PATCH 207/632] MAINT: Fix issue in StataReader due to upstream changes (#35427) --- pandas/io/stata.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/pandas/io/stata.py b/pandas/io/stata.py index 7677d8a94d521..3717a2025cf51 100644 --- a/pandas/io/stata.py +++ b/pandas/io/stata.py @@ -1643,8 +1643,7 @@ def read( data = self._insert_strls(data) - cols_ = np.where(self.dtyplist)[0] - + cols_ = np.where([dtyp is not None for dtyp in self.dtyplist])[0] # Convert columns (if needed) to match input type ix = data.index requires_type_conversion = False From 69cd7445179fe9d4493ebe11846dc1990b332bd2 Mon Sep 17 00:00:00 2001 From: David Li Date: Thu, 6 Aug 2020 11:11:21 -0400 Subject: [PATCH 208/632] BUG: handle immutable arrays in tz_convert_from_utc (#35530) (#35532) --- doc/source/whatsnew/v1.2.0.rst | 2 +- pandas/_libs/tslibs/tzconversion.pyx | 6 +++--- pandas/tests/tslibs/test_conversion.py | 8 ++++++++ 3 files changed, 12 insertions(+), 4 deletions(-) diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index b16ca0a80c5b4..304897edbb75e 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -59,7 +59,7 @@ Categorical Datetimelike ^^^^^^^^^^^^ -- +- Bug in :attr:`DatetimeArray.date` where a ``ValueError`` would be raised with a read-only backing array (:issue:`33530`) - Timedelta diff --git a/pandas/_libs/tslibs/tzconversion.pyx b/pandas/_libs/tslibs/tzconversion.pyx index 2b148cd8849f1..4c62b16d430bd 100644 --- a/pandas/_libs/tslibs/tzconversion.pyx +++ b/pandas/_libs/tslibs/tzconversion.pyx @@ -410,7 +410,7 @@ cpdef int64_t tz_convert_from_utc_single(int64_t val, tzinfo tz): return val + deltas[pos] -def tz_convert_from_utc(int64_t[:] vals, tzinfo tz): +def tz_convert_from_utc(const int64_t[:] vals, tzinfo tz): """ Convert the values (in i8) from UTC to tz @@ -435,7 +435,7 @@ def tz_convert_from_utc(int64_t[:] vals, tzinfo tz): @cython.boundscheck(False) @cython.wraparound(False) -cdef int64_t[:] _tz_convert_from_utc(int64_t[:] vals, tzinfo tz): +cdef int64_t[:] _tz_convert_from_utc(const int64_t[:] vals, tzinfo tz): """ Convert the given values (in i8) either to UTC or from UTC. 
@@ -457,7 +457,7 @@ cdef int64_t[:] _tz_convert_from_utc(int64_t[:] vals, tzinfo tz): str typ if is_utc(tz): - converted = vals + converted = vals.copy() elif is_tzlocal(tz): converted = np.empty(n, dtype=np.int64) for i in range(n): diff --git a/pandas/tests/tslibs/test_conversion.py b/pandas/tests/tslibs/test_conversion.py index 4f184b78f34a1..87cd97f853f4d 100644 --- a/pandas/tests/tslibs/test_conversion.py +++ b/pandas/tests/tslibs/test_conversion.py @@ -78,6 +78,14 @@ def test_tz_convert_corner(arr): tm.assert_numpy_array_equal(result, arr) +def test_tz_convert_readonly(): + # GH#35530 + arr = np.array([0], dtype=np.int64) + arr.setflags(write=False) + result = tzconversion.tz_convert_from_utc(arr, UTC) + tm.assert_numpy_array_equal(result, arr) + + @pytest.mark.parametrize("copy", [True, False]) @pytest.mark.parametrize("dtype", ["M8[ns]", "M8[s]"]) def test_length_zero_copy(dtype, copy): From 385a2ca42ae17ac5457901e8718b3f23263d86ad Mon Sep 17 00:00:00 2001 From: Simon Hawkins Date: Thu, 6 Aug 2020 16:12:34 +0100 Subject: [PATCH 209/632] TYP: Add MyPy Error Codes (#35311) --- ci/code_checks.sh | 5 ++++ environment.yml | 1 + pandas/_config/config.py | 2 +- pandas/compat/pickle_compat.py | 6 ++--- pandas/core/algorithms.py | 3 ++- pandas/core/arrays/datetimelike.py | 9 +++++-- pandas/core/arrays/interval.py | 2 +- pandas/core/arrays/period.py | 4 +-- pandas/core/computation/expressions.py | 3 ++- pandas/core/computation/parsing.py | 4 ++- pandas/core/computation/pytables.py | 2 +- pandas/core/config_init.py | 2 +- pandas/core/dtypes/common.py | 6 +++-- pandas/core/dtypes/dtypes.py | 3 ++- pandas/core/dtypes/generic.py | 2 +- pandas/core/frame.py | 21 +++++++++++---- pandas/core/generic.py | 18 +++++++++---- pandas/core/indexes/base.py | 6 +++-- pandas/core/indexes/datetimelike.py | 8 ++++-- pandas/core/internals/blocks.py | 3 ++- pandas/core/ops/array_ops.py | 3 ++- pandas/core/resample.py | 2 +- pandas/core/series.py | 8 ++++-- pandas/core/tools/datetimes.py | 2 +- pandas/io/common.py | 24 ++++++++++++++++-- pandas/io/formats/html.py | 3 ++- pandas/io/formats/printing.py | 12 +++++---- pandas/io/json/_json.py | 3 ++- pandas/io/pytables.py | 9 ++++--- pandas/io/stata.py | 5 +++- pandas/plotting/_matplotlib/timeseries.py | 8 ++++-- pandas/tests/indexes/test_base.py | 31 ++++++++++++----------- pandas/tests/io/pytables/test_store.py | 6 ++--- pandas/tests/io/test_fsspec.py | 3 ++- pandas/util/_decorators.py | 14 +++++++--- requirements-dev.txt | 3 ++- setup.cfg | 1 + 37 files changed, 170 insertions(+), 77 deletions(-) diff --git a/ci/code_checks.sh b/ci/code_checks.sh index 69ce0f1adce22..816bb23865c04 100755 --- a/ci/code_checks.sh +++ b/ci/code_checks.sh @@ -230,6 +230,11 @@ if [[ -z "$CHECK" || "$CHECK" == "patterns" ]]; then invgrep -R --include="*.py" -P '# type: (?!ignore)' pandas RET=$(($RET + $?)) ; echo $MSG "DONE" + # https://github.com/python/mypy/issues/7384 + # MSG='Check for missing error codes with # type: ignore' ; echo $MSG + # invgrep -R --include="*.py" -P '# type: ignore(?!\[)' pandas + # RET=$(($RET + $?)) ; echo $MSG "DONE" + MSG='Check for use of foo.__class__ instead of type(foo)' ; echo $MSG invgrep -R --include=*.{py,pyx} '\.__class__' pandas RET=$(($RET + $?)) ; echo $MSG "DONE" diff --git a/environment.yml b/environment.yml index 9efb995e29497..ed9762e5b8893 100644 --- a/environment.yml +++ b/environment.yml @@ -109,3 +109,4 @@ dependencies: - pip: - git+https://github.com/pandas-dev/pydata-sphinx-theme.git@master - 
git+https://github.com/numpy/numpydoc + - pyflakes>=2.2.0 diff --git a/pandas/_config/config.py b/pandas/_config/config.py index d7b73a0a685d3..fb41b37980b2e 100644 --- a/pandas/_config/config.py +++ b/pandas/_config/config.py @@ -462,7 +462,7 @@ def register_option( for k in path: # NOTE: tokenize.Name is not a public constant # error: Module has no attribute "Name" [attr-defined] - if not re.match("^" + tokenize.Name + "$", k): # type: ignore + if not re.match("^" + tokenize.Name + "$", k): # type: ignore[attr-defined] raise ValueError(f"{k} is not a valid identifier") if keyword.iskeyword(k): raise ValueError(f"{k} is a python keyword") diff --git a/pandas/compat/pickle_compat.py b/pandas/compat/pickle_compat.py index 015b203a60256..ef9f36705a7ee 100644 --- a/pandas/compat/pickle_compat.py +++ b/pandas/compat/pickle_compat.py @@ -64,7 +64,7 @@ class _LoadSparseSeries: # https://github.com/python/mypy/issues/1020 # error: Incompatible return type for "__new__" (returns "Series", but must return # a subtype of "_LoadSparseSeries") - def __new__(cls) -> "Series": # type: ignore + def __new__(cls) -> "Series": # type: ignore[misc] from pandas import Series warnings.warn( @@ -82,7 +82,7 @@ class _LoadSparseFrame: # https://github.com/python/mypy/issues/1020 # error: Incompatible return type for "__new__" (returns "DataFrame", but must # return a subtype of "_LoadSparseFrame") - def __new__(cls) -> "DataFrame": # type: ignore + def __new__(cls) -> "DataFrame": # type: ignore[misc] from pandas import DataFrame warnings.warn( @@ -181,7 +181,7 @@ def __new__(cls) -> "DataFrame": # type: ignore # functions for compat and uses a non-public class of the pickle module. # error: Name 'pkl._Unpickler' is not defined -class Unpickler(pkl._Unpickler): # type: ignore +class Unpickler(pkl._Unpickler): # type: ignore[name-defined] def find_class(self, module, name): # override superclass key = (module, name) diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py index 9e3ca4cc53363..befde7c355818 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py @@ -427,7 +427,8 @@ def isin(comps: AnyArrayLike, values: AnyArrayLike) -> np.ndarray: if is_categorical_dtype(comps): # TODO(extension) # handle categoricals - return comps.isin(values) # type: ignore + # error: "ExtensionArray" has no attribute "isin" [attr-defined] + return comps.isin(values) # type: ignore[attr-defined] comps, dtype = _ensure_data(comps) values, _ = _ensure_data(values, dtype=dtype) diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index c6945e2f78b5a..1b5e1d81f00d6 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -468,6 +468,9 @@ def _ndarray(self) -> np.ndarray: def _from_backing_data(self: _T, arr: np.ndarray) -> _T: # Note: we do not retain `freq` + # error: Unexpected keyword argument "dtype" for "NDArrayBackedExtensionArray" + # TODO: add my error code + # https://github.com/python/mypy/issues/7384 return type(self)(arr, dtype=self.dtype) # type: ignore # ------------------------------------------------------------------ @@ -809,7 +812,8 @@ def _validate_scalar( value = NaT elif isinstance(value, self._recognized_scalars): - value = self._scalar_type(value) # type: ignore + # error: Too many arguments for "object" [call-arg] + value = self._scalar_type(value) # type: ignore[call-arg] else: if msg is None: @@ -1129,7 +1133,8 @@ def resolution(self) -> str: """ Returns day, hour, minute, 
second, millisecond or microsecond """ - return self._resolution_obj.attrname # type: ignore + # error: Item "None" of "Optional[Any]" has no attribute "attrname" + return self._resolution_obj.attrname # type: ignore[union-attr] @classmethod def _validate_frequency(cls, index, freq, **kwargs): diff --git a/pandas/core/arrays/interval.py b/pandas/core/arrays/interval.py index ed2437cc061bd..d76e0fd628a48 100644 --- a/pandas/core/arrays/interval.py +++ b/pandas/core/arrays/interval.py @@ -1057,7 +1057,7 @@ def mid(self): # https://github.com/python/mypy/issues/1362 # Mypy does not support decorated properties - @property # type: ignore + @property # type: ignore[misc] @Appender( _interval_shared_docs["is_non_overlapping_monotonic"] % _shared_docs_kwargs ) diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py index fe78481d99d30..ddaf6d39f1837 100644 --- a/pandas/core/arrays/period.py +++ b/pandas/core/arrays/period.py @@ -278,8 +278,8 @@ def _check_compatible_with(self, other, setitem: bool = False): def dtype(self) -> PeriodDtype: return self._dtype - # error: Read-only property cannot override read-write property [misc] - @property # type: ignore + # error: Read-only property cannot override read-write property + @property # type: ignore[misc] def freq(self) -> BaseOffset: """ Return the frequency object for this PeriodArray. diff --git a/pandas/core/computation/expressions.py b/pandas/core/computation/expressions.py index 0e9077e6d557e..05a5538a88772 100644 --- a/pandas/core/computation/expressions.py +++ b/pandas/core/computation/expressions.py @@ -227,7 +227,8 @@ def evaluate(op, a, b, use_numexpr: bool = True): if op_str is not None: use_numexpr = use_numexpr and _bool_arith_check(op_str, a, b) if use_numexpr: - return _evaluate(op, op_str, a, b) # type: ignore + # error: "None" not callable + return _evaluate(op, op_str, a, b) # type: ignore[misc] return _evaluate_standard(op, op_str, a, b) diff --git a/pandas/core/computation/parsing.py b/pandas/core/computation/parsing.py index c7c7103654a65..86e125b6b909b 100644 --- a/pandas/core/computation/parsing.py +++ b/pandas/core/computation/parsing.py @@ -37,7 +37,9 @@ def create_valid_python_identifier(name: str) -> str: special_characters_replacements = { char: f"_{token.tok_name[tokval]}_" # The ignore here is because of a bug in mypy that is resolved in 0.740 - for char, tokval in tokenize.EXACT_TOKEN_TYPES.items() # type: ignore + for char, tokval in ( + tokenize.EXACT_TOKEN_TYPES.items() # type: ignore[attr-defined] + ) } special_characters_replacements.update( { diff --git a/pandas/core/computation/pytables.py b/pandas/core/computation/pytables.py index 001eb1789007f..f1b11a6869c2b 100644 --- a/pandas/core/computation/pytables.py +++ b/pandas/core/computation/pytables.py @@ -63,7 +63,7 @@ def _resolve_name(self): return self.name # read-only property overwriting read/write property - @property # type: ignore + @property # type: ignore[misc] def value(self): return self._value diff --git a/pandas/core/config_init.py b/pandas/core/config_init.py index 2b2431149e230..0c23f1b4bcdf2 100644 --- a/pandas/core/config_init.py +++ b/pandas/core/config_init.py @@ -327,7 +327,7 @@ def is_terminal() -> bool: """ try: # error: Name 'get_ipython' is not defined - ip = get_ipython() # type: ignore + ip = get_ipython() # type: ignore[name-defined] except NameError: # assume standard Python interpreter in a terminal return True else: diff --git a/pandas/core/dtypes/common.py b/pandas/core/dtypes/common.py index 
a2ca4d84b2bf6..73109020b1b54 100644 --- a/pandas/core/dtypes/common.py +++ b/pandas/core/dtypes/common.py @@ -136,11 +136,13 @@ def ensure_int_or_float(arr: ArrayLike, copy: bool = False) -> np.array: """ # TODO: GH27506 potential bug with ExtensionArrays try: - return arr.astype("int64", copy=copy, casting="safe") # type: ignore + # error: Unexpected keyword argument "casting" for "astype" + return arr.astype("int64", copy=copy, casting="safe") # type: ignore[call-arg] except TypeError: pass try: - return arr.astype("uint64", copy=copy, casting="safe") # type: ignore + # error: Unexpected keyword argument "casting" for "astype" + return arr.astype("uint64", copy=copy, casting="safe") # type: ignore[call-arg] except TypeError: if is_extension_array_dtype(arr.dtype): return arr.to_numpy(dtype="float64", na_value=np.nan) diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py index 8350e136417b1..8dc500dddeafa 100644 --- a/pandas/core/dtypes/dtypes.py +++ b/pandas/core/dtypes/dtypes.py @@ -635,7 +635,8 @@ class DatetimeTZDtype(PandasExtensionDtype): def __init__(self, unit: Union[str_type, "DatetimeTZDtype"] = "ns", tz=None): if isinstance(unit, DatetimeTZDtype): - unit, tz = unit.unit, unit.tz # type: ignore + # error: "str" has no attribute "tz" + unit, tz = unit.unit, unit.tz # type: ignore[attr-defined] if unit != "ns": if isinstance(unit, str) and tz is None: diff --git a/pandas/core/dtypes/generic.py b/pandas/core/dtypes/generic.py index 36eff214fc314..1f1017cfc1929 100644 --- a/pandas/core/dtypes/generic.py +++ b/pandas/core/dtypes/generic.py @@ -7,7 +7,7 @@ def create_pandas_abc_type(name, attr, comp): # https://github.com/python/mypy/issues/1006 # error: 'classmethod' used with a non-method - @classmethod # type: ignore + @classmethod # type: ignore[misc] def _check(cls, inst) -> bool: return getattr(inst, attr, "_typ") in comp diff --git a/pandas/core/frame.py b/pandas/core/frame.py index d229cd5a9d7ec..aabdac16e9a1a 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -2164,10 +2164,14 @@ def to_stata( from pandas.io.stata import StataWriter as statawriter elif version == 117: # mypy: Name 'statawriter' already defined (possibly by an import) - from pandas.io.stata import StataWriter117 as statawriter # type: ignore + from pandas.io.stata import ( # type: ignore[no-redef] + StataWriter117 as statawriter, + ) else: # versions 118 and 119 # mypy: Name 'statawriter' already defined (possibly by an import) - from pandas.io.stata import StataWriterUTF8 as statawriter # type: ignore + from pandas.io.stata import ( # type: ignore[no-redef] + StataWriterUTF8 as statawriter, + ) kwargs: Dict[str, Any] = {} if version is None or version >= 117: @@ -2178,7 +2182,7 @@ def to_stata( kwargs["version"] = version # mypy: Too many arguments for "StataWriter" - writer = statawriter( # type: ignore + writer = statawriter( # type: ignore[call-arg] path, self, convert_dates=convert_dates, @@ -3578,7 +3582,13 @@ def extract_unique_dtypes_from_dtypes_set( extracted_dtypes = [ unique_dtype for unique_dtype in unique_dtypes - if issubclass(unique_dtype.type, tuple(dtypes_set)) # type: ignore + # error: Argument 1 to "tuple" has incompatible type + # "FrozenSet[Union[ExtensionDtype, str, Any, Type[str], + # Type[float], Type[int], Type[complex], Type[bool]]]"; + # expected "Iterable[Union[type, Tuple[Any, ...]]]" + if issubclass( + unique_dtype.type, tuple(dtypes_set) # type: ignore[arg-type] + ) ] return extracted_dtypes @@ -5250,7 +5260,8 @@ def f(vals): # TODO: Just 
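The `[no-redef]` ignores in `DataFrame.to_stata` above deserve a note: binding one alias in several import branches is deliberate, and the scoped ignore marks the re-definitions as intentional without hiding unrelated errors. A runnable sketch of the same shape, with a hypothetical `version` value (the real method derives it from its arguments):

version = 117  # hypothetical value, for illustration only

if version == 114:
    from pandas.io.stata import StataWriter as statawriter
elif version == 117:
    # mypy: Name 'statawriter' already defined (possibly by an import)
    from pandas.io.stata import (  # type: ignore[no-redef]
        StataWriter117 as statawriter,
    )
else:  # versions 118 and 119
    from pandas.io.stata import (  # type: ignore[no-redef]
        StataWriterUTF8 as statawriter,
    )

print(statawriter.__name__)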
move the sort_values doc here. @Substitution(**_shared_doc_kwargs) @Appender(NDFrame.sort_values.__doc__) - def sort_values( # type: ignore[override] # NOQA # issue 27237 + # error: Signature of "sort_values" incompatible with supertype "NDFrame" + def sort_values( # type: ignore[override] self, by, axis=0, diff --git a/pandas/core/generic.py b/pandas/core/generic.py index e46fde1f59f16..42d02f37508fc 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -589,9 +589,9 @@ def swapaxes(self: FrameOrSeries, axis1, axis2, copy=True) -> FrameOrSeries: # ignore needed because of NDFrame constructor is different than # DataFrame/Series constructors. - return self._constructor(new_values, *new_axes).__finalize__( # type: ignore - self, method="swapaxes" - ) + return self._constructor( + new_values, *new_axes # type: ignore[arg-type] + ).__finalize__(self, method="swapaxes") def droplevel(self: FrameOrSeries, level, axis=0) -> FrameOrSeries: """ @@ -4011,7 +4011,11 @@ def add_prefix(self: FrameOrSeries, prefix: str) -> FrameOrSeries: f = functools.partial("{prefix}{}".format, prefix=prefix) mapper = {self._info_axis_name: f} - return self.rename(**mapper) # type: ignore + # error: Incompatible return value type (got "Optional[FrameOrSeries]", + # expected "FrameOrSeries") + # error: Argument 1 to "rename" of "NDFrame" has incompatible type + # "**Dict[str, partial[str]]"; expected "Union[str, int, None]" + return self.rename(**mapper) # type: ignore[return-value, arg-type] def add_suffix(self: FrameOrSeries, suffix: str) -> FrameOrSeries: """ @@ -4070,7 +4074,11 @@ def add_suffix(self: FrameOrSeries, suffix: str) -> FrameOrSeries: f = functools.partial("{}{suffix}".format, suffix=suffix) mapper = {self._info_axis_name: f} - return self.rename(**mapper) # type: ignore + # error: Incompatible return value type (got "Optional[FrameOrSeries]", + # expected "FrameOrSeries") + # error: Argument 1 to "rename" of "NDFrame" has incompatible type + # "**Dict[str, partial[str]]"; expected "Union[str, int, None]" + return self.rename(**mapper) # type: ignore[return-value, arg-type] def sort_values( self, diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 7ba94c76d0037..278999930463f 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -885,7 +885,8 @@ def _format_data(self, name=None) -> str_t: if self.inferred_type == "string": is_justify = False elif self.inferred_type == "categorical": - if is_object_dtype(self.categories): # type: ignore + # error: "Index" has no attribute "categories" + if is_object_dtype(self.categories): # type: ignore[attr-defined] is_justify = False return format_object_summary( @@ -940,7 +941,8 @@ def _format_with_header(self, header, na_rep="NaN") -> List[str_t]: if mask.any(): result = np.array(result) result[mask] = na_rep - result = result.tolist() # type: ignore + # error: "List[str]" has no attribute "tolist" + result = result.tolist() # type: ignore[attr-defined] else: result = trim_front(format_array(values, None, justify="left")) return header + result diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index 15a7e25238983..0ce057d6e764a 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -54,7 +54,8 @@ def _join_i8_wrapper(joinf, with_indexers: bool = True): Create the join wrapper methods. 
""" - @staticmethod # type: ignore + # error: 'staticmethod' used with a non-method + @staticmethod # type: ignore[misc] def wrapper(left, right): if isinstance(left, (np.ndarray, ABCIndex, ABCSeries, DatetimeLikeArrayMixin)): left = left.view("i8") @@ -95,7 +96,10 @@ class DatetimeIndexOpsMixin(ExtensionIndex): _bool_ops: List[str] = [] _field_ops: List[str] = [] - hasnans = cache_readonly(DatetimeLikeArrayMixin._hasnans.fget) # type: ignore + # error: "Callable[[Any], Any]" has no attribute "fget" + hasnans = cache_readonly( + DatetimeLikeArrayMixin._hasnans.fget # type: ignore[attr-defined] + ) _hasnans = hasnans # for index / array -agnostic code @property diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index 6ca6eca1ff829..3186c555b7ae1 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -2744,7 +2744,8 @@ def _block_shape(values: ArrayLike, ndim: int = 1) -> ArrayLike: # TODO(EA2D): https://github.com/pandas-dev/pandas/issues/23023 # block.shape is incorrect for "2D" ExtensionArrays # We can't, and don't need to, reshape. - values = values.reshape(tuple((1,) + shape)) # type: ignore + # error: "ExtensionArray" has no attribute "reshape" + values = values.reshape(tuple((1,) + shape)) # type: ignore[attr-defined] return values diff --git a/pandas/core/ops/array_ops.py b/pandas/core/ops/array_ops.py index 3379ee56b6ad0..aab10cea33632 100644 --- a/pandas/core/ops/array_ops.py +++ b/pandas/core/ops/array_ops.py @@ -349,7 +349,8 @@ def fill_bool(x, left=None): filler = fill_int if is_self_int_dtype and is_other_int_dtype else fill_bool res_values = na_logical_op(lvalues, rvalues, op) - res_values = filler(res_values) # type: ignore + # error: Cannot call function of unknown type + res_values = filler(res_values) # type: ignore[operator] return res_values diff --git a/pandas/core/resample.py b/pandas/core/resample.py index bfdfc65723433..e82a1d4d2cda8 100644 --- a/pandas/core/resample.py +++ b/pandas/core/resample.py @@ -967,7 +967,7 @@ def __init__(self, obj, *args, **kwargs): setattr(self, attr, kwargs.get(attr, getattr(parent, attr))) # error: Too many arguments for "__init__" of "object" - super().__init__(None) # type: ignore + super().__init__(None) # type: ignore[call-arg] self._groupby = groupby self._groupby.mutated = True self._groupby.grouper.mutated = True diff --git a/pandas/core/series.py b/pandas/core/series.py index ef3be854bc3bb..9e70120f67969 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -571,7 +571,8 @@ def _values(self): """ return self._mgr.internal_values() - @Appender(base.IndexOpsMixin.array.__doc__) # type: ignore + # error: Decorated property not supported + @Appender(base.IndexOpsMixin.array.__doc__) # type: ignore[misc] @property def array(self) -> ExtensionArray: return self._mgr._block.array_values() @@ -4921,7 +4922,10 @@ def to_timestamp(self, freq=None, how="start", copy=True) -> "Series": if not isinstance(self.index, PeriodIndex): raise TypeError(f"unsupported Type {type(self.index).__name__}") - new_index = self.index.to_timestamp(freq=freq, how=how) # type: ignore + # error: "PeriodIndex" has no attribute "to_timestamp" + new_index = self.index.to_timestamp( # type: ignore[attr-defined] + freq=freq, how=how + ) return self._constructor(new_values, index=new_index).__finalize__( self, method="to_timestamp" ) diff --git a/pandas/core/tools/datetimes.py b/pandas/core/tools/datetimes.py index 7aac2f793f61a..3c1fe6bacefcf 100644 --- a/pandas/core/tools/datetimes.py 
+++ b/pandas/core/tools/datetimes.py @@ -309,7 +309,7 @@ def _convert_listlike_datetimes( if tz == "utc": # error: Item "DatetimeIndex" of "Union[DatetimeArray, DatetimeIndex]" has # no attribute "tz_convert" - arg = arg.tz_convert(None).tz_localize(tz) # type: ignore + arg = arg.tz_convert(None).tz_localize(tz) # type: ignore[union-attr] return arg elif is_datetime64_ns_dtype(arg_dtype): diff --git a/pandas/io/common.py b/pandas/io/common.py index 6ac8051f35b6f..f39b8279fbdb0 100644 --- a/pandas/io/common.py +++ b/pandas/io/common.py @@ -121,7 +121,15 @@ def stringify_path( """ if hasattr(filepath_or_buffer, "__fspath__"): # https://github.com/python/mypy/issues/1424 - return filepath_or_buffer.__fspath__() # type: ignore + # error: Item "str" of "Union[str, Path, IO[str]]" has no attribute + # "__fspath__" [union-attr] + # error: Item "IO[str]" of "Union[str, Path, IO[str]]" has no attribute + # "__fspath__" [union-attr] + # error: Item "str" of "Union[str, Path, IO[bytes]]" has no attribute + # "__fspath__" [union-attr] + # error: Item "IO[bytes]" of "Union[str, Path, IO[bytes]]" has no + # attribute "__fspath__" [union-attr] + return filepath_or_buffer.__fspath__() # type: ignore[union-attr] elif isinstance(filepath_or_buffer, pathlib.Path): return str(filepath_or_buffer) return _expand_user(filepath_or_buffer) @@ -516,7 +524,19 @@ def get_handle( return f, handles -class _BytesZipFile(zipfile.ZipFile, BytesIO): # type: ignore +# error: Definition of "__exit__" in base class "ZipFile" is incompatible with +# definition in base class "BytesIO" [misc] +# error: Definition of "__enter__" in base class "ZipFile" is incompatible with +# definition in base class "BytesIO" [misc] +# error: Definition of "__enter__" in base class "ZipFile" is incompatible with +# definition in base class "BinaryIO" [misc] +# error: Definition of "__enter__" in base class "ZipFile" is incompatible with +# definition in base class "IO" [misc] +# error: Definition of "read" in base class "ZipFile" is incompatible with +# definition in base class "BytesIO" [misc] +# error: Definition of "read" in base class "ZipFile" is incompatible with +# definition in base class "IO" [misc] +class _BytesZipFile(zipfile.ZipFile, BytesIO): # type: ignore[misc] """ Wrapper for standard library class ZipFile and allow the returned file-like handle to accept byte strings via `write` method. 
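The block of `[union-attr]` ignores in `stringify_path` above exists because mypy does not narrow a `Union` through `hasattr()` (python/mypy#1424): every union member lacking `__fspath__` is reported even though the guard makes the call safe. A minimal sketch of that shape:

import pathlib
from typing import IO, Union

def stringify(obj: Union[str, pathlib.Path, IO[str]]) -> object:
    if hasattr(obj, "__fspath__"):
        # mypy still sees the str and IO[str] members here, hence the
        # scoped ignore in the real stringify_path.
        return obj.__fspath__()  # type: ignore[union-attr]
    return obj

assert stringify(pathlib.Path("data.csv")) == "data.csv"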
diff --git a/pandas/io/formats/html.py b/pandas/io/formats/html.py index 13f0ab1e8a52c..c89189f1e679a 100644 --- a/pandas/io/formats/html.py +++ b/pandas/io/formats/html.py @@ -86,8 +86,9 @@ def _get_columns_formatted_values(self) -> Iterable: return self.columns # https://github.com/python/mypy/issues/1237 + # error: Signature of "is_truncated" incompatible with supertype "TableFormatter" @property - def is_truncated(self) -> bool: # type: ignore + def is_truncated(self) -> bool: # type: ignore[override] return self.fmt.is_truncated @property diff --git a/pandas/io/formats/printing.py b/pandas/io/formats/printing.py index 1cf79dc105901..23daab725ec65 100644 --- a/pandas/io/formats/printing.py +++ b/pandas/io/formats/printing.py @@ -499,7 +499,7 @@ def _justify( # error: Incompatible return value type (got "Tuple[List[Sequence[str]], # List[Sequence[str]]]", expected "Tuple[List[Tuple[str, ...]], # List[Tuple[str, ...]]]") - return head, tail # type: ignore + return head, tail # type: ignore[return-value] def format_object_attrs( @@ -524,14 +524,16 @@ def format_object_attrs( attrs: List[Tuple[str, Union[str, int]]] = [] if hasattr(obj, "dtype") and include_dtype: # error: "Sequence[Any]" has no attribute "dtype" - attrs.append(("dtype", f"'{obj.dtype}'")) # type: ignore + attrs.append(("dtype", f"'{obj.dtype}'")) # type: ignore[attr-defined] if getattr(obj, "name", None) is not None: # error: "Sequence[Any]" has no attribute "name" - attrs.append(("name", default_pprint(obj.name))) # type: ignore + attrs.append(("name", default_pprint(obj.name))) # type: ignore[attr-defined] # error: "Sequence[Any]" has no attribute "names" - elif getattr(obj, "names", None) is not None and any(obj.names): # type: ignore + elif getattr(obj, "names", None) is not None and any( + obj.names # type: ignore[attr-defined] + ): # error: "Sequence[Any]" has no attribute "names" - attrs.append(("names", default_pprint(obj.names))) # type: ignore + attrs.append(("names", default_pprint(obj.names))) # type: ignore[attr-defined] max_seq_items = get_option("display.max_seq_items") or len(obj) if len(obj) > max_seq_items: attrs.append(("length", len(obj))) diff --git a/pandas/io/json/_json.py b/pandas/io/json/_json.py index ff37c36962aec..0b06a26d4aa3c 100644 --- a/pandas/io/json/_json.py +++ b/pandas/io/json/_json.py @@ -115,7 +115,8 @@ def __init__( self.obj = obj if orient is None: - orient = self._default_orient # type: ignore + # error: "Writer" has no attribute "_default_orient" + orient = self._default_orient # type: ignore[attr-defined] self.orient = orient self.date_format = date_format diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index e0df4c29e543e..9f5b6041b0ffa 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -2280,7 +2280,8 @@ def _get_atom(cls, values: ArrayLike) -> "Col": Get an appropriately typed and shaped pytables.Col object for values. 
""" dtype = values.dtype - itemsize = dtype.itemsize # type: ignore + # error: "ExtensionDtype" has no attribute "itemsize" + itemsize = dtype.itemsize # type: ignore[attr-defined] shape = values.shape if values.ndim == 1: @@ -3349,9 +3350,9 @@ def queryables(self) -> Dict[str, Any]: (v.cname, v) for v in self.values_axes if v.name in set(self.data_columns) ] - return dict(d1 + d2 + d3) # type: ignore - # error: List comprehension has incompatible type - # List[Tuple[Any, None]]; expected List[Tuple[str, IndexCol]] + # error: Unsupported operand types for + ("List[Tuple[str, IndexCol]]" + # and "List[Tuple[str, None]]") + return dict(d1 + d2 + d3) # type: ignore[operator] def index_cols(self): """ return a list of my index cols """ diff --git a/pandas/io/stata.py b/pandas/io/stata.py index 3717a2025cf51..cb23b781a7ad2 100644 --- a/pandas/io/stata.py +++ b/pandas/io/stata.py @@ -1952,7 +1952,10 @@ def _open_file_binary_write( """ if hasattr(fname, "write"): # See https://github.com/python/mypy/issues/1424 for hasattr challenges - return fname, False, None # type: ignore + # error: Incompatible return value type (got "Tuple[Union[str, Path, + # IO[Any]], bool, None]", expected "Tuple[BinaryIO, bool, Union[str, + # Mapping[str, str], None]]") + return fname, False, None # type: ignore[return-value] elif isinstance(fname, (str, Path)): # Extract compression mode as given, if dict compression_typ, compression_args = get_compression_method(compression) diff --git a/pandas/plotting/_matplotlib/timeseries.py b/pandas/plotting/_matplotlib/timeseries.py index 95f9fbf3995ed..eef4276f0ed09 100644 --- a/pandas/plotting/_matplotlib/timeseries.py +++ b/pandas/plotting/_matplotlib/timeseries.py @@ -45,7 +45,10 @@ def _maybe_resample(series: "Series", ax, kwargs): if ax_freq is not None and freq != ax_freq: if is_superperiod(freq, ax_freq): # upsample input series = series.copy() - series.index = series.index.asfreq(ax_freq, how="s") # type: ignore + # error: "Index" has no attribute "asfreq" + series.index = series.index.asfreq( # type: ignore[attr-defined] + ax_freq, how="s" + ) freq = ax_freq elif _is_sup(freq, ax_freq): # one is weekly how = kwargs.pop("how", "last") @@ -222,7 +225,8 @@ def _get_index_freq(index: "Index") -> Optional[BaseOffset]: if freq is None: freq = getattr(index, "inferred_freq", None) if freq == "B": - weekdays = np.unique(index.dayofweek) # type: ignore + # error: "Index" has no attribute "dayofweek" + weekdays = np.unique(index.dayofweek) # type: ignore[attr-defined] if (5 in weekdays) or (6 in weekdays): freq = None diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py index 59ee88117a984..70eb9e502f78a 100644 --- a/pandas/tests/indexes/test_base.py +++ b/pandas/tests/indexes/test_base.py @@ -1514,23 +1514,24 @@ def test_slice_locs_na_raises(self): @pytest.mark.parametrize( "in_slice,expected", [ + # error: Slice index must be an integer or None (pd.IndexSlice[::-1], "yxdcb"), - (pd.IndexSlice["b":"y":-1], ""), # type: ignore - (pd.IndexSlice["b"::-1], "b"), # type: ignore - (pd.IndexSlice[:"b":-1], "yxdcb"), # type: ignore - (pd.IndexSlice[:"y":-1], "y"), # type: ignore - (pd.IndexSlice["y"::-1], "yxdcb"), # type: ignore - (pd.IndexSlice["y"::-4], "yb"), # type: ignore + (pd.IndexSlice["b":"y":-1], ""), # type: ignore[misc] + (pd.IndexSlice["b"::-1], "b"), # type: ignore[misc] + (pd.IndexSlice[:"b":-1], "yxdcb"), # type: ignore[misc] + (pd.IndexSlice[:"y":-1], "y"), # type: ignore[misc] + (pd.IndexSlice["y"::-1], "yxdcb"), # type: 
ignore[misc] + (pd.IndexSlice["y"::-4], "yb"), # type: ignore[misc] # absent labels - (pd.IndexSlice[:"a":-1], "yxdcb"), # type: ignore - (pd.IndexSlice[:"a":-2], "ydb"), # type: ignore - (pd.IndexSlice["z"::-1], "yxdcb"), # type: ignore - (pd.IndexSlice["z"::-3], "yc"), # type: ignore - (pd.IndexSlice["m"::-1], "dcb"), # type: ignore - (pd.IndexSlice[:"m":-1], "yx"), # type: ignore - (pd.IndexSlice["a":"a":-1], ""), # type: ignore - (pd.IndexSlice["z":"z":-1], ""), # type: ignore - (pd.IndexSlice["m":"m":-1], ""), # type: ignore + (pd.IndexSlice[:"a":-1], "yxdcb"), # type: ignore[misc] + (pd.IndexSlice[:"a":-2], "ydb"), # type: ignore[misc] + (pd.IndexSlice["z"::-1], "yxdcb"), # type: ignore[misc] + (pd.IndexSlice["z"::-3], "yc"), # type: ignore[misc] + (pd.IndexSlice["m"::-1], "dcb"), # type: ignore[misc] + (pd.IndexSlice[:"m":-1], "yx"), # type: ignore[misc] + (pd.IndexSlice["a":"a":-1], ""), # type: ignore[misc] + (pd.IndexSlice["z":"z":-1], ""), # type: ignore[misc] + (pd.IndexSlice["m":"m":-1], ""), # type: ignore[misc] ], ) def test_slice_locs_negative_step(self, in_slice, expected): diff --git a/pandas/tests/io/pytables/test_store.py b/pandas/tests/io/pytables/test_store.py index df014171be817..0942c79837e7c 100644 --- a/pandas/tests/io/pytables/test_store.py +++ b/pandas/tests/io/pytables/test_store.py @@ -1751,9 +1751,9 @@ def col(t, column): # try to index a col which isn't a data_column msg = ( - f"column string2 is not a data_column.\n" - f"In order to read column string2 you must reload the dataframe \n" - f"into HDFStore and include string2 with the data_columns argument." + "column string2 is not a data_column.\n" + "In order to read column string2 you must reload the dataframe \n" + "into HDFStore and include string2 with the data_columns argument." 
) with pytest.raises(AttributeError, match=msg): store.create_table_index("f", columns=["string2"]) diff --git a/pandas/tests/io/test_fsspec.py b/pandas/tests/io/test_fsspec.py index d64e2d1933ace..a0723452ccb70 100644 --- a/pandas/tests/io/test_fsspec.py +++ b/pandas/tests/io/test_fsspec.py @@ -15,7 +15,8 @@ ) # the ignore on the following line accounts for to_csv returning Optional(str) # in general, but always str in the case we give no filename -text = df1.to_csv(index=False).encode() # type: ignore +# error: Item "None" of "Optional[str]" has no attribute "encode" +text = df1.to_csv(index=False).encode() # type: ignore[union-attr] @pytest.fixture diff --git a/pandas/util/_decorators.py b/pandas/util/_decorators.py index 6135ccba1573d..f81bca7e85156 100644 --- a/pandas/util/_decorators.py +++ b/pandas/util/_decorators.py @@ -323,7 +323,8 @@ def wrapper(*args, **kwargs) -> Callable[..., Any]: sig = inspect.Signature(params) # https://github.com/python/typing/issues/598 - func.__signature__ = sig # type: ignore + # error: "F" has no attribute "__signature__" + func.__signature__ = sig # type: ignore[attr-defined] return cast(F, wrapper) return decorate @@ -357,8 +358,12 @@ def decorator(decorated: F) -> F: for docstring in docstrings: if hasattr(docstring, "_docstring_components"): + # error: Item "str" of "Union[str, Callable[..., Any]]" has no + # attribute "_docstring_components" [union-attr] + # error: Item "function" of "Union[str, Callable[..., Any]]" + # has no attribute "_docstring_components" [union-attr] docstring_components.extend( - docstring._docstring_components # type: ignore + docstring._docstring_components # type: ignore[union-attr] ) elif isinstance(docstring, str) or docstring.__doc__: docstring_components.append(docstring) @@ -373,7 +378,10 @@ def decorator(decorated: F) -> F: ] ) - decorated._docstring_components = docstring_components # type: ignore + # error: "F" has no attribute "_docstring_components" + decorated._docstring_components = ( # type: ignore[attr-defined] + docstring_components + ) return decorated return decorator diff --git a/requirements-dev.txt b/requirements-dev.txt index c0dd77cd73ddc..6a87b0a99a4f8 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -73,4 +73,5 @@ cftime pyreadstat tabulate>=0.8.3 git+https://github.com/pandas-dev/pydata-sphinx-theme.git@master -git+https://github.com/numpy/numpydoc \ No newline at end of file +git+https://github.com/numpy/numpydoc +pyflakes>=2.2.0 \ No newline at end of file diff --git a/setup.cfg b/setup.cfg index ee5725e36d193..84c281b756395 100644 --- a/setup.cfg +++ b/setup.cfg @@ -122,6 +122,7 @@ check_untyped_defs=True strict_equality=True warn_redundant_casts = True warn_unused_ignores = True +show_error_codes = True [mypy-pandas.tests.*] check_untyped_defs=False From 877dc15378088cad5257366f0dfb9a64e40efc37 Mon Sep 17 00:00:00 2001 From: Eric Wieser Date: Thu, 6 Aug 2020 16:14:43 +0100 Subject: [PATCH 210/632] Ensure _group_selection_context is always reset (#35572) --- pandas/core/groupby/groupby.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index ac45222625569..6c8a780859939 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -463,8 +463,10 @@ def _group_selection_context(groupby): Set / reset the _group_selection_context. 
""" groupby._set_group_selection() - yield groupby - groupby._reset_group_selection() + try: + yield groupby + finally: + groupby._reset_group_selection() _KeysArgType = Union[ From 6af60b28d5356574e992d2b71280087d9774f343 Mon Sep 17 00:00:00 2001 From: Terji Petersen Date: Thu, 6 Aug 2020 16:47:59 +0100 Subject: [PATCH 211/632] CLN: remove kwargs in RangeIndex.copy (#35575) --- pandas/core/indexes/range.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py index 1dc4fc1e91462..e9c4c301f4dca 100644 --- a/pandas/core/indexes/range.py +++ b/pandas/core/indexes/range.py @@ -385,11 +385,13 @@ def _shallow_copy(self, values=None, name: Label = no_default): return Int64Index._simple_new(values, name=name) @doc(Int64Index.copy) - def copy(self, name=None, deep=False, dtype=None, **kwargs): + def copy(self, name=None, deep=False, dtype=None, names=None): self._validate_dtype(dtype) - if name is None: - name = self.name - return self.from_range(self._range, name=name) + + new_index = self._shallow_copy() + names = self._validate_names(name=name, names=names, deep=deep) + new_index = new_index.set_names(names) + return new_index def _minmax(self, meth: str): no_steps = len(self) - 1 From b05fdcd4254ffb6193f9ec39c48b4d7e480fabc7 Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Thu, 6 Aug 2020 08:49:06 -0700 Subject: [PATCH 212/632] ENH: enable mul, div on Index by dispatching to Series (#34160) --- doc/source/whatsnew/v1.2.0.rst | 2 +- pandas/core/indexes/base.py | 58 +------------------ pandas/core/indexes/category.py | 2 - pandas/core/indexes/datetimes.py | 1 - pandas/core/indexes/multi.py | 35 +++++++++++ pandas/core/indexes/period.py | 1 - pandas/tests/arithmetic/test_numeric.py | 14 ----- .../indexes/categorical/test_category.py | 9 ++- pandas/tests/indexes/common.py | 33 ++++++++--- 9 files changed, 73 insertions(+), 82 deletions(-) diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index 304897edbb75e..82037a332e0f9 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -17,7 +17,7 @@ Enhancements Other enhancements ^^^^^^^^^^^^^^^^^^ - +- :class:`Index` with object dtype supports division and multiplication (:issue:`34160`) - - diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 278999930463f..bfdfbd35f27ad 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -2377,31 +2377,10 @@ def _get_unique_index(self, dropna: bool = False): # -------------------------------------------------------------------- # Arithmetic & Logical Methods - def __add__(self, other): - if isinstance(other, (ABCSeries, ABCDataFrame)): - return NotImplemented - from pandas import Series - - return Index(Series(self) + other) - - def __radd__(self, other): - from pandas import Series - - return Index(other + Series(self)) - def __iadd__(self, other): # alias for __add__ return self + other - def __sub__(self, other): - return Index(np.array(self) - other) - - def __rsub__(self, other): - # wrap Series to ensure we pin name correctly - from pandas import Series - - return Index(other - Series(self)) - def __and__(self, other): return self.intersection(other) @@ -5293,38 +5272,6 @@ def _add_comparison_methods(cls): cls.__le__ = _make_comparison_op(operator.le, cls) cls.__ge__ = _make_comparison_op(operator.ge, cls) - @classmethod - def _add_numeric_methods_add_sub_disabled(cls): - """ - Add in the numeric add/sub methods to disable. 
- """ - cls.__add__ = make_invalid_op("__add__") - cls.__radd__ = make_invalid_op("__radd__") - cls.__iadd__ = make_invalid_op("__iadd__") - cls.__sub__ = make_invalid_op("__sub__") - cls.__rsub__ = make_invalid_op("__rsub__") - cls.__isub__ = make_invalid_op("__isub__") - - @classmethod - def _add_numeric_methods_disabled(cls): - """ - Add in numeric methods to disable other than add/sub. - """ - cls.__pow__ = make_invalid_op("__pow__") - cls.__rpow__ = make_invalid_op("__rpow__") - cls.__mul__ = make_invalid_op("__mul__") - cls.__rmul__ = make_invalid_op("__rmul__") - cls.__floordiv__ = make_invalid_op("__floordiv__") - cls.__rfloordiv__ = make_invalid_op("__rfloordiv__") - cls.__truediv__ = make_invalid_op("__truediv__") - cls.__rtruediv__ = make_invalid_op("__rtruediv__") - cls.__mod__ = make_invalid_op("__mod__") - cls.__divmod__ = make_invalid_op("__divmod__") - cls.__neg__ = make_invalid_op("__neg__") - cls.__pos__ = make_invalid_op("__pos__") - cls.__abs__ = make_invalid_op("__abs__") - cls.__inv__ = make_invalid_op("__inv__") - @classmethod def _add_numeric_methods_binary(cls): """ @@ -5340,11 +5287,12 @@ def _add_numeric_methods_binary(cls): cls.__truediv__ = _make_arithmetic_op(operator.truediv, cls) cls.__rtruediv__ = _make_arithmetic_op(ops.rtruediv, cls) - # TODO: rmod? rdivmod? cls.__mod__ = _make_arithmetic_op(operator.mod, cls) + cls.__rmod__ = _make_arithmetic_op(ops.rmod, cls) cls.__floordiv__ = _make_arithmetic_op(operator.floordiv, cls) cls.__rfloordiv__ = _make_arithmetic_op(ops.rfloordiv, cls) cls.__divmod__ = _make_arithmetic_op(divmod, cls) + cls.__rdivmod__ = _make_arithmetic_op(ops.rdivmod, cls) cls.__mul__ = _make_arithmetic_op(operator.mul, cls) cls.__rmul__ = _make_arithmetic_op(ops.rmul, cls) @@ -5504,7 +5452,7 @@ def shape(self): return self._values.shape -Index._add_numeric_methods_disabled() +Index._add_numeric_methods() Index._add_logical_methods() Index._add_comparison_methods() diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py index 74b235655e345..fb283cbe02954 100644 --- a/pandas/core/indexes/category.py +++ b/pandas/core/indexes/category.py @@ -770,6 +770,4 @@ def _wrap_joined_index( return self._create_from_codes(joined, name=name) -CategoricalIndex._add_numeric_methods_add_sub_disabled() -CategoricalIndex._add_numeric_methods_disabled() CategoricalIndex._add_logical_methods_disabled() diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index 6d2e592f024ed..f71fd0d406c54 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -842,7 +842,6 @@ def indexer_between_time( return mask.nonzero()[0] -DatetimeIndex._add_numeric_methods_disabled() DatetimeIndex._add_logical_methods_disabled() diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index 235da89083d0a..a6e8ec0707de7 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -50,6 +50,7 @@ from pandas.core.indexes.frozen import FrozenList from pandas.core.indexes.numeric import Int64Index import pandas.core.missing as missing +from pandas.core.ops.invalid import make_invalid_op from pandas.core.sorting import ( get_group_index, indexer_from_factorized, @@ -3606,6 +3607,40 @@ def isin(self, values, level=None): return np.zeros(len(levs), dtype=np.bool_) return levs.isin(values) + @classmethod + def _add_numeric_methods_add_sub_disabled(cls): + """ + Add in the numeric add/sub methods to disable. 
+ """ + cls.__add__ = make_invalid_op("__add__") + cls.__radd__ = make_invalid_op("__radd__") + cls.__iadd__ = make_invalid_op("__iadd__") + cls.__sub__ = make_invalid_op("__sub__") + cls.__rsub__ = make_invalid_op("__rsub__") + cls.__isub__ = make_invalid_op("__isub__") + + @classmethod + def _add_numeric_methods_disabled(cls): + """ + Add in numeric methods to disable other than add/sub. + """ + cls.__pow__ = make_invalid_op("__pow__") + cls.__rpow__ = make_invalid_op("__rpow__") + cls.__mul__ = make_invalid_op("__mul__") + cls.__rmul__ = make_invalid_op("__rmul__") + cls.__floordiv__ = make_invalid_op("__floordiv__") + cls.__rfloordiv__ = make_invalid_op("__rfloordiv__") + cls.__truediv__ = make_invalid_op("__truediv__") + cls.__rtruediv__ = make_invalid_op("__rtruediv__") + cls.__mod__ = make_invalid_op("__mod__") + cls.__rmod__ = make_invalid_op("__rmod__") + cls.__divmod__ = make_invalid_op("__divmod__") + cls.__rdivmod__ = make_invalid_op("__rdivmod__") + cls.__neg__ = make_invalid_op("__neg__") + cls.__pos__ = make_invalid_op("__pos__") + cls.__abs__ = make_invalid_op("__abs__") + cls.__inv__ = make_invalid_op("__inv__") + MultiIndex._add_numeric_methods_disabled() MultiIndex._add_numeric_methods_add_sub_disabled() diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py index 03e11b652477f..c7199e4a28a17 100644 --- a/pandas/core/indexes/period.py +++ b/pandas/core/indexes/period.py @@ -724,7 +724,6 @@ def memory_usage(self, deep=False): return result -PeriodIndex._add_numeric_methods_disabled() PeriodIndex._add_logical_methods_disabled() diff --git a/pandas/tests/arithmetic/test_numeric.py b/pandas/tests/arithmetic/test_numeric.py index 2155846b271fc..484f83deb0f55 100644 --- a/pandas/tests/arithmetic/test_numeric.py +++ b/pandas/tests/arithmetic/test_numeric.py @@ -548,20 +548,6 @@ class TestMultiplicationDivision: # __mul__, __rmul__, __div__, __rdiv__, __floordiv__, __rfloordiv__ # for non-timestamp/timedelta/period dtypes - @pytest.mark.parametrize( - "box", - [ - pytest.param( - pd.Index, - marks=pytest.mark.xfail( - reason="Index.__div__ always raises", raises=TypeError - ), - ), - pd.Series, - pd.DataFrame, - ], - ids=lambda x: x.__name__, - ) def test_divide_decimal(self, box): # resolves issue GH#9787 ser = Series([Decimal(10)]) diff --git a/pandas/tests/indexes/categorical/test_category.py b/pandas/tests/indexes/categorical/test_category.py index 8af26eef504fc..b325edb321ed4 100644 --- a/pandas/tests/indexes/categorical/test_category.py +++ b/pandas/tests/indexes/categorical/test_category.py @@ -43,7 +43,14 @@ def test_disallow_addsub_ops(self, func, op_name): # GH 10039 # set ops (+/-) raise TypeError idx = pd.Index(pd.Categorical(["a", "b"])) - msg = f"cannot perform {op_name} with this index type: CategoricalIndex" + cat_or_list = "'(Categorical|list)' and '(Categorical|list)'" + msg = "|".join( + [ + f"cannot perform {op_name} with this index type: CategoricalIndex", + "can only concatenate list", + rf"unsupported operand type\(s\) for [\+-]: {cat_or_list}", + ] + ) with pytest.raises(TypeError, match=msg): func(idx) diff --git a/pandas/tests/indexes/common.py b/pandas/tests/indexes/common.py index 3b41c4bfacf73..238ee8d304d05 100644 --- a/pandas/tests/indexes/common.py +++ b/pandas/tests/indexes/common.py @@ -146,22 +146,41 @@ def test_numeric_compat(self): # Check that this doesn't cover MultiIndex case, if/when it does, # we can remove multi.test_compat.test_numeric_compat assert not isinstance(idx, MultiIndex) + if type(idx) is Index: + return - 
with pytest.raises(TypeError, match="cannot perform __mul__"): + typ = type(idx._data).__name__ + lmsg = "|".join( + [ + rf"unsupported operand type\(s\) for \*: '{typ}' and 'int'", + "cannot perform (__mul__|__truediv__|__floordiv__) with " + f"this index type: {typ}", + ] + ) + with pytest.raises(TypeError, match=lmsg): idx * 1 - with pytest.raises(TypeError, match="cannot perform __rmul__"): + rmsg = "|".join( + [ + rf"unsupported operand type\(s\) for \*: 'int' and '{typ}'", + "cannot perform (__rmul__|__rtruediv__|__rfloordiv__) with " + f"this index type: {typ}", + ] + ) + with pytest.raises(TypeError, match=rmsg): 1 * idx - div_err = "cannot perform __truediv__" + div_err = lmsg.replace("*", "/") with pytest.raises(TypeError, match=div_err): idx / 1 - - div_err = div_err.replace(" __", " __r") + div_err = rmsg.replace("*", "/") with pytest.raises(TypeError, match=div_err): 1 / idx - with pytest.raises(TypeError, match="cannot perform __floordiv__"): + + floordiv_err = lmsg.replace("*", "//") + with pytest.raises(TypeError, match=floordiv_err): idx // 1 - with pytest.raises(TypeError, match="cannot perform __rfloordiv__"): + floordiv_err = rmsg.replace("*", "//") + with pytest.raises(TypeError, match=floordiv_err): 1 // idx def test_logical_compat(self): From f0e3f0a5dbaa1d5eac12e405a574d404b5398fec Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Thu, 6 Aug 2020 09:20:28 -0700 Subject: [PATCH 213/632] BUG: TDA.__floordiv__ with NaT (#35583) * BUG: TDA.__floordiv__ with NaT * whatsnew --- doc/source/whatsnew/v1.2.0.rst | 2 +- pandas/core/arrays/timedeltas.py | 2 +- pandas/tests/arithmetic/test_timedelta64.py | 17 +++++++++++++++++ 3 files changed, 19 insertions(+), 2 deletions(-) diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index 82037a332e0f9..3ec53227003fd 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -64,7 +64,7 @@ Datetimelike Timedelta ^^^^^^^^^ - +- Bug in :class:`TimedeltaIndex`, :class:`Series`, and :class:`DataFrame` floor-division with ``timedelta64`` dtypes and ``NaT`` in the denominator (:issue:`35529`) - - diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py index a378423df788b..99a4725c2d806 100644 --- a/pandas/core/arrays/timedeltas.py +++ b/pandas/core/arrays/timedeltas.py @@ -628,7 +628,7 @@ def __floordiv__(self, other): result = self.asi8 // other.asi8 mask = self._isnan | other._isnan if mask.any(): - result = result.astype(np.int64) + result = result.astype(np.float64) result[mask] = np.nan return result diff --git a/pandas/tests/arithmetic/test_timedelta64.py b/pandas/tests/arithmetic/test_timedelta64.py index f94408d657ae5..64d3d5b6d684d 100644 --- a/pandas/tests/arithmetic/test_timedelta64.py +++ b/pandas/tests/arithmetic/test_timedelta64.py @@ -1733,6 +1733,23 @@ def test_tdarr_div_length_mismatch(self, box_with_array): # ------------------------------------------------------------------ # __floordiv__, __rfloordiv__ + def test_td64arr_floordiv_td64arr_with_nat(self, box_with_array): + # GH#35529 + box = box_with_array + + left = pd.Series([1000, 222330, 30], dtype="timedelta64[ns]") + right = pd.Series([1000, 222330, None], dtype="timedelta64[ns]") + + left = tm.box_expected(left, box) + right = tm.box_expected(right, box) + + expected = np.array([1.0, 1.0, np.nan], dtype=np.float64) + expected = tm.box_expected(expected, box) + + result = left // right + + tm.assert_equal(result, expected) + def test_td64arr_floordiv_tdscalar(self, box_with_array, scalar_td): 
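The one-line cast in ``__floordiv__`` above matters because the masked result must be able to hold ``NaN``, which an ``int64`` array cannot. A sketch mirroring the new test (GH#35529), assuming a build with the fix:

import pandas as pd

left = pd.Series([1000, 222330, 30], dtype="timedelta64[ns]")
right = pd.Series([1000, 222330, None], dtype="timedelta64[ns]")  # NaT in denominator

result = left // right
# float64 result: 1.0, 1.0, NaN -- the NaT slot becomes NaN instead of
# poisoning an integer array.
print(result)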
# GH#18831 td1 = Series([timedelta(minutes=5, seconds=3)] * 3) From 7cf2d0fbba4716d39b1b6e62f9ce370ea0f5254f Mon Sep 17 00:00:00 2001 From: Matthew Roeschke Date: Thu, 6 Aug 2020 10:45:19 -0700 Subject: [PATCH 214/632] BUG: RollingGroupby respects __getitem__ (#35513) --- doc/source/whatsnew/v1.1.1.rst | 3 +-- pandas/core/window/rolling.py | 4 ++++ pandas/tests/window/test_grouper.py | 25 +++++++++++++++++++++++++ 3 files changed, 30 insertions(+), 2 deletions(-) diff --git a/doc/source/whatsnew/v1.1.1.rst b/doc/source/whatsnew/v1.1.1.rst index 6a327a4fc732f..5e36bfe6b6307 100644 --- a/doc/source/whatsnew/v1.1.1.rst +++ b/doc/source/whatsnew/v1.1.1.rst @@ -16,8 +16,7 @@ Fixed regressions ~~~~~~~~~~~~~~~~~ - Fixed regression where :func:`read_csv` would raise a ``ValueError`` when ``pandas.options.mode.use_inf_as_na`` was set to ``True`` (:issue:`35493`). -- -- +- Fixed regression in :class:`pandas.core.groupby.RollingGroupby` where column selection was ignored (:issue:`35486`) .. --------------------------------------------------------------------------- diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py index 445f179248226..87bcaa7d9512f 100644 --- a/pandas/core/window/rolling.py +++ b/pandas/core/window/rolling.py @@ -2220,6 +2220,10 @@ def _apply( def _constructor(self): return Rolling + @cache_readonly + def _selected_obj(self): + return self._groupby._selected_obj + def _create_blocks(self, obj: FrameOrSeries): """ Split data into blocks & return conformed data. diff --git a/pandas/tests/window/test_grouper.py b/pandas/tests/window/test_grouper.py index 744ca264e91d9..ca5a9eccea4f5 100644 --- a/pandas/tests/window/test_grouper.py +++ b/pandas/tests/window/test_grouper.py @@ -214,3 +214,28 @@ def foo(x): name="value", ) tm.assert_series_equal(result, expected) + + def test_groupby_subselect_rolling(self): + # GH 35486 + df = DataFrame( + {"a": [1, 2, 3, 2], "b": [4.0, 2.0, 3.0, 1.0], "c": [10, 20, 30, 20]} + ) + result = df.groupby("a")[["b"]].rolling(2).max() + expected = DataFrame( + [np.nan, np.nan, 2.0, np.nan], + columns=["b"], + index=pd.MultiIndex.from_tuples( + ((1, 0), (2, 1), (2, 3), (3, 2)), names=["a", None] + ), + ) + tm.assert_frame_equal(result, expected) + + result = df.groupby("a")["b"].rolling(2).max() + expected = Series( + [np.nan, np.nan, 2.0, np.nan], + index=pd.MultiIndex.from_tuples( + ((1, 0), (2, 1), (2, 3), (3, 2)), names=["a", None] + ), + name="b", + ) + tm.assert_series_equal(result, expected) From 25ff71579311961aadace1c1b02c5e68db126aed Mon Sep 17 00:00:00 2001 From: Matthew Roeschke Date: Thu, 6 Aug 2020 14:51:05 -0700 Subject: [PATCH 215/632] CLN: get_flattened_iterator (#35515) --- pandas/core/groupby/ops.py | 4 +-- pandas/core/sorting.py | 53 +++++++++++++++----------------------- 2 files changed, 23 insertions(+), 34 deletions(-) diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py index 3aaeef3b63760..c6b0732b04c09 100644 --- a/pandas/core/groupby/ops.py +++ b/pandas/core/groupby/ops.py @@ -50,7 +50,7 @@ from pandas.core.sorting import ( compress_group_index, decons_obs_group_ids, - get_flattened_iterator, + get_flattened_list, get_group_index, get_group_index_sorter, get_indexer_dict, @@ -153,7 +153,7 @@ def _get_group_keys(self): comp_ids, _, ngroups = self.group_info # provide "flattened" iterator for multi-group setting - return get_flattened_iterator(comp_ids, ngroups, self.levels, self.codes) + return get_flattened_list(comp_ids, ngroups, self.levels, self.codes) def apply(self, f: F, data: 
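A sketch of the regression PATCH 214 closes (GH#35486): before the cached ``_selected_obj`` above, a rolling operation on a grouped column selection silently rolled over every column. This mirrors the new test and assumes a build with the fix:

import pandas as pd

df = pd.DataFrame(
    {"a": [1, 2, 3, 2], "b": [4.0, 2.0, 3.0, 1.0], "c": [10, 20, 30, 20]}
)

result = df.groupby("a")[["b"]].rolling(2).max()
print(result.columns.tolist())  # ['b'] -- 'c' is no longer dragged along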
FrameOrSeries, axis: int = 0): mutated = self.mutated diff --git a/pandas/core/sorting.py b/pandas/core/sorting.py index ee73aa42701b0..8bdd466ae6f33 100644 --- a/pandas/core/sorting.py +++ b/pandas/core/sorting.py @@ -1,5 +1,6 @@ """ miscellaneous sorting / groupby utilities """ -from typing import Callable, Optional +from collections import defaultdict +from typing import TYPE_CHECKING, Callable, DefaultDict, Iterable, List, Optional, Tuple import numpy as np @@ -18,6 +19,9 @@ import pandas.core.algorithms as algorithms from pandas.core.construction import extract_array +if TYPE_CHECKING: + from pandas.core.indexes.base import Index # noqa:F401 + _INT64_MAX = np.iinfo(np.int64).max @@ -409,7 +413,7 @@ def ensure_key_mapped(values, key: Optional[Callable], levels=None): levels : Optional[List], if values is a MultiIndex, list of levels to apply the key to. """ - from pandas.core.indexes.api import Index + from pandas.core.indexes.api import Index # noqa:F811 if not key: return values @@ -440,36 +444,21 @@ def ensure_key_mapped(values, key: Optional[Callable], levels=None): return result -class _KeyMapper: - """ - Map compressed group id -> key tuple. - """ - - def __init__(self, comp_ids, ngroups: int, levels, labels): - self.levels = levels - self.labels = labels - self.comp_ids = comp_ids.astype(np.int64) - - self.k = len(labels) - self.tables = [hashtable.Int64HashTable(ngroups) for _ in range(self.k)] - - self._populate_tables() - - def _populate_tables(self): - for labs, table in zip(self.labels, self.tables): - table.map(self.comp_ids, labs.astype(np.int64)) - - def get_key(self, comp_id): - return tuple( - level[table.get_item(comp_id)] - for table, level in zip(self.tables, self.levels) - ) - - -def get_flattened_iterator(comp_ids, ngroups, levels, labels): - # provide "flattened" iterator for multi-group setting - mapper = _KeyMapper(comp_ids, ngroups, levels, labels) - return [mapper.get_key(i) for i in range(ngroups)] +def get_flattened_list( + comp_ids: np.ndarray, + ngroups: int, + levels: Iterable["Index"], + labels: Iterable[np.ndarray], +) -> List[Tuple]: + """Map compressed group id -> key tuple.""" + comp_ids = comp_ids.astype(np.int64, copy=False) + arrays: DefaultDict[int, List[int]] = defaultdict(list) + for labs, level in zip(labels, levels): + table = hashtable.Int64HashTable(ngroups) + table.map(comp_ids, labs.astype(np.int64, copy=False)) + for i in range(ngroups): + arrays[i].append(level[table.get_item(i)]) + return [tuple(array) for array in arrays.values()] def get_indexer_dict(label_list, keys): From c4691d6eefbd8ed65c273eaf352eed1ae21fafc0 Mon Sep 17 00:00:00 2001 From: Tom Augspurger Date: Thu, 6 Aug 2020 17:00:26 -0500 Subject: [PATCH 216/632] DOC: Document that read_hdf can use pickle (#35554) --- doc/source/user_guide/io.rst | 9 ++++--- pandas/io/pytables.py | 51 +++++++++++++++++++++++++++++++++++- 2 files changed, 55 insertions(+), 5 deletions(-) diff --git a/doc/source/user_guide/io.rst b/doc/source/user_guide/io.rst index d4be9d802d697..cc42f952b1733 100644 --- a/doc/source/user_guide/io.rst +++ b/doc/source/user_guide/io.rst @@ -3441,10 +3441,11 @@ for some advanced strategies .. warning:: - pandas requires ``PyTables`` >= 3.0.0. - There is a indexing bug in ``PyTables`` < 3.2 which may appear when querying stores using an index. - If you see a subset of results being returned, upgrade to ``PyTables`` >= 3.2. - Stores created previously will need to be rewritten using the updated version. 
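The `get_flattened_list` rewrite above replaces the `_KeyMapper` helper class with a single pass over the levels. A pure-Python sketch of what it computes, with made-up toy data; the real implementation uses `Int64HashTable` for the id-to-code mapping:

from collections import defaultdict

comp_ids = [0, 1, 0, 2]                # compressed group id for each row
levels = [["a", "b"], [10, 20]]        # unique values per grouping level
labels = [[0, 0, 0, 1], [0, 1, 0, 1]]  # per-row code into each level
ngroups = 3

arrays = defaultdict(list)
for labs, level in zip(labels, levels):
    # rows within one group share the same code, so a plain dict works
    id_to_code = dict(zip(comp_ids, labs))
    for gid in range(ngroups):
        arrays[gid].append(level[id_to_code[gid]])

# one key tuple per group id
print([tuple(v) for v in arrays.values()])  # [('a', 10), ('a', 20), ('b', 20)]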
+ Pandas uses PyTables for reading and writing HDF5 files, which allows + serializing object-dtype data with pickle. Loading pickled data received from + untrusted sources can be unsafe. + + See: https://docs.python.org/3/library/pickle.html for more. .. ipython:: python :suppress: diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index 9f5b6041b0ffa..aeb7b3e044794 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -289,7 +289,15 @@ def read_hdf( Read from the store, close it if we opened it. Retrieve pandas object stored in file, optionally based on where - criteria + criteria. + + .. warning:: + + Pandas uses PyTables for reading and writing HDF5 files, which allows + serializing object-dtype data with pickle when using the "fixed" format. + Loading pickled data received from untrusted sources can be unsafe. + + See: https://docs.python.org/3/library/pickle.html for more. Parameters ---------- @@ -445,6 +453,14 @@ class HDFStore: Either Fixed or Table format. + .. warning:: + + Pandas uses PyTables for reading and writing HDF5 files, which allows + serializing object-dtype data with pickle when using the "fixed" format. + Loading pickled data received from untrusted sources can be unsafe. + + See: https://docs.python.org/3/library/pickle.html for more. + Parameters ---------- path : str @@ -789,6 +805,14 @@ def select( """ Retrieve pandas object stored in file, optionally based on where criteria. + .. warning:: + + Pandas uses PyTables for reading and writing HDF5 files, which allows + serializing object-dtype data with pickle when using the "fixed" format. + Loading pickled data received from untrusted sources can be unsafe. + + See: https://docs.python.org/3/library/pickle.html for more. + Parameters ---------- key : str @@ -852,6 +876,15 @@ def select_as_coordinates( """ return the selection as an Index + .. warning:: + + Pandas uses PyTables for reading and writing HDF5 files, which allows + serializing object-dtype data with pickle when using the "fixed" format. + Loading pickled data received from untrusted sources can be unsafe. + + See: https://docs.python.org/3/library/pickle.html for more. + + Parameters ---------- key : str @@ -876,6 +909,14 @@ def select_column( return a single column from the table. This is generally only useful to select an indexable + .. warning:: + + Pandas uses PyTables for reading and writing HDF5 files, which allows + serializing object-dtype data with pickle when using the "fixed" format. + Loading pickled data received from untrusted sources can be unsafe. + + See: https://docs.python.org/3/library/pickle.html for more. + Parameters ---------- key : str @@ -912,6 +953,14 @@ def select_as_multiple( """ Retrieve pandas objects from multiple tables. + .. warning:: + + Pandas uses PyTables for reading and writing HDF5 files, which allows + serializing object-dtype data with pickle when using the "fixed" format. + Loading pickled data received from untrusted sources can be unsafe. + + See: https://docs.python.org/3/library/pickle.html for more. 
+ Parameters ---------- keys : a list of the tables From d82e5403625c863327f6b89d3137eb6164f00e1a Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Thu, 6 Aug 2020 15:06:46 -0700 Subject: [PATCH 217/632] BUG: NaT.__cmp__(invalid) should raise TypeError (#35585) --- doc/source/whatsnew/v1.2.0.rst | 1 + pandas/_libs/tslibs/nattype.pyx | 31 +++++++---------- pandas/tests/scalar/test_nat.py | 62 +++++++++++++++++++++++++++++++-- 3 files changed, 73 insertions(+), 21 deletions(-) diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index 3ec53227003fd..7168fc0078083 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -60,6 +60,7 @@ Categorical Datetimelike ^^^^^^^^^^^^ - Bug in :attr:`DatetimeArray.date` where a ``ValueError`` would be raised with a read-only backing array (:issue:`33530`) +- Bug in ``NaT`` comparisons failing to raise ``TypeError`` on invalid inequality comparisons (:issue:`35046`) - Timedelta diff --git a/pandas/_libs/tslibs/nattype.pyx b/pandas/_libs/tslibs/nattype.pyx index 73df51832d700..79f50c7261905 100644 --- a/pandas/_libs/tslibs/nattype.pyx +++ b/pandas/_libs/tslibs/nattype.pyx @@ -107,30 +107,25 @@ cdef class _NaT(datetime): __array_priority__ = 100 def __richcmp__(_NaT self, object other, int op): - cdef: - int ndim = getattr(other, "ndim", -1) + if util.is_datetime64_object(other) or PyDateTime_Check(other): + # We treat NaT as datetime-like for this comparison + return _nat_scalar_rules[op] - if ndim == -1: + elif util.is_timedelta64_object(other) or PyDelta_Check(other): + # We treat NaT as timedelta-like for this comparison return _nat_scalar_rules[op] elif util.is_array(other): - result = np.empty(other.shape, dtype=np.bool_) - result.fill(_nat_scalar_rules[op]) + if other.dtype.kind in "mM": + result = np.empty(other.shape, dtype=np.bool_) + result.fill(_nat_scalar_rules[op]) + elif other.dtype.kind == "O": + result = np.array([PyObject_RichCompare(self, x, op) for x in other]) + else: + return NotImplemented return result - elif ndim == 0: - if util.is_datetime64_object(other): - return _nat_scalar_rules[op] - else: - raise TypeError( - f"Cannot compare type {type(self).__name__} " - f"with type {type(other).__name__}" - ) - - # Note: instead of passing "other, self, _reverse_ops[op]", we observe - # that `_nat_scalar_rules` is invariant under `_reverse_ops`, - # rendering it unnecessary. 
diff --git a/pandas/tests/scalar/test_nat.py b/pandas/tests/scalar/test_nat.py
index e1e2ea1a5cec8..03830019affa1 100644
--- a/pandas/tests/scalar/test_nat.py
+++ b/pandas/tests/scalar/test_nat.py
@@ -513,11 +513,67 @@ def test_to_numpy_alias():
     assert isna(expected) and isna(result)


-@pytest.mark.parametrize("other", [Timedelta(0), Timestamp(0)])
+@pytest.mark.parametrize(
+    "other",
+    [
+        Timedelta(0),
+        Timedelta(0).to_pytimedelta(),
+        pytest.param(
+            Timedelta(0).to_timedelta64(),
+            marks=pytest.mark.xfail(
+                reason="td64 doesn't return NotImplemented, see numpy#17017"
+            ),
+        ),
+        Timestamp(0),
+        Timestamp(0).to_pydatetime(),
+        pytest.param(
+            Timestamp(0).to_datetime64(),
+            marks=pytest.mark.xfail(
+                reason="dt64 doesn't return NotImplemented, see numpy#17017"
+            ),
+        ),
+        Timestamp(0).tz_localize("UTC"),
+        NaT,
+    ],
+)
 def test_nat_comparisons(compare_operators_no_eq_ne, other):
     # GH 26039
-    assert getattr(NaT, compare_operators_no_eq_ne)(other) is False
-    assert getattr(other, compare_operators_no_eq_ne)(NaT) is False
+    opname = compare_operators_no_eq_ne
+
+    assert getattr(NaT, opname)(other) is False
+
+    op = getattr(operator, opname.strip("_"))
+    assert op(NaT, other) is False
+    assert op(other, NaT) is False
+
+
+@pytest.mark.parametrize("other", [np.timedelta64(0, "ns"), np.datetime64("now", "ns")])
+def test_nat_comparisons_numpy(other):
+    # Once numpy#17017 is fixed and the xfailed cases in test_nat_comparisons
+    # pass, this test can be removed
+    assert not NaT == other
+    assert NaT != other
+    assert not NaT < other
+    assert not NaT > other
+    assert not NaT <= other
+    assert not NaT >= other
+
+
+@pytest.mark.parametrize("other", ["foo", 2, 2.0])
+@pytest.mark.parametrize("op", [operator.le, operator.lt, operator.ge, operator.gt])
+def test_nat_comparisons_invalid(other, op):
+    # GH#35585
+    assert not NaT == other
+    assert not other == NaT
+
+    assert NaT != other
+    assert other != NaT
+
+    with pytest.raises(TypeError):
+        op(NaT, other)
+
+    with pytest.raises(TypeError):
+        op(other, NaT)


 @pytest.mark.parametrize(

From 522f855a38d52763c4e2b07480786163b91dad38 Mon Sep 17 00:00:00 2001
From: Thomas Smith
Date: Fri, 7 Aug 2020 00:29:21 +0100
Subject: [PATCH 218/632] BUG: GroupBy.apply() throws erroneous ValueError
 with duplicate axes (#35441)

---
 doc/source/whatsnew/v1.2.0.rst        |  1 +
 pandas/core/groupby/ops.py            |  8 ++++----
 pandas/tests/groupby/test_apply.py    | 27 ++++++++++++++++++-------
 pandas/tests/groupby/test_function.py |  4 ----
 4 files changed, 25 insertions(+), 15 deletions(-)

diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index 7168fc0078083..6f173cb2fce12 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -133,6 +133,7 @@ Plotting

 Groupby/resample/rolling
 ^^^^^^^^^^^^^^^^^^^^^^^^
+- Bug in :meth:`DataFrameGroupBy.apply` that would sometimes throw an erroneous ``ValueError`` if the grouping axis had duplicate entries (:issue:`16646`)
 -
 -

diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py
index c6b0732b04c09..64eb413fe78fa 100644
--- a/pandas/core/groupby/ops.py
+++ b/pandas/core/groupby/ops.py
@@ -211,7 +211,7 @@ def apply(self, f: F, data: FrameOrSeries, axis: int = 0):
             # group might be modified
             group_axes = group.axes
             res = f(group)
-            if not _is_indexed_like(res, group_axes):
+            if not _is_indexed_like(res, group_axes, axis):
                 mutated = True
             result_values.append(res)

@@ -897,13 +897,13 @@ def agg_series(
     return grouper.get_result()


-def _is_indexed_like(obj, axes) -> bool:
+def _is_indexed_like(obj, axes, axis: int) -> bool:
     if isinstance(obj, Series):
         if len(axes) > 1:
             return False
-        return obj.index.equals(axes[0])
+        return obj.axes[axis].equals(axes[axis])
     elif isinstance(obj, DataFrame):
-        return obj.index.equals(axes[0])
+        return obj.axes[axis].equals(axes[axis])

     return False

diff --git a/pandas/tests/groupby/test_apply.py b/pandas/tests/groupby/test_apply.py
index 525a6fe2637c3..665cd12225ad7 100644
--- a/pandas/tests/groupby/test_apply.py
+++ b/pandas/tests/groupby/test_apply.py
@@ -63,15 +63,8 @@ def test_apply_trivial():
     tm.assert_frame_equal(result, expected)


-@pytest.mark.xfail(
-    reason="GH#20066; function passed into apply "
-    "returns a DataFrame with the same index "
-    "as the one to create GroupBy object."
-)
 def test_apply_trivial_fail():
     # GH 20066
-    # trivial apply fails if the constant dataframe has the same index
-    # with the one used to create GroupBy object.
     df = pd.DataFrame(
         {"key": ["a", "a", "b", "b", "a"], "data": [1.0, 2.0, 3.0, 4.0, 5.0]},
         columns=["key", "data"],
@@ -1044,3 +1037,23 @@ def test_apply_with_date_in_multiindex_does_not_convert_to_timestamp():
     tm.assert_frame_equal(result, expected)
     for val in result.index.levels[1]:
         assert type(val) is date
+
+
+def test_apply_by_cols_equals_apply_by_rows_transposed():
+    # GH 16646
+    # Operating on the columns, or transposing and operating on the rows
+    # should give the same result. There was previously a bug where the
+    # by_rows operation would work fine, but by_cols would throw a ValueError
+
+    df = pd.DataFrame(
+        np.random.random([6, 4]),
+        columns=pd.MultiIndex.from_product([["A", "B"], [1, 2]]),
+    )
+
+    by_rows = df.T.groupby(axis=0, level=0).apply(
+        lambda x: x.droplevel(axis=0, level=0)
+    )
+    by_cols = df.groupby(axis=1, level=0).apply(lambda x: x.droplevel(axis=1, level=0))
+
+    tm.assert_frame_equal(by_cols, by_rows.T)
+    tm.assert_frame_equal(by_cols, df)

diff --git a/pandas/tests/groupby/test_function.py b/pandas/tests/groupby/test_function.py
index cbfba16223f74..42945be923fa0 100644
--- a/pandas/tests/groupby/test_function.py
+++ b/pandas/tests/groupby/test_function.py
@@ -940,10 +940,6 @@ def test_frame_describe_multikey(tsframe):
     groupedT = tsframe.groupby({"A": 0, "B": 0, "C": 1, "D": 1}, axis=1)
     result = groupedT.describe()
     expected = tsframe.describe().T
-    expected.index = pd.MultiIndex(
-        levels=[[0, 1], expected.index],
-        codes=[[0, 0, 1, 1], range(len(expected.index))],
-    )
     tm.assert_frame_equal(result, expected)
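A minimal sketch of the scenario this patch repairs, mirroring the regression test above (illustrative values; assumes the fix is applied):

    import numpy as np
    import pandas as pd

    # Grouping along axis=1 used to raise an erroneous ValueError (GH#16646).
    df = pd.DataFrame(
        np.random.random([6, 4]),
        columns=pd.MultiIndex.from_product([["A", "B"], [1, 2]]),
    )
    by_cols = df.groupby(axis=1, level=0).apply(lambda x: x.droplevel(axis=1, level=0))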
From 11e9d111e9634ea93104df6eac4f18d1e940004a Mon Sep 17 00:00:00 2001
From: Jeff Hernandez <12969559+jeff-hernandez@users.noreply.github.com>
Date: Thu, 6 Aug 2020 18:37:25 -0500
Subject: [PATCH 219/632] DOC: Add compose ecosystem docs (#35405)

---
 doc/source/ecosystem.rst          | 7 +++++++
 web/pandas/community/ecosystem.md | 8 ++++++++
 2 files changed, 15 insertions(+)

diff --git a/doc/source/ecosystem.rst b/doc/source/ecosystem.rst
index b02d4abd3ddf8..de231e43918f8 100644
--- a/doc/source/ecosystem.rst
+++ b/doc/source/ecosystem.rst
@@ -80,6 +80,11 @@ ML pipeline.
 Featuretools is a Python library for automated feature engineering built on top of pandas. It excels at transforming temporal and relational datasets into feature matrices for machine learning using reusable feature engineering "primitives". Users can contribute their own primitives in Python and share them with the rest of the community.

+`Compose <https://github.com/FeatureLabs/compose>`__
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Compose is a machine learning tool for labeling data and prediction engineering. It allows you to structure the labeling process by parameterizing prediction problems and transforming time-driven relational data into target values with cutoff times that can be used for supervised learning.
+
 .. _ecosystem.visualization:

 Visualization
@@ -445,6 +450,7 @@ Library Accessor Classes Description
 `pdvega`_ ``vgplot`` ``Series``, ``DataFrame`` Provides plotting functions from the Altair_ library.
 `pandas_path`_ ``path`` ``Index``, ``Series`` Provides `pathlib.Path`_ functions for Series.
 `pint-pandas`_ ``pint`` ``Series``, ``DataFrame`` Provides units support for numeric Series and DataFrames.
+`composeml`_ ``slice`` ``DataFrame`` Provides a generator for enhanced data slicing.
 =============== ========== ========================= ===============================================================

 .. _cyberpandas: https://cyberpandas.readthedocs.io/en/latest
@@ -453,3 +459,4 @@ Library Accessor Classes Description
 .. _pandas_path: https://github.com/drivendataorg/pandas-path/
 .. _pathlib.Path: https://docs.python.org/3/library/pathlib.html
 .. _pint-pandas: https://github.com/hgrecco/pint-pandas
+.. _composeml: https://github.com/FeatureLabs/compose

diff --git a/web/pandas/community/ecosystem.md b/web/pandas/community/ecosystem.md
index be109ea53eb7d..515d23afb93ec 100644
--- a/web/pandas/community/ecosystem.md
+++ b/web/pandas/community/ecosystem.md
@@ -42,6 +42,13 @@ datasets into feature matrices for machine learning using reusable
 feature engineering "primitives". Users can contribute their own
 primitives in Python and share them with the rest of the community.

+### [Compose](https://github.com/FeatureLabs/compose)
+
+Compose is a machine learning tool for labeling data and prediction engineering.
+It allows you to structure the labeling process by parameterizing
+prediction problems and transforming time-driven relational data into
+target values with cutoff times that can be used for supervised learning.
+
 ## Visualization

 ### [Altair](https://altair-viz.github.io/)

@@ -372,3 +379,4 @@ authors to coordinate on the namespace.
 | [pdvega](https://altair-viz.github.io/pdvega/) | `vgplot` | `Series`, `DataFrame` |
 | [pandas_path](https://github.com/drivendataorg/pandas-path/) | `path` | `Index`, `Series` |
 | [pint-pandas](https://github.com/hgrecco/pint-pandas) | `pint` | `Series`, `DataFrame` |
+| [composeml](https://github.com/FeatureLabs/compose) | `slice` | `DataFrame` |

From 3e10b7c7a2652743f26e73ee7095ff512c908978 Mon Sep 17 00:00:00 2001
From: jbrockmendel
Date: Thu, 6 Aug 2020 16:50:50 -0700
Subject: [PATCH 220/632] REF: implement _apply_blockwise (#35359)

---
 pandas/core/window/ewm.py     | 27 +++------------
 pandas/core/window/rolling.py | 60 ++++++++++++++++++++++-------------
 2 files changed, 42 insertions(+), 45 deletions(-)

diff --git a/pandas/core/window/ewm.py b/pandas/core/window/ewm.py
index 7a2d8e84bec76..c57c434dd3040 100644
--- a/pandas/core/window/ewm.py
+++ b/pandas/core/window/ewm.py
@@ -12,9 +12,7 @@
 from pandas.util._decorators import Appender, Substitution, doc

 from pandas.core.dtypes.common import is_datetime64_ns_dtype
-from pandas.core.dtypes.generic import ABCDataFrame

-from pandas.core.base import DataError
 import pandas.core.common as common
 from pandas.core.window.common import _doc_template, _shared_docs, zsqrt
 from pandas.core.window.rolling import _flex_binary_moment, _Rolling
@@ -302,30 +300,13 @@ def _apply(self, func):
         -------
         y : same type as input argument
         """
-        blocks, obj = self._create_blocks(self._selected_obj)
-        block_list = list(blocks)
-
-        results = []
-        exclude = []
-        for i, b in enumerate(blocks):
-            try:
-                values = self._prep_values(b.values)
-
-            except (TypeError, NotImplementedError) as err:
-                if isinstance(obj, ABCDataFrame):
-                    exclude.extend(b.columns)
-                    del block_list[i]
-                    continue
-                else:
-                    raise DataError("No numeric types to aggregate") from err

+        def homogeneous_func(values: np.ndarray):
             if values.size == 0:
-                results.append(values.copy())
-                continue
+                return values.copy()

-            results.append(np.apply_along_axis(func, self.axis, values))
+            return np.apply_along_axis(func, self.axis, values)

-        return self._wrap_results(results, block_list, obj, exclude)
+        return self._apply_blockwise(homogeneous_func)

     @Substitution(name="ewm", func_name="mean")
     @Appender(_doc_template)

diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py
index 87bcaa7d9512f..a04d68a6d6745 100644
--- a/pandas/core/window/rolling.py
+++ b/pandas/core/window/rolling.py
@@ -12,7 +12,7 @@
 from pandas._libs.tslibs import BaseOffset, to_offset
 import pandas._libs.window.aggregations as window_aggregations
-from pandas._typing import Axis, FrameOrSeries, Scalar
+from pandas._typing import ArrayLike, Axis, FrameOrSeries, Scalar
 from pandas.compat._optional import import_optional_dependency
 from pandas.compat.numpy import function as nv
 from pandas.util._decorators import Appender, Substitution, cache_readonly, doc
@@ -487,6 +487,38 @@ def _get_window_indexer(self, window: int) -> BaseIndexer:
             return VariableWindowIndexer(index_array=self._on.asi8, window_size=window)
         return FixedWindowIndexer(window_size=window)

+    def _apply_blockwise(
+        self, homogeneous_func: Callable[..., ArrayLike]
+    ) -> FrameOrSeries:
+        """
+        Apply the given function to the DataFrame broken down into homogeneous
+        sub-frames.
+        """
+        # This isn't quite blockwise, since `blocks` is actually a collection
+        # of homogeneous DataFrames.
+        blocks, obj = self._create_blocks(self._selected_obj)
+
+        skipped: List[int] = []
+        results: List[ArrayLike] = []
+        exclude: List[Scalar] = []
+        for i, b in enumerate(blocks):
+            try:
+                values = self._prep_values(b.values)
+
+            except (TypeError, NotImplementedError) as err:
+                if isinstance(obj, ABCDataFrame):
+                    skipped.append(i)
+                    exclude.extend(b.columns)
+                    continue
+                else:
+                    raise DataError("No numeric types to aggregate") from err
+
+            result = homogeneous_func(values)
+            results.append(result)
+
+        block_list = [blk for i, blk in enumerate(blocks) if i not in skipped]
+        return self._wrap_results(results, block_list, obj, exclude)
+
     def _apply(
         self,
         func: Callable,
@@ -524,30 +556,14 @@ def _apply(
         """
         win_type = self._get_win_type(kwargs)
         window = self._get_window(win_type=win_type)
-
-        blocks, obj = self._create_blocks(self._selected_obj)
-        block_list = list(blocks)
         window_indexer = self._get_window_indexer(window)

-        results = []
-        exclude: List[Scalar] = []
-        for i, b in enumerate(blocks):
-            try:
-                values = self._prep_values(b.values)
-
-            except (TypeError, NotImplementedError) as err:
-                if isinstance(obj, ABCDataFrame):
-                    exclude.extend(b.columns)
-                    del block_list[i]
-                    continue
-                else:
-                    raise DataError("No numeric types to aggregate") from err

+        def homogeneous_func(values: np.ndarray):
+            # calculation function
             if values.size == 0:
-                results.append(values.copy())
-                continue
+                return values.copy()

-            # calculation function
             offset = calculate_center_offset(window) if center else 0
             additional_nans = np.array([np.nan] * offset)

@@ -594,9 +610,9 @@ def calc(x):
             if center:
                 result = self._center_window(result, window)

-            results.append(result)
+            return result

-        return self._wrap_results(results, block_list, obj, exclude)
+        return self._apply_blockwise(homogeneous_func)

     def aggregate(self, func, *args, **kwargs):
         result, how = self._aggregate(func, *args, **kwargs)

From 71b5650f88cfa41b4e6779bd6e08b835f773cdf4 Mon Sep 17 00:00:00 2001
From: jbrockmendel
Date: Thu, 6 Aug 2020 19:20:03 -0700
Subject: [PATCH 221/632] BUG: validate index/data length match in DataFrame
 construction (#35590)

* BUG: validate index/data length match in DataFrame construction

* whatsnew

* Other -> DataFrame
---
 doc/source/whatsnew/v1.1.1.rst          | 4 ++++
 pandas/core/internals/blocks.py         | 3 ---
 pandas/core/internals/managers.py       | 2 +-
 pandas/tests/frame/test_constructors.py | 6 ++++++
 4 files changed, 11 insertions(+), 4 deletions(-)

diff --git a/doc/source/whatsnew/v1.1.1.rst b/doc/source/whatsnew/v1.1.1.rst
index 5e36bfe6b6307..7db609fba5d68 100644
--- a/doc/source/whatsnew/v1.1.1.rst
+++ b/doc/source/whatsnew/v1.1.1.rst
@@ -50,6 +50,10 @@ Categorical
 -

+**DataFrame**
+- Bug in :class:`DataFrame` constructor failing to raise ``ValueError`` in some cases when data and index have mismatched lengths (:issue:`33437`)
+-
+
 .. ---------------------------------------------------------------------------

 .. _whatsnew_111.contributors:

diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index 3186c555b7ae1..f3286b3c20965 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -105,7 +105,6 @@ class Block(PandasObject):
     is_extension = False
     _can_hold_na = False
     _can_consolidate = True
-    _verify_integrity = True
     _validate_ndim = True

     @classmethod
@@ -1525,7 +1524,6 @@ class ExtensionBlock(Block):
     """

     _can_consolidate = False
-    _verify_integrity = False
     _validate_ndim = False
     is_extension = True

@@ -2613,7 +2611,6 @@ def _replace_coerce(
 class CategoricalBlock(ExtensionBlock):
     __slots__ = ()
     is_categorical = True
-    _verify_integrity = True
     _can_hold_na = True

     should_store = Block.should_store

diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py
index 895385b170c91..0ce2408eb003e 100644
--- a/pandas/core/internals/managers.py
+++ b/pandas/core/internals/managers.py
@@ -312,7 +312,7 @@ def _verify_integrity(self) -> None:
         mgr_shape = self.shape
         tot_items = sum(len(x.mgr_locs) for x in self.blocks)
         for block in self.blocks:
-            if block._verify_integrity and block.shape[1:] != mgr_shape[1:]:
+            if block.shape[1:] != mgr_shape[1:]:
                 raise construction_error(tot_items, block.shape[1:], self.axes)
         if len(self.items) != tot_items:
             raise AssertionError(

diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py
index a4ed548264d39..b78bb1c492ef4 100644
--- a/pandas/tests/frame/test_constructors.py
+++ b/pandas/tests/frame/test_constructors.py
@@ -2619,6 +2619,12 @@ class DatetimeSubclass(datetime):
         data = pd.DataFrame({"datetime": [DatetimeSubclass(2020, 1, 1, 1, 1)]})
         assert data.datetime.dtype == "datetime64[ns]"

+    def test_with_mismatched_index_length_raises(self):
+        # GH#33437
+        dti = pd.date_range("2016-01-01", periods=3, tz="US/Pacific")
+        with pytest.raises(ValueError, match="Shape of passed values"):
+            DataFrame(dti, index=range(4))
+

 class TestDataFrameConstructorWithDatetimeTZ:
     def test_from_dict(self):
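A short sketch of the check this patch introduces, mirroring the new test above (illustrative values; assumes the fix is applied):

    import pandas as pd

    # Data of length 3 with an index of length 4 now raises up front (GH#33437).
    dti = pd.date_range("2016-01-01", periods=3, tz="US/Pacific")
    try:
        pd.DataFrame(dti, index=range(4))
    except ValueError as err:
        assert "Shape of passed values" in str(err)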
From 9c566a6352847fdb102f2532f620864ddba2a693 Mon Sep 17 00:00:00 2001
From: cleconte987
Date: Fri, 7 Aug 2020 11:35:39 +0200
Subject: [PATCH 222/632] DOC: Fix heading capitalization in
 doc/source/whatsnew (#35368)

---
 doc/source/whatsnew/v0.22.0.rst              |  6 +++---
 doc/source/whatsnew/v0.23.0.rst              | 22 ++++++++++----------
 doc/source/whatsnew/v0.24.0.rst              | 12 +++++------
 doc/source/whatsnew/v0.24.2.rst              |  1 -
 scripts/validate_rst_title_capitalization.py |  3 +++
 5 files changed, 23 insertions(+), 21 deletions(-)

diff --git a/doc/source/whatsnew/v0.22.0.rst b/doc/source/whatsnew/v0.22.0.rst
index 75949a90d09a6..66d3ab3305565 100644
--- a/doc/source/whatsnew/v0.22.0.rst
+++ b/doc/source/whatsnew/v0.22.0.rst
@@ -1,7 +1,7 @@
 .. _whatsnew_0220:

-v0.22.0 (December 29, 2017)
----------------------------
+Version 0.22.0 (December 29, 2017)
+----------------------------------

 {{ header }}

@@ -96,7 +96,7 @@ returning ``1`` instead.
 These changes affect :meth:`DataFrame.sum` and :meth:`DataFrame.prod` as well.
 Finally, a few less obvious places in pandas are affected by this change.

-Grouping by a categorical
+Grouping by a Categorical
 ^^^^^^^^^^^^^^^^^^^^^^^^^

 Grouping by a ``Categorical`` and summing now returns ``0`` instead of

diff --git a/doc/source/whatsnew/v0.23.0.rst b/doc/source/whatsnew/v0.23.0.rst
index b9e1b5060d1da..f91d89679dad1 100644
--- a/doc/source/whatsnew/v0.23.0.rst
+++ b/doc/source/whatsnew/v0.23.0.rst
@@ -86,8 +86,8 @@ Please note that the string `index` is not supported with the round trip format,

 .. _whatsnew_0230.enhancements.assign_dependent:

-``.assign()`` accepts dependent arguments
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Method ``.assign()`` accepts dependent arguments
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

 The :func:`DataFrame.assign` now accepts dependent keyword arguments for python version later than 3.6 (see also `PEP 468 <https://www.python.org/dev/peps/pep-0468/>`_). Later keyword arguments may now refer to earlier ones if the argument is a callable. See the
@@ -244,7 +244,7 @@ documentation. If you build an extension array, publicize it on our

 .. _whatsnew_0230.enhancements.categorical_grouping:

-New ``observed`` keyword for excluding unobserved categories in ``groupby``
+New ``observed`` keyword for excluding unobserved categories in ``GroupBy``
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

 Grouping by a categorical includes the unobserved categories in the output.
@@ -360,8 +360,8 @@ Fill all consecutive outside values in both directions

 .. _whatsnew_0210.enhancements.get_dummies_dtype:

-``get_dummies`` now supports ``dtype`` argument
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Function ``get_dummies`` now supports ``dtype`` argument
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

 The :func:`get_dummies` now accepts a ``dtype`` argument, which specifies a dtype for the new columns. The default remains uint8. (:issue:`18330`)

@@ -388,8 +388,8 @@ See the :ref:`documentation here `. (:issue:`19365`)

 .. _whatsnew_0230.enhancements.ran_inf:

-``.rank()`` handles ``inf`` values when ``NaN`` are present
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Method ``.rank()`` handles ``inf`` values when ``NaN`` are present
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

 In previous versions, ``.rank()`` would assign ``inf`` elements ``NaN`` as their ranks. Now ranks are calculated properly. (:issue:`6945`)

@@ -587,7 +587,7 @@ If installed, we now require:

 .. _whatsnew_0230.api_breaking.dict_insertion_order:

-Instantiation from dicts preserves dict insertion order for python 3.6+
+Instantiation from dicts preserves dict insertion order for Python 3.6+
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

 Until Python 3.6, dicts in Python had no formally defined ordering. For Python
@@ -1365,8 +1365,8 @@ MultiIndex
 - Bug in indexing where nested indexers having only numpy arrays are handled incorrectly (:issue:`19686`)


-I/O
-^^^
+IO
+^^

 - :func:`read_html` now rewinds seekable IO objects after parse failure, before attempting to parse with a new parser. If a parser errors and the object is non-seekable, an informative error is raised suggesting the use of a different parser (:issue:`17975`)
 - :meth:`DataFrame.to_html` now has an option to add an id to the leading ``<table>`` tag (:issue:`8496`)
@@ -1403,7 +1403,7 @@ Plotting
 - :func:`DataFrame.plot` now supports multiple columns to the ``y`` argument (:issue:`19699`)


-Groupby/resample/rolling
+GroupBy/resample/rolling
 ^^^^^^^^^^^^^^^^^^^^^^^^

 - Bug when grouping by a single column and aggregating with a class like ``list`` or ``tuple`` (:issue:`18079`)

diff --git a/doc/source/whatsnew/v0.24.0.rst b/doc/source/whatsnew/v0.24.0.rst
index 45399792baecf..5bfaa7a5a3e6b 100644
--- a/doc/source/whatsnew/v0.24.0.rst
+++ b/doc/source/whatsnew/v0.24.0.rst
@@ -277,8 +277,8 @@ For earlier versions this can be done using the following.

 .. _whatsnew_0240.enhancements.read_html:

-``read_html`` Enhancements
-^^^^^^^^^^^^^^^^^^^^^^^^^^
+Function ``read_html`` enhancements
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

 :func:`read_html` previously ignored ``colspan`` and ``rowspan`` attributes.
 Now it understands them, treating them as sequences of cells with the same
@@ -1371,7 +1371,7 @@ the object's ``freq`` attribute (:issue:`21939`, :issue:`23878`).

 .. _whatsnew_0240.deprecations.integer_tz:

-Passing integer data and a timezone to datetimeindex
+Passing integer data and a timezone to DatetimeIndex
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

 The behavior of :class:`DatetimeIndex` when passed integer data and
@@ -1769,8 +1769,8 @@ MultiIndex
 - :class:`MultiIndex` has gained the :meth:`MultiIndex.from_frame`, it allows constructing a :class:`MultiIndex` object from a :class:`DataFrame` (:issue:`22420`)
 - Fix ``TypeError`` in Python 3 when creating :class:`MultiIndex` in which some levels have mixed types, e.g. when some labels are tuples (:issue:`15457`)

-I/O
-^^^
+IO
+^^

 - Bug in :func:`read_csv` in which a column specified with ``CategoricalDtype`` of boolean categories was not being correctly coerced from string values to booleans (:issue:`20498`)
 - Bug in :func:`read_csv` in which unicode column names were not being properly recognized with Python 2.x (:issue:`13253`)
@@ -1827,7 +1827,7 @@ Plotting
 - Bug in :func:`DataFrame.plot.bar` caused bars to use multiple colors instead of a single one (:issue:`20585`)
 - Bug in validating color parameter caused extra color to be appended to the given color array. This happened to multiple plotting functions using matplotlib. (:issue:`20726`)

-Groupby/resample/rolling
+GroupBy/resample/rolling
 ^^^^^^^^^^^^^^^^^^^^^^^^

 - Bug in :func:`pandas.core.window.Rolling.min` and :func:`pandas.core.window.Rolling.max` with ``closed='left'``, a datetime-like index and only one entry in the series leading to segfault (:issue:`24718`)

diff --git a/doc/source/whatsnew/v0.24.2.rst b/doc/source/whatsnew/v0.24.2.rst
index d1a893f99cff4..27e84bf0a7cd7 100644
--- a/doc/source/whatsnew/v0.24.2.rst
+++ b/doc/source/whatsnew/v0.24.2.rst
@@ -51,7 +51,6 @@ Bug fixes
 - Bug where calling :meth:`Series.replace` on categorical data could return a ``Series`` with incorrect dimensions (:issue:`24971`)
 -
--

 **Reshaping**

diff --git a/scripts/validate_rst_title_capitalization.py b/scripts/validate_rst_title_capitalization.py
index 5de2a07381ae5..62ec6b9ef07af 100755
--- a/scripts/validate_rst_title_capitalization.py
+++ b/scripts/validate_rst_title_capitalization.py
@@ -138,6 +138,9 @@
     "CategoricalDtype",
     "UTC",
     "Panel",
+    "False",
+    "Styler",
+    "os",
 }

 CAP_EXCEPTIONS_DICT = {word.lower(): word for word in CAPITALIZATION_EXCEPTIONS}

From 63a48bbad2f18de1dc1706811a56f31638b2a40e Mon Sep 17 00:00:00 2001
From: gabicca <33315687+gabicca@users.noreply.github.com>
Date: Fri, 7 Aug 2020 12:39:31 +0100
Subject: [PATCH 223/632] Bug fix one element series truncate (#35547)

---
 doc/source/whatsnew/v1.1.1.rst               |  2 +-
 pandas/core/generic.py                       |  2 +-
 pandas/tests/series/methods/test_truncate.py | 11 +++++++++++
 3 files changed, 13 insertions(+), 2 deletions(-)

diff --git a/doc/source/whatsnew/v1.1.1.rst b/doc/source/whatsnew/v1.1.1.rst
index 7db609fba5d68..45f1015a8e7bd 100644
--- a/doc/source/whatsnew/v1.1.1.rst
+++ b/doc/source/whatsnew/v1.1.1.rst
@@ -48,7 +48,7 @@ Categorical

 **Indexing**

-
+- Bug in :meth:`Series.truncate` when trying to truncate a single-element series (:issue:`35544`)

 **DataFrame**

diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 42d02f37508fc..aaf23cc198d95 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -9405,7 +9405,7 @@ def truncate(
         if before > after:
             raise ValueError(f"Truncate: {after} must be after {before}")

-        if ax.is_monotonic_decreasing:
+        if len(ax) > 1 and ax.is_monotonic_decreasing:
             before, after = after, before

         slicer = [slice(None, None)] * self._AXIS_LEN

diff --git a/pandas/tests/series/methods/test_truncate.py b/pandas/tests/series/methods/test_truncate.py
index 7c82edbaec177..45592f8d99b93 100644
--- a/pandas/tests/series/methods/test_truncate.py
+++ b/pandas/tests/series/methods/test_truncate.py
@@ -141,3 +141,14 @@ def test_truncate_multiindex(self):
         expected = df.col

         tm.assert_series_equal(result, expected)
+
+    def test_truncate_one_element_series(self):
+        # GH 35544
+        series = pd.Series([0.1], index=pd.DatetimeIndex(["2020-08-04"]))
+        before = pd.Timestamp("2020-08-02")
+        after = pd.Timestamp("2020-08-04")
+
+        result = series.truncate(before=before, after=after)
+
+        # the input Series and the expected Series are the same
+        tm.assert_series_equal(result, series)

From c608a38945959eb2e79ec725158dabe16993a97a Mon Sep 17 00:00:00 2001
From: jbrockmendel
Date: Fri, 7 Aug 2020 04:45:48 -0700
Subject: [PATCH 224/632] BUG: df.shift(n, axis=1) with multiple blocks
 (#35578)

---
 doc/source/whatsnew/v1.1.1.rst           |  1 +
 pandas/core/internals/managers.py        | 25 ++++++++++++++++++++--
 pandas/tests/frame/methods/test_shift.py | 27 ++++++++++++++++++++++++
 3 files changed, 51 insertions(+), 2 deletions(-)

diff --git a/doc/source/whatsnew/v1.1.1.rst b/doc/source/whatsnew/v1.1.1.rst
index 45f1015a8e7bd..232e24c6db020 100644
--- a/doc/source/whatsnew/v1.1.1.rst
+++ b/doc/source/whatsnew/v1.1.1.rst
@@ -17,6 +17,7 @@ Fixed regressions

 - Fixed regression where :func:`read_csv` would raise a ``ValueError`` when ``pandas.options.mode.use_inf_as_na`` was set to ``True`` (:issue:`35493`).
 - Fixed regression in :class:`pandas.core.groupby.RollingGroupby` where column selection was ignored (:issue:`35486`)
+- Fixed regression in :meth:`DataFrame.shift` with ``axis=1`` and heterogeneous dtypes (:issue:`35488`)

 .. ---------------------------------------------------------------------------

diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py
index 0ce2408eb003e..4693cc193c27c 100644
--- a/pandas/core/internals/managers.py
+++ b/pandas/core/internals/managers.py
@@ -551,6 +551,24 @@ def interpolate(self, **kwargs) -> "BlockManager":
         return self.apply("interpolate", **kwargs)

     def shift(self, periods: int, axis: int, fill_value) -> "BlockManager":
+        if axis == 0 and self.ndim == 2 and self.nblocks > 1:
+            # GH#35488 we need to watch out for multi-block cases
+            ncols = self.shape[0]
+            if periods > 0:
+                indexer = [-1] * periods + list(range(ncols - periods))
+            else:
+                nper = abs(periods)
+                indexer = list(range(nper, ncols)) + [-1] * nper
+            result = self.reindex_indexer(
+                self.items,
+                indexer,
+                axis=0,
+                fill_value=fill_value,
+                allow_dups=True,
+                consolidate=False,
+            )
+            return result
+
         return self.apply("shift", periods=periods, axis=axis, fill_value=fill_value)

     def fillna(self, value, limit, inplace: bool, downcast) -> "BlockManager":
@@ -1213,6 +1231,7 @@ def reindex_indexer(
         fill_value=None,
         allow_dups: bool = False,
         copy: bool = True,
+        consolidate: bool = True,
     ) -> T:
         """
         Parameters
@@ -1223,7 +1242,8 @@ def reindex_indexer(
         fill_value : object, default None
         allow_dups : bool, default False
         copy : bool, default True
-
+        consolidate: bool, default True
+            Whether to consolidate inplace before reindexing.

         pandas-indexer with -1's only.
         """
@@ -1236,7 +1256,8 @@ def reindex_indexer(
             result.axes[axis] = new_axis
             return result

-        self._consolidate_inplace()
+        if consolidate:
+            self._consolidate_inplace()

         # some axes don't allow reindexing with dups
         if not allow_dups:

diff --git a/pandas/tests/frame/methods/test_shift.py b/pandas/tests/frame/methods/test_shift.py
index 9ec029a6c4304..8f6902eca816f 100644
--- a/pandas/tests/frame/methods/test_shift.py
+++ b/pandas/tests/frame/methods/test_shift.py
@@ -145,6 +145,33 @@ def test_shift_duplicate_columns(self):
         tm.assert_frame_equal(shifted[0], shifted[1])
         tm.assert_frame_equal(shifted[0], shifted[2])

+    def test_shift_axis1_multiple_blocks(self):
+        # GH#35488
+        df1 = pd.DataFrame(np.random.randint(1000, size=(5, 3)))
+        df2 = pd.DataFrame(np.random.randint(1000, size=(5, 2)))
+        df3 = pd.concat([df1, df2], axis=1)
+        assert len(df3._mgr.blocks) == 2
+
+        result = df3.shift(2, axis=1)
+
+        expected = df3.take([-1, -1, 0, 1, 2], axis=1)
+        expected.iloc[:, :2] = np.nan
+        expected.columns = df3.columns
+
+        tm.assert_frame_equal(result, expected)
+
+        # Case with periods < 0
+        # rebuild df3 because `take` call above consolidated
+        df3 = pd.concat([df1, df2], axis=1)
+        assert len(df3._mgr.blocks) == 2
+        result = df3.shift(-2, axis=1)
+
+        expected = df3.take([2, 3, 4, -1, -1], axis=1)
+        expected.iloc[:, -2:] = np.nan
+        expected.columns = df3.columns
+
+        tm.assert_frame_equal(result, expected)
+
     @pytest.mark.filterwarnings("ignore:tshift is deprecated:FutureWarning")
     def test_tshift(self, datetime_frame):
         # TODO: remove this test when tshift deprecation is enforced
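Two quick sketches of the behavior restored by the last two patches (illustrative values; both assume the fixes are applied):

    import numpy as np
    import pandas as pd

    # GH#35488: shifting along axis=1 on a frame with multiple internal blocks.
    df = pd.concat(
        [pd.DataFrame(np.ones((5, 3))), pd.DataFrame(np.zeros((5, 2), dtype=int))],
        axis=1,
    )
    shifted = df.shift(2, axis=1)  # leftmost two columns become NaN, rest move right

    # GH#35544: truncate on a one-element Series no longer flips the bounds.
    ser = pd.Series([0.1], index=pd.DatetimeIndex(["2020-08-04"]))
    same = ser.truncate(before=pd.Timestamp("2020-08-02"), after=pd.Timestamp("2020-08-04"))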
""" @@ -1236,7 +1256,8 @@ def reindex_indexer( result.axes[axis] = new_axis return result - self._consolidate_inplace() + if consolidate: + self._consolidate_inplace() # some axes don't allow reindexing with dups if not allow_dups: diff --git a/pandas/tests/frame/methods/test_shift.py b/pandas/tests/frame/methods/test_shift.py index 9ec029a6c4304..8f6902eca816f 100644 --- a/pandas/tests/frame/methods/test_shift.py +++ b/pandas/tests/frame/methods/test_shift.py @@ -145,6 +145,33 @@ def test_shift_duplicate_columns(self): tm.assert_frame_equal(shifted[0], shifted[1]) tm.assert_frame_equal(shifted[0], shifted[2]) + def test_shift_axis1_multiple_blocks(self): + # GH#35488 + df1 = pd.DataFrame(np.random.randint(1000, size=(5, 3))) + df2 = pd.DataFrame(np.random.randint(1000, size=(5, 2))) + df3 = pd.concat([df1, df2], axis=1) + assert len(df3._mgr.blocks) == 2 + + result = df3.shift(2, axis=1) + + expected = df3.take([-1, -1, 0, 1, 2], axis=1) + expected.iloc[:, :2] = np.nan + expected.columns = df3.columns + + tm.assert_frame_equal(result, expected) + + # Case with periods < 0 + # rebuild df3 because `take` call above consolidated + df3 = pd.concat([df1, df2], axis=1) + assert len(df3._mgr.blocks) == 2 + result = df3.shift(-2, axis=1) + + expected = df3.take([2, 3, 4, -1, -1], axis=1) + expected.iloc[:, -2:] = np.nan + expected.columns = df3.columns + + tm.assert_frame_equal(result, expected) + @pytest.mark.filterwarnings("ignore:tshift is deprecated:FutureWarning") def test_tshift(self, datetime_frame): # TODO: remove this test when tshift deprecation is enforced From 1104f0dc029482ba657f96168a42407569061940 Mon Sep 17 00:00:00 2001 From: Eric Goddard Date: Fri, 7 Aug 2020 06:49:52 -0500 Subject: [PATCH 225/632] BUG: to_timedelta fails on Int64 Series with null values (#35582) --- doc/source/whatsnew/v1.1.1.rst | 5 +++++ pandas/core/arrays/timedeltas.py | 4 +++- pandas/tests/tools/test_to_timedelta.py | 13 +++++++++++++ 3 files changed, 21 insertions(+), 1 deletion(-) diff --git a/doc/source/whatsnew/v1.1.1.rst b/doc/source/whatsnew/v1.1.1.rst index 232e24c6db020..6b315e0a9d016 100644 --- a/doc/source/whatsnew/v1.1.1.rst +++ b/doc/source/whatsnew/v1.1.1.rst @@ -38,6 +38,11 @@ Categorical - - +**Timedelta** + +- Bug in :meth:`to_timedelta` fails when arg is a :class:`Series` with `Int64` dtype containing null values (:issue:`35574`) + + **Numeric** - diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py index 99a4725c2d806..3e21d01355dda 100644 --- a/pandas/core/arrays/timedeltas.py +++ b/pandas/core/arrays/timedeltas.py @@ -29,7 +29,7 @@ from pandas.core import nanops from pandas.core.algorithms import checked_add_with_arr -from pandas.core.arrays import datetimelike as dtl +from pandas.core.arrays import IntegerArray, datetimelike as dtl from pandas.core.arrays._ranges import generate_regular_range import pandas.core.common as com from pandas.core.construction import extract_array @@ -921,6 +921,8 @@ def sequence_to_td64ns(data, copy=False, unit=None, errors="raise"): elif isinstance(data, (ABCTimedeltaIndex, TimedeltaArray)): inferred_freq = data.freq data = data._data + elif isinstance(data, IntegerArray): + data = data.to_numpy("int64", na_value=tslibs.iNaT) # Convert whatever we have into timedelta64[ns] dtype if is_object_dtype(data.dtype) or is_string_dtype(data.dtype): diff --git a/pandas/tests/tools/test_to_timedelta.py b/pandas/tests/tools/test_to_timedelta.py index 1e193f22a6698..f68d83f7f4d58 100644 --- a/pandas/tests/tools/test_to_timedelta.py +++ 
b/pandas/tests/tools/test_to_timedelta.py @@ -166,3 +166,16 @@ def test_to_timedelta_ignore_strings_unit(self): arr = np.array([1, 2, "error"], dtype=object) result = pd.to_timedelta(arr, unit="ns", errors="ignore") tm.assert_numpy_array_equal(result, arr) + + def test_to_timedelta_nullable_int64_dtype(self): + # GH 35574 + expected = Series([timedelta(days=1), timedelta(days=2)]) + result = to_timedelta(Series([1, 2], dtype="Int64"), unit="days") + + tm.assert_series_equal(result, expected) + + # IntegerArray Series with nulls + expected = Series([timedelta(days=1), None]) + result = to_timedelta(Series([1, None], dtype="Int64"), unit="days") + + tm.assert_series_equal(result, expected) From 3b88446457b17feaa73ab1e6433d0e1e7a4fd9bd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Torsten=20W=C3=B6rtwein?= Date: Fri, 7 Aug 2020 07:53:09 -0400 Subject: [PATCH 226/632] support binary file handles in to_csv (#35129) --- doc/source/user_guide/io.rst | 17 ++++++ doc/source/whatsnew/v1.2.0.rst | 23 +++++++- pandas/core/generic.py | 16 ++++- pandas/io/common.py | 17 ++++-- pandas/io/formats/csvs.py | 82 +++++++++++--------------- pandas/tests/io/formats/test_to_csv.py | 36 +++++++++++ pandas/tests/io/test_common.py | 11 ++++ pandas/tests/io/test_compression.py | 16 +++++ 8 files changed, 158 insertions(+), 60 deletions(-) diff --git a/doc/source/user_guide/io.rst b/doc/source/user_guide/io.rst index cc42f952b1733..ab233f653061a 100644 --- a/doc/source/user_guide/io.rst +++ b/doc/source/user_guide/io.rst @@ -1064,6 +1064,23 @@ DD/MM/YYYY instead. For convenience, a ``dayfirst`` keyword is provided: pd.read_csv('tmp.csv', parse_dates=[0]) pd.read_csv('tmp.csv', dayfirst=True, parse_dates=[0]) +Writing CSVs to binary file objects ++++++++++++++++++++++++++++++++++++ + +.. versionadded:: 1.2.0 + +``df.to_csv(..., mode="w+b")`` allows writing a CSV to a file object +opened binary mode. For this to work, it is necessary that ``mode`` +contains a "b": + +.. ipython:: python + + import io + + data = pd.DataFrame([0, 1, 2]) + buffer = io.BytesIO() + data.to_csv(buffer, mode="w+b", encoding="utf-8", compression="gzip") + .. _io.float_precision: Specifying method for floating-point conversion diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index 6f173cb2fce12..10dfd8406b8ce 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -13,6 +13,25 @@ including other versions of pandas. Enhancements ~~~~~~~~~~~~ +.. _whatsnew_120.binary_handle_to_csv: + +Support for binary file handles in ``to_csv`` +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +:meth:`to_csv` supports file handles in binary mode (:issue:`19827` and :issue:`35058`) +with ``encoding`` (:issue:`13068` and :issue:`23854`) and ``compression`` (:issue:`22555`). +``mode`` has to contain a ``b`` for binary handles to be supported. + +For example: + +.. ipython:: python + + import io + + data = pd.DataFrame([0, 1, 2]) + buffer = io.BytesIO() + data.to_csv(buffer, mode="w+b", encoding="utf-8", compression="gzip") + .. _whatsnew_120.enhancements.other: Other enhancements @@ -121,7 +140,7 @@ MultiIndex I/O ^^^ -- +- Bug in :meth:`to_csv` caused a ``ValueError`` when it was called with a filename in combination with ``mode`` containing a ``b`` (:issue:`35058`) - Plotting @@ -167,4 +186,4 @@ Other .. 
_whatsnew_120.contributors: Contributors -~~~~~~~~~~~~ \ No newline at end of file +~~~~~~~~~~~~ diff --git a/pandas/core/generic.py b/pandas/core/generic.py index aaf23cc198d95..441eef26bea58 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -3021,13 +3021,18 @@ def to_csv( ---------- path_or_buf : str or file handle, default None File path or object, if None is provided the result is returned as - a string. If a file object is passed it should be opened with - `newline=''`, disabling universal newlines. + a string. If a non-binary file object is passed, it should be opened + with `newline=''`, disabling universal newlines. If a binary + file object is passed, `mode` needs to contain a `'b'`. .. versionchanged:: 0.24.0 Was previously named "path" for Series. + .. versionchanged:: 1.2.0 + + Support for binary file objects was introduced. + sep : str, default ',' String of length 1. Field delimiter for the output file. na_rep : str, default '' @@ -3056,7 +3061,8 @@ def to_csv( Python write mode, default 'w'. encoding : str, optional A string representing the encoding to use in the output file, - defaults to 'utf-8'. + defaults to 'utf-8'. `encoding` is not supported if `path_or_buf` + is a non-binary file object. compression : str or dict, default 'infer' If str, represents compression mode. If dict, value at 'method' is the compression mode. Compression mode may be any of the following @@ -3080,6 +3086,10 @@ def to_csv( supported for compression modes 'gzip' and 'bz2' as well as 'zip'. + .. versionchanged:: 1.2.0 + + Compression is supported for non-binary file objects. + quoting : optional constant from csv module Defaults to csv.QUOTE_MINIMAL. If you have set a `float_format` then floats are converted to strings and thus csv.QUOTE_NONNUMERIC diff --git a/pandas/io/common.py b/pandas/io/common.py index f39b8279fbdb0..34e4425c657f1 100644 --- a/pandas/io/common.py +++ b/pandas/io/common.py @@ -407,8 +407,9 @@ def get_handle( memory_map : boolean, default False See parsers._parser_params for more information. is_text : boolean, default True - whether file/buffer is in text format (csv, json, etc.), or in binary - mode (pickle, etc.). + Whether the type of the content passed to the file/buffer is string or + bytes. This is not the same as `"b" not in mode`. If a string content is + passed to a binary file/buffer, a wrapper is inserted. errors : str, default 'strict' Specifies how encoding and decoding errors are to be handled. See the errors argument for :func:`open` for a full list @@ -449,14 +450,14 @@ def get_handle( if is_path: f = gzip.open(path_or_buf, mode, **compression_args) else: - f = gzip.GzipFile(fileobj=path_or_buf, **compression_args) + f = gzip.GzipFile(fileobj=path_or_buf, mode=mode, **compression_args) # BZ Compression elif compression == "bz2": if is_path: f = bz2.BZ2File(path_or_buf, mode, **compression_args) else: - f = bz2.BZ2File(path_or_buf, **compression_args) + f = bz2.BZ2File(path_or_buf, mode=mode, **compression_args) # ZIP Compression elif compression == "zip": @@ -489,10 +490,14 @@ def get_handle( handles.append(f) elif is_path: - if encoding: + # Check whether the filename is to be opened in binary mode. + # Binary mode does not support 'encoding' and 'newline'. 
+        is_binary_mode = "b" in mode
+
+        if encoding and not is_binary_mode:
             # Encoding
             f = open(path_or_buf, mode, encoding=encoding, errors=errors, newline="")
-        elif is_text:
+        elif is_text and not is_binary_mode:
             # No explicit encoding
             f = open(path_or_buf, mode, errors="replace", newline="")
         else:

diff --git a/pandas/io/formats/csvs.py b/pandas/io/formats/csvs.py
index 5bd51dc8351f6..b10946a20d041 100644
--- a/pandas/io/formats/csvs.py
+++ b/pandas/io/formats/csvs.py
@@ -3,11 +3,10 @@
 """

 import csv as csvlib
-from io import StringIO
+from io import StringIO, TextIOWrapper
 import os
 from typing import Hashable, List, Mapping, Optional, Sequence, Union
 import warnings
-from zipfile import ZipFile

 import numpy as np

@@ -159,38 +158,29 @@ def save(self) -> None:
         """
         Create the writer & save.
         """
-        # GH21227 internal compression is not used when file-like passed.
-        if self.compression and hasattr(self.path_or_buf, "write"):
+        # GH21227 internal compression is not used for non-binary handles.
+        if (
+            self.compression
+            and hasattr(self.path_or_buf, "write")
+            and "b" not in self.mode
+        ):
             warnings.warn(
-                "compression has no effect when passing file-like object as input.",
+                "compression has no effect when passing a non-binary object as input.",
                 RuntimeWarning,
                 stacklevel=2,
             )
-
-        # when zip compression is called.
-        is_zip = isinstance(self.path_or_buf, ZipFile) or (
-            not hasattr(self.path_or_buf, "write") and self.compression == "zip"
-        )
-        if is_zip:
-            # zipfile doesn't support writing string to archive. uses string
-            # buffer to receive csv writing and dump into zip compression
-            # file handle. GH21241, GH21118
-            f = StringIO()
-            close = False
-        elif hasattr(self.path_or_buf, "write"):
-            f = self.path_or_buf
-            close = False
-        else:
-            f, handles = get_handle(
-                self.path_or_buf,
-                self.mode,
-                encoding=self.encoding,
-                errors=self.errors,
-                compression=dict(self.compression_args, method=self.compression),
-            )
-            close = True
+            self.compression = None
+
+        # get a handle or wrap an existing handle to take care of 1) compression and
+        # 2) text -> byte conversion
+        f, handles = get_handle(
+            self.path_or_buf,
+            self.mode,
+            encoding=self.encoding,
+            errors=self.errors,
+            compression=dict(self.compression_args, method=self.compression),
+        )

         try:
             # Note: self.encoding is irrelevant here
             self.writer = csvlib.writer(
@@ -206,29 +196,23 @@ def save(self) -> None:
             self._save()

         finally:
-            if is_zip:
-                # GH17778 handles zip compression separately.
-                buf = f.getvalue()
-                if hasattr(self.path_or_buf, "write"):
-                    self.path_or_buf.write(buf)
-                else:
-                    compression = dict(self.compression_args, method=self.compression)
-
-                    f, handles = get_handle(
-                        self.path_or_buf,
-                        self.mode,
-                        encoding=self.encoding,
-                        errors=self.errors,
-                        compression=compression,
-                    )
-                    f.write(buf)
-                    close = True
-            if close:
+            if self.should_close:
                 f.close()
-                for _fh in handles:
-                    _fh.close()
-            elif self.should_close:
+            elif (
+                isinstance(f, TextIOWrapper)
+                and not f.closed
+                and f != self.path_or_buf
+                and hasattr(self.path_or_buf, "write")
+            ):
+                # get_handle uses TextIOWrapper for non-binary handles. TextIOWrapper
+                # closes the wrapped handle if it is not detached.
+                f.flush()  # make sure everything is written
+                f.detach()  # makes f unusable
+                del f
+            elif f != self.path_or_buf:
                 f.close()
+            for _fh in handles:
+                _fh.close()

     def _save_header(self):
         writer = self.writer
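As background for the detach logic above, a standalone sketch of the standard-library behavior it relies on (plain Python, independent of pandas):

    import io

    # TextIOWrapper.close() would also close the wrapped buffer; detach()
    # releases the buffer instead, leaving it open and usable.
    buffer = io.BytesIO()
    wrapper = io.TextIOWrapper(buffer, encoding="utf-8")
    wrapper.write("a,b\n1,2\n")
    wrapper.flush()   # push everything through to the underlying buffer
    wrapper.detach()  # wrapper becomes unusable; buffer stays open
    assert buffer.getvalue() == b"a,b\n1,2\n"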
diff --git a/pandas/tests/io/formats/test_to_csv.py b/pandas/tests/io/formats/test_to_csv.py
index 4c86e3a16b135..753b8b6eda9c5 100644
--- a/pandas/tests/io/formats/test_to_csv.py
+++ b/pandas/tests/io/formats/test_to_csv.py
@@ -607,3 +607,39 @@ def test_to_csv_errors(self, errors):
             ser.to_csv(path, errors=errors)
         # No use in reading back the data as it is not the same anymore
         # due to the error handling
+
+    def test_to_csv_binary_handle(self):
+        """
+        Binary file objects should work if 'mode' contains a 'b'.
+
+        GH 35058 and GH 19827
+        """
+        df = tm.makeDataFrame()
+        with tm.ensure_clean() as path:
+            with open(path, mode="w+b") as handle:
+                df.to_csv(handle, mode="w+b")
+            tm.assert_frame_equal(df, pd.read_csv(path, index_col=0))
+
+    def test_to_csv_encoding_binary_handle(self):
+        """
+        Binary file objects should honor a specified encoding.
+
+        GH 23854 and GH 13068 with binary handles
+        """
+        # example from GH 23854
+        content = "a, b, 🐟".encode("utf-8-sig")
+        buffer = io.BytesIO(content)
+        df = pd.read_csv(buffer, encoding="utf-8-sig")
+
+        buffer = io.BytesIO()
+        df.to_csv(buffer, mode="w+b", encoding="utf-8-sig", index=False)
+        buffer.seek(0)  # tests whether file handle wasn't closed
+        assert buffer.getvalue().startswith(content)
+
+        # example from GH 13068
+        with tm.ensure_clean() as path:
+            with open(path, "w+b") as handle:
+                pd.DataFrame().to_csv(handle, mode="w+b", encoding="utf-8-sig")
+
+                handle.seek(0)
+                assert handle.read().startswith(b'\xef\xbb\xbf""')

diff --git a/pandas/tests/io/test_common.py b/pandas/tests/io/test_common.py
index dde38eb55ea7f..5ce2233bc0cd0 100644
--- a/pandas/tests/io/test_common.py
+++ b/pandas/tests/io/test_common.py
@@ -378,6 +378,17 @@ def test_unknown_engine(self):
             with pytest.raises(ValueError, match="Unknown engine"):
                 pd.read_csv(path, engine="pyt")

+    def test_binary_mode(self):
+        """
+        'encoding' shouldn't be passed to 'open' in binary mode.
+
+        GH 35058
+        """
+        with tm.ensure_clean() as path:
+            df = tm.makeDataFrame()
+            df.to_csv(path, mode="w+b")
+            tm.assert_frame_equal(df, pd.read_csv(path, index_col=0))
+

 def test_is_fsspec_url():
     assert icom.is_fsspec_url("gcs://pandas/somethingelse.com")

diff --git a/pandas/tests/io/test_compression.py b/pandas/tests/io/test_compression.py
index 59c9bd0a36d3d..902a3d5d2a397 100644
--- a/pandas/tests/io/test_compression.py
+++ b/pandas/tests/io/test_compression.py
@@ -114,6 +114,22 @@ def test_compression_warning(compression_only):
         df.to_csv(f, compression=compression_only)

+def test_compression_binary(compression_only):
+    """
+    Binary file handles support compression.
+
+    GH22555
+    """
+    df = tm.makeDataFrame()
+    with tm.ensure_clean() as path:
+        with open(path, mode="wb") as file:
+            df.to_csv(file, mode="wb", compression=compression_only)
+            file.seek(0)  # file shouldn't be closed
+        tm.assert_frame_equal(
+            df, pd.read_csv(path, index_col=0, compression=compression_only)
+        )
+
+
 def test_with_missing_lzma():
     """Tests if import pandas works when lzma is not present."""
     # https://github.com/pandas-dev/pandas/issues/27575

From a21ce87329c320e8483f63f226d90587c79ae9a3 Mon Sep 17 00:00:00 2001
From: Thomas Smith
Date: Fri, 7 Aug 2020 16:21:11 +0100
Subject: [PATCH 227/632] BUG: GroupBy.count() and GroupBy.sum() incorrectly
 return NaN instead of 0 for missing categories (#35280)

---
 doc/source/whatsnew/v1.2.0.rst           |  3 +-
 pandas/core/groupby/generic.py           |  8 ++++-
 pandas/core/groupby/groupby.py           | 16 +++++++--
 pandas/tests/groupby/test_categorical.py | 46 +++++++-----------------
 pandas/tests/reshape/test_pivot.py       | 13 +++----
 5 files changed, 42 insertions(+), 44 deletions(-)

diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index 10dfd8406b8ce..5a4fa3150e344 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -152,6 +152,7 @@ Plotting

 Groupby/resample/rolling
 ^^^^^^^^^^^^^^^^^^^^^^^^

+- Bug in :meth:`DataFrameGroupBy.count` and :meth:`SeriesGroupBy.sum` returning ``NaN`` for missing categories when grouped on multiple ``Categoricals``. Now returning ``0`` (:issue:`35028`)
 - Bug in :meth:`DataFrameGroupBy.apply` that would sometimes throw an erroneous ``ValueError`` if the grouping axis had duplicate entries (:issue:`16646`)
 -
 -
@@ -160,7 +161,7 @@ Groupby/resample/rolling
 Reshaping
 ^^^^^^^^^

-
+- Bug in :meth:`DataFrame.pivot_table` with ``aggfunc='count'`` or ``aggfunc='sum'`` returning ``NaN`` for missing categories when pivoted on a ``Categorical``. Now returning ``0`` (:issue:`31422`)
 -

 Sparse

diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py
index c50b753cf3293..740463f0cf356 100644
--- a/pandas/core/groupby/generic.py
+++ b/pandas/core/groupby/generic.py
@@ -1829,7 +1829,13 @@ def count(self):
         )
         blocks = [make_block(val, placement=loc) for val, loc in zip(counted, locs)]

-        return self._wrap_agged_blocks(blocks, items=data.items)
+        # If we are grouping on categoricals we want unobserved categories to
+        # return zero, rather than the default of NaN which the reindexing in
+        # _wrap_agged_blocks() returns. GH 35028
+        with com.temp_setattr(self, "observed", True):
+            result = self._wrap_agged_blocks(blocks, items=data.items)
+
+        return self._reindex_output(result, fill_value=0)

     def nunique(self, dropna: bool = True):
         """

diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py
index 6c8a780859939..ed512710295d7 100644
--- a/pandas/core/groupby/groupby.py
+++ b/pandas/core/groupby/groupby.py
@@ -1536,9 +1536,19 @@ def size(self) -> FrameOrSeriesUnion:

     @doc(_groupby_agg_method_template, fname="sum", no=True, mc=0)
     def sum(self, numeric_only: bool = True, min_count: int = 0):
-        return self._agg_general(
-            numeric_only=numeric_only, min_count=min_count, alias="add", npfunc=np.sum
-        )
+
+        # If we are grouping on categoricals we want unobserved categories to
+        # return zero, rather than the default of NaN which the reindexing in
+        # _agg_general() returns. GH #31422
+        with com.temp_setattr(self, "observed", True):
+            result = self._agg_general(
+                numeric_only=numeric_only,
+                min_count=min_count,
+                alias="add",
+                npfunc=np.sum,
+            )
+
+        return self._reindex_output(result, fill_value=0)

     @doc(_groupby_agg_method_template, fname="prod", no=True, mc=0)
     def prod(self, numeric_only: bool = True, min_count: int = 0):

diff --git a/pandas/tests/groupby/test_categorical.py b/pandas/tests/groupby/test_categorical.py
index 0d447a70b540d..c74c1529eb537 100644
--- a/pandas/tests/groupby/test_categorical.py
+++ b/pandas/tests/groupby/test_categorical.py
@@ -19,7 +19,7 @@
 import pandas._testing as tm


-def cartesian_product_for_groupers(result, args, names):
+def cartesian_product_for_groupers(result, args, names, fill_value=np.NaN):
     """ Reindex to a cartesian production for the groupers,
     preserving the nature (Categorical) of each grouper
     """
@@ -33,7 +33,7 @@ def f(a):
         return a

     index = MultiIndex.from_product(map(f, args), names=names)
-    return result.reindex(index).sort_index()
+    return result.reindex(index, fill_value=fill_value).sort_index()


 _results_for_groupbys_with_missing_categories = dict(
@@ -309,7 +309,7 @@ def test_observed(observed):
     result = gb.sum()
     if not observed:
         expected = cartesian_product_for_groupers(
-            expected, [cat1, cat2, ["foo", "bar"]], list("ABC")
+            expected, [cat1, cat2, ["foo", "bar"]], list("ABC"), fill_value=0
         )

     tm.assert_frame_equal(result, expected)
@@ -319,7 +319,9 @@ def test_observed(observed):
     expected = DataFrame({"values": [1, 2, 3, 4]}, index=exp_index)
     result = gb.sum()
     if not observed:
-        expected = cartesian_product_for_groupers(expected, [cat1, cat2], list("AB"))
+        expected = cartesian_product_for_groupers(
+            expected, [cat1, cat2], list("AB"), fill_value=0
+        )

     tm.assert_frame_equal(result, expected)

@@ -1189,6 +1191,8 @@ def test_seriesgroupby_observed_false_or_none(df_cat, observed, operation):
     ).sortlevel()

     expected = Series(data=[2, 4, np.nan, 1, np.nan, 3], index=index, name="C")
+    if operation == "agg":
+        expected = expected.fillna(0, downcast="infer")
     grouped = df_cat.groupby(["A", "B"], observed=observed)["C"]
     result = getattr(grouped, operation)(sum)
     tm.assert_series_equal(result, expected)
@@ -1338,15 +1342,6 @@ def test_series_groupby_on_2_categoricals_unobserved_zeroes_or_nans(
         )
         request.node.add_marker(mark)

-    if reduction_func == "sum":  # GH 31422
-        mark = pytest.mark.xfail(
-            reason=(
-                "sum should return 0 but currently returns NaN. "
-                "This is a known bug. See GH 31422."
-            )
-        )
-        request.node.add_marker(mark)
-
     df = pd.DataFrame(
         {
             "cat_1": pd.Categorical(list("AABB"), categories=list("ABC")),
@@ -1367,8 +1362,11 @@ def test_series_groupby_on_2_categoricals_unobserved_zeroes_or_nans(
         val = result.loc[idx]
         assert (pd.isna(zero_or_nan) and pd.isna(val)) or (val == zero_or_nan)

-    # If we expect unobserved values to be zero, we also expect the dtype to be int
-    if zero_or_nan == 0:
+    # If we expect unobserved values to be zero, we also expect the dtype to be int.
+    # Except for .sum(). If the observed categories sum to dtype=float (i.e. their
+    # sums have decimals), then the zeros for the missing categories should also be
+    # floats.
+    if zero_or_nan == 0 and reduction_func != "sum":
         assert np.issubdtype(result.dtype, np.integer)

@@ -1410,24 +1408,6 @@ def test_dataframe_groupby_on_2_categoricals_when_observed_is_false(
     if reduction_func == "ngroup":
         pytest.skip("ngroup does not return the Categories on the index")

-    if reduction_func == "count":  # GH 35028
-        mark = pytest.mark.xfail(
-            reason=(
-                "DataFrameGroupBy.count returns np.NaN for missing "
-                "categories, when it should return 0. See GH 35028"
-            )
-        )
-        request.node.add_marker(mark)
-
-    if reduction_func == "sum":  # GH 31422
-        mark = pytest.mark.xfail(
-            reason=(
-                "sum should return 0 but currently returns NaN. "
-                "This is a known bug. See GH 31422."
-            )
-        )
-        request.node.add_marker(mark)
-
     df = pd.DataFrame(
         {
             "cat_1": pd.Categorical(list("AABB"), categories=list("ABC")),
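A compact sketch of the corrected behavior, mirroring the tests above (illustrative data; assumes the fix is applied):

    import pandas as pd

    # Grouping on two Categoricals: unobserved category combinations now
    # come back as 0 from count()/sum() instead of NaN (GH#35028, GH#31422).
    df = pd.DataFrame(
        {
            "cat_1": pd.Categorical(list("AABB"), categories=list("ABC")),
            "cat_2": pd.Categorical(list("AB") * 2, categories=list("ABC")),
            "value": [0.1] * 4,
        }
    )
    counts = df.groupby(["cat_1", "cat_2"], observed=False)["value"].count()
    assert (counts.loc["C"] == 0).all()  # "C" is never observed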
diff --git a/pandas/tests/reshape/test_pivot.py b/pandas/tests/reshape/test_pivot.py
index c07a5673fe503..67b3151b0ff9c 100644
--- a/pandas/tests/reshape/test_pivot.py
+++ b/pandas/tests/reshape/test_pivot.py
@@ -1817,7 +1817,7 @@ def test_categorical_aggfunc(self, observed):
         ["A", "B", "C"], categories=["A", "B", "C"], ordered=False, name="C1"
     )
     expected_columns = pd.Index(["a", "b"], name="C2")
-    expected_data = np.array([[1.0, np.nan], [1.0, np.nan], [np.nan, 2.0]])
+    expected_data = np.array([[1, 0], [1, 0], [0, 2]], dtype=np.int64)
     expected = pd.DataFrame(
         expected_data, index=expected_index, columns=expected_columns
     )
@@ -1851,18 +1851,19 @@ def test_categorical_pivot_index_ordering(self, observed):
         values="Sales",
         index="Month",
         columns="Year",
-        dropna=observed,
+        observed=observed,
         aggfunc="sum",
     )
     expected_columns = pd.Int64Index([2013, 2014], name="Year")
     expected_index = pd.CategoricalIndex(
-        ["January"], categories=months, ordered=False, name="Month"
+        months, categories=months, ordered=False, name="Month"
     )
+    expected_data = [[320, 120]] + [[0, 0]] * 11
     expected = pd.DataFrame(
-        [[320, 120]], index=expected_index, columns=expected_columns
+        expected_data, index=expected_index, columns=expected_columns
     )
-    if not observed:
-        result = result.dropna().astype(np.int64)
+    if observed:
+        expected = expected.loc[["January"]]

     tm.assert_frame_equal(result, expected)

From ce03883911ec35c3eea89ed828765ec4be415263 Mon Sep 17 00:00:00 2001
From: SylvainLan
Date: Fri, 7 Aug 2020 17:32:26 +0200
Subject: [PATCH 228/632] To latex position (#35284)

---
 pandas/core/generic.py                   |  5 +++
 pandas/io/formats/format.py              |  2 +
 pandas/io/formats/latex.py               | 42 +++++++++++--------
 pandas/tests/io/formats/test_to_latex.py | 48 ++++++++++++++++++++++++
 4 files changed, 82 insertions(+), 15 deletions(-)

diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 441eef26bea58..6fd55c58ece40 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -2840,6 +2840,7 @@ def to_latex(
         multirow=None,
         caption=None,
         label=None,
+        position=None,
     ):
         r"""
         Render object to a LaTeX tabular, longtable, or nested table/tabular.
@@ -2925,6 +2926,9 @@ def to_latex(
             This is used with ``\ref{}`` in the main ``.tex`` file.

             .. versionadded:: 1.0.0
+        position : str, optional
+            The LaTeX positional argument for tables, to be placed after
+            ``\begin{}`` in the output.
 %(returns)s
         See Also
         --------
@@ -2986,6 +2990,7 @@
             multirow=multirow,
             caption=caption,
             label=label,
+            position=position,
         )

     def to_csv(
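A short usage sketch for the new keyword, mirroring the tests added below (illustrative frame; assumes the patch is applied):

    import pandas as pd

    # Emits \begin{table}[h] ... \end{table} around the tabular output.
    df = pd.DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
    tex = df.to_latex(position="h", caption="An example table", label="tab:example")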
%(returns)s See Also -------- @@ -2986,6 +2990,7 @@ def to_latex( multirow=multirow, caption=caption, label=label, + position=position, ) def to_csv( diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py index c05f79f935548..81990b3d505e1 100644 --- a/pandas/io/formats/format.py +++ b/pandas/io/formats/format.py @@ -931,6 +931,7 @@ def to_latex( multirow: bool = False, caption: Optional[str] = None, label: Optional[str] = None, + position: Optional[str] = None, ) -> Optional[str]: """ Render a DataFrame to a LaTeX tabular/longtable environment output. @@ -946,6 +947,7 @@ def to_latex( multirow=multirow, caption=caption, label=label, + position=position, ).get_result(buf=buf, encoding=encoding) def _format_col(self, i: int) -> List[str]: diff --git a/pandas/io/formats/latex.py b/pandas/io/formats/latex.py index 3a3ca84642d51..5d6f0a08ef2b5 100644 --- a/pandas/io/formats/latex.py +++ b/pandas/io/formats/latex.py @@ -38,6 +38,7 @@ def __init__( multirow: bool = False, caption: Optional[str] = None, label: Optional[str] = None, + position: Optional[str] = None, ): self.fmt = formatter self.frame = self.fmt.frame @@ -50,6 +51,8 @@ def __init__( self.caption = caption self.label = label self.escape = self.fmt.escape + self.position = position + self._table_float = any(p is not None for p in (caption, label, position)) def write_result(self, buf: IO[str]) -> None: """ @@ -284,7 +287,7 @@ def _write_tabular_begin(self, buf, column_format: str): `__ e.g 'rcl' for 3 columns """ - if self.caption is not None or self.label is not None: + if self._table_float: # then write output in a nested table/tabular environment if self.caption is None: caption_ = "" @@ -296,7 +299,12 @@ def _write_tabular_begin(self, buf, column_format: str): else: label_ = f"\n\\label{{{self.label}}}" - buf.write(f"\\begin{{table}}\n\\centering{caption_}{label_}\n") + if self.position is None: + position_ = "" + else: + position_ = f"[{self.position}]" + + buf.write(f"\\begin{{table}}{position_}\n\\centering{caption_}{label_}\n") else: # then write output only in a tabular environment pass @@ -317,7 +325,7 @@ def _write_tabular_end(self, buf): """ buf.write("\\bottomrule\n") buf.write("\\end{tabular}\n") - if self.caption is not None or self.label is not None: + if self._table_float: buf.write("\\end{table}\n") else: pass @@ -337,25 +345,29 @@ def _write_longtable_begin(self, buf, column_format: str): `__ e.g 'rcl' for 3 columns """ - buf.write(f"\\begin{{longtable}}{{{column_format}}}\n") + if self.caption is None: + caption_ = "" + else: + caption_ = f"\\caption{{{self.caption}}}" - if self.caption is not None or self.label is not None: - if self.caption is None: - pass - else: - buf.write(f"\\caption{{{self.caption}}}") + if self.label is None: + label_ = "" + else: + label_ = f"\\label{{{self.label}}}" - if self.label is None: - pass - else: - buf.write(f"\\label{{{self.label}}}") + if self.position is None: + position_ = "" + else: + position_ = f"[{self.position}]" + buf.write( + f"\\begin{{longtable}}{position_}{{{column_format}}}\n{caption_}{label_}" + ) + if self.caption is not None or self.label is not None: # a double-backslash is required at the end of the line # as discussed here: # https://tex.stackexchange.com/questions/219138 buf.write("\\\\\n") - else: - pass @staticmethod def _write_longtable_end(buf): diff --git a/pandas/tests/io/formats/test_to_latex.py b/pandas/tests/io/formats/test_to_latex.py index 509e5bcb33304..93ad3739e59c7 100644 --- a/pandas/tests/io/formats/test_to_latex.py +++ 
b/pandas/tests/io/formats/test_to_latex.py @@ -573,6 +573,54 @@ def test_to_latex_longtable_caption_label(self): """ assert result_cl == expected_cl + def test_to_latex_position(self): + the_position = "h" + + df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]}) + + # test when only the position is provided + result_p = df.to_latex(position=the_position) + + expected_p = r"""\begin{table}[h] +\centering +\begin{tabular}{lrl} +\toprule +{} & a & b \\ +\midrule +0 & 1 & b1 \\ +1 & 2 & b2 \\ +\bottomrule +\end{tabular} +\end{table} +""" + assert result_p == expected_p + + def test_to_latex_longtable_position(self): + the_position = "t" + + df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]}) + + # test when only the position is provided + result_p = df.to_latex(longtable=True, position=the_position) + + expected_p = r"""\begin{longtable}[t]{lrl} +\toprule +{} & a & b \\ +\midrule +\endhead +\midrule +\multicolumn{3}{r}{{Continued on next page}} \\ +\midrule +\endfoot + +\bottomrule +\endlastfoot +0 & 1 & b1 \\ +1 & 2 & b2 \\ +\end{longtable} +""" + assert result_p == expected_p + def test_to_latex_escape_special_chars(self): special_characters = ["&", "%", "$", "#", "_", "{", "}", "~", "^", "\\"] df = DataFrame(data=special_characters) From dfa546eeb9a95fb5dee1e95009c042364287fdc1 Mon Sep 17 00:00:00 2001 From: Thomas Smith Date: Fri, 7 Aug 2020 17:56:12 +0100 Subject: [PATCH 229/632] BUG: GroupBy.apply() returns different results if a different GroupBy method is called first (#35314) --- doc/source/whatsnew/v1.2.0.rst | 2 +- pandas/core/groupby/groupby.py | 89 ++++++++++---------- pandas/tests/groupby/aggregate/test_other.py | 4 +- pandas/tests/groupby/test_apply.py | 29 +++++++ pandas/tests/groupby/test_grouping.py | 8 +- 5 files changed, 82 insertions(+), 50 deletions(-) diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index 260b92b5989c1..5a4fa3150e344 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -156,7 +156,7 @@ Groupby/resample/rolling - Bug in :meth:`DataFrameGroupBy.apply` that would some times throw an erroneous ``ValueError`` if the grouping axis had duplicate entries (:issue:`16646`) - - - +- Bug in :meth:`DataFrameGroupBy.apply` where a non-nuisance grouping column would be dropped from the output columns if another groupby method was called before ``.apply()`` (:issue:`34656`) Reshaping ^^^^^^^^^ diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index ed512710295d7..4597afeeaddbf 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -736,13 +736,12 @@ def pipe(self, func, *args, **kwargs): def _make_wrapper(self, name): assert name in self._apply_allowlist - self._set_group_selection() - - # need to setup the selection - # as are not passed directly but in the grouper - f = getattr(self._obj_with_exclusions, name) - if not isinstance(f, types.MethodType): - return self.apply(lambda self: getattr(self, name)) + with _group_selection_context(self): + # need to setup the selection + # as are not passed directly but in the grouper + f = getattr(self._obj_with_exclusions, name) + if not isinstance(f, types.MethodType): + return self.apply(lambda self: getattr(self, name)) f = getattr(type(self._obj_with_exclusions), name) sig = inspect.signature(f) @@ -992,28 +991,28 @@ def _agg_general( alias: str, npfunc: Callable, ): - self._set_group_selection() - - # try a cython aggregation if we can - try: - return self._cython_agg_general( - how=alias, alt=npfunc, 
numeric_only=numeric_only, min_count=min_count, - ) - except DataError: - pass - except NotImplementedError as err: - if "function is not implemented for this dtype" in str( - err - ) or "category dtype not supported" in str(err): - # raised in _get_cython_function, in some cases can - # be trimmed by implementing cython funcs for more dtypes + with _group_selection_context(self): + # try a cython aggregation if we can + try: + return self._cython_agg_general( + how=alias, + alt=npfunc, + numeric_only=numeric_only, + min_count=min_count, + ) + except DataError: pass - else: - raise - - # apply a non-cython aggregation - result = self.aggregate(lambda x: npfunc(x, axis=self.axis)) - return result + except NotImplementedError as err: + if "function is not implemented for this dtype" in str( + err + ) or "category dtype not supported" in str(err): + # raised in _get_cython_function, in some cases can + # be trimmed by implementing cython funcs for more dtypes + pass + else: + raise + + # apply a non-cython aggregation + result = self.aggregate(lambda x: npfunc(x, axis=self.axis)) + return result def _cython_agg_general( self, how: str, alt=None, numeric_only: bool = True, min_count: int = -1 @@ -1940,29 +1939,31 @@ def nth(self, n: Union[int, List[int]], dropna: Optional[str] = None) -> DataFra nth_values = list(set(n)) nth_array = np.array(nth_values, dtype=np.intp) - self._set_group_selection() + with _group_selection_context(self): - mask_left = np.in1d(self._cumcount_array(), nth_array) - mask_right = np.in1d(self._cumcount_array(ascending=False) + 1, -nth_array) - mask = mask_left | mask_right + mask_left = np.in1d(self._cumcount_array(), nth_array) + mask_right = np.in1d( + self._cumcount_array(ascending=False) + 1, -nth_array + ) + mask = mask_left | mask_right - ids, _, _ = self.grouper.group_info + ids, _, _ = self.grouper.group_info - # Drop NA values in grouping - mask = mask & (ids != -1) + # Drop NA values in grouping + mask = mask & (ids != -1) - out = self._selected_obj[mask] - if not self.as_index: - return out + out = self._selected_obj[mask] + if not self.as_index: + return out - result_index = self.grouper.result_index - out.index = result_index[ids[mask]] + result_index = self.grouper.result_index + out.index = result_index[ids[mask]] - if not self.observed and isinstance(result_index, CategoricalIndex): - out = out.reindex(result_index) + if not self.observed and isinstance(result_index, CategoricalIndex): + out = out.reindex(result_index) - out = self._reindex_output(out) - return out.sort_index() if self.sort else out + out = self._reindex_output(out) + return out.sort_index() if self.sort else out # dropna is truthy if isinstance(n, valid_containers): diff --git a/pandas/tests/groupby/aggregate/test_other.py b/pandas/tests/groupby/aggregate/test_other.py index 264cf40dc6984..e8cd6017a117c 100644 --- a/pandas/tests/groupby/aggregate/test_other.py +++ b/pandas/tests/groupby/aggregate/test_other.py @@ -486,13 +486,13 @@ def test_agg_timezone_round_trip(): assert ts == grouped.first()["B"].iloc[0] # GH#27110 applying iloc should return a DataFrame - assert ts == grouped.apply(lambda x: x.iloc[0]).iloc[0, 0] + assert ts == grouped.apply(lambda x: x.iloc[0]).iloc[0, 1] ts = df["B"].iloc[2] assert ts == grouped.last()["B"].iloc[0] # GH#27110 applying iloc should return a DataFrame - assert ts == grouped.apply(lambda x: x.iloc[-1]).iloc[0, 0] + assert ts == grouped.apply(lambda x: x.iloc[-1]).iloc[0, 1] def test_sum_uint64_overflow(): diff --git a/pandas/tests/groupby/test_apply.py
b/pandas/tests/groupby/test_apply.py index 665cd12225ad7..ee38722ffb8ce 100644 --- a/pandas/tests/groupby/test_apply.py +++ b/pandas/tests/groupby/test_apply.py @@ -1009,6 +1009,35 @@ def test_apply_with_timezones_aware(): tm.assert_frame_equal(result1, result2) +def test_apply_is_unchanged_when_other_methods_are_called_first(reduction_func): + # GH #34656 + # GH #34271 + df = DataFrame( + { + "a": [99, 99, 99, 88, 88, 88], + "b": [1, 2, 3, 4, 5, 6], + "c": [10, 20, 30, 40, 50, 60], + } + ) + + expected = pd.DataFrame( + {"a": [264, 297], "b": [15, 6], "c": [150, 60]}, + index=pd.Index([88, 99], name="a"), + ) + + # Check output when no other methods are called before .apply() + grp = df.groupby(by="a") + result = grp.apply(sum) + tm.assert_frame_equal(result, expected) + + # Check output when another method is called before .apply() + grp = df.groupby(by="a") + args = {"nth": [0], "corrwith": [df]}.get(reduction_func, []) + _ = getattr(grp, reduction_func)(*args) + result = grp.apply(sum) + tm.assert_frame_equal(result, expected) + + def test_apply_with_date_in_multiindex_does_not_convert_to_timestamp(): # GH 29617 diff --git a/pandas/tests/groupby/test_grouping.py b/pandas/tests/groupby/test_grouping.py index efcd22f9c0c82..40b4ce46e550b 100644 --- a/pandas/tests/groupby/test_grouping.py +++ b/pandas/tests/groupby/test_grouping.py @@ -191,13 +191,15 @@ def test_grouper_creation_bug(self): result = g.sum() tm.assert_frame_equal(result, expected) - result = g.apply(lambda x: x.sum()) - tm.assert_frame_equal(result, expected) - g = df.groupby(pd.Grouper(key="A", axis=0)) result = g.sum() tm.assert_frame_equal(result, expected) + result = g.apply(lambda x: x.sum()) + expected["A"] = [0, 2, 4] + expected = expected.loc[:, ["A", "B"]] + tm.assert_frame_equal(result, expected) + # GH14334 # pd.Grouper(key=...) 
may be passed in a list df = DataFrame( From 9843926e39dee88f689afc51cbf0f2fdc232dcd3 Mon Sep 17 00:00:00 2001 From: Andrew Wieteska <48889395+arw2019@users.noreply.github.com> Date: Fri, 7 Aug 2020 12:58:23 -0400 Subject: [PATCH 230/632] CLN: clarify TypeError for IndexSlice argument to pd.xs (#35411) --- doc/source/whatsnew/v1.2.0.rst | 2 +- pandas/core/generic.py | 5 ++++- pandas/tests/indexing/multiindex/test_xs.py | 23 ++++++++++++++++++++- 3 files changed, 27 insertions(+), 3 deletions(-) diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index 5a4fa3150e344..15616f4a6f27c 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -134,7 +134,7 @@ Missing MultiIndex ^^^^^^^^^^ -- +- Bug in :meth:`DataFrame.xs` when used with :class:`IndexSlice` raises ``TypeError`` with message `Expected label or tuple of labels` (:issue:`35301`) - I/O diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 6fd55c58ece40..843b602a12823 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -3492,7 +3492,10 @@ class animal locomotion index = self.index if isinstance(index, MultiIndex): - loc, new_index = self.index.get_loc_level(key, drop_level=drop_level) + try: + loc, new_index = self.index.get_loc_level(key, drop_level=drop_level) + except TypeError as e: + raise TypeError(f"Expected label or tuple of labels, got {key}") from e else: loc = self.index.get_loc(key) diff --git a/pandas/tests/indexing/multiindex/test_xs.py b/pandas/tests/indexing/multiindex/test_xs.py index b807795b9c309..91be1d913001b 100644 --- a/pandas/tests/indexing/multiindex/test_xs.py +++ b/pandas/tests/indexing/multiindex/test_xs.py @@ -1,7 +1,7 @@ import numpy as np import pytest -from pandas import DataFrame, Index, MultiIndex, Series, concat, date_range +from pandas import DataFrame, Index, IndexSlice, MultiIndex, Series, concat, date_range import pandas._testing as tm import pandas.core.common as com @@ -220,6 +220,27 @@ def test_xs_level_series_slice_not_implemented( s[2000, 3:4] +def test_xs_IndexSlice_argument_not_implemented(): + # GH 35301 + + index = MultiIndex( + levels=[[("foo", "bar", 0), ("foo", "baz", 0), ("foo", "qux", 0)], [0, 1]], + codes=[[0, 0, 1, 1, 2, 2], [0, 1, 0, 1, 0, 1]], + ) + + series = Series(np.random.randn(6), index=index) + frame = DataFrame(np.random.randn(6, 4), index=index) + + msg = ( + "Expected label or tuple of labels, got " + r"\(\('foo', 'qux', 0\), slice\(None, None, None\)\)" + ) + with pytest.raises(TypeError, match=msg): + frame.xs(IndexSlice[("foo", "qux", 0), :]) + with pytest.raises(TypeError, match=msg): + series.xs(IndexSlice[("foo", "qux", 0), :]) + + def test_series_getitem_multiindex_xs(): # GH6258 dt = list(date_range("20130903", periods=3)) From 1922ec414d50f88fb1db7d219159222f4dc89674 Mon Sep 17 00:00:00 2001 From: Matthew Roeschke Date: Fri, 7 Aug 2020 11:09:30 -0700 Subject: [PATCH 231/632] BUG: Ensure rolling groupby doesn't segfault with center=True (#35562) --- doc/source/whatsnew/v1.1.1.rst | 1 + pandas/core/window/indexers.py | 6 +++ pandas/tests/window/test_grouper.py | 65 +++++++++++++++++++++++++++++ 3 files changed, 72 insertions(+) diff --git a/doc/source/whatsnew/v1.1.1.rst b/doc/source/whatsnew/v1.1.1.rst index 6b315e0a9d016..a044a4aab284e 100644 --- a/doc/source/whatsnew/v1.1.1.rst +++ b/doc/source/whatsnew/v1.1.1.rst @@ -18,6 +18,7 @@ Fixed regressions - Fixed regression where :func:`read_csv` would raise a ``ValueError`` when ``pandas.options.mode.use_inf_as_na`` was set to ``True`` 
(:issue:`35493`). - Fixed regression in :class:`pandas.core.groupby.RollingGroupby` where column selection was ignored (:issue:`35486`) - Fixed regression in :meth:`DataFrame.shift` with ``axis=1`` and heterogeneous dtypes (:issue:`35488`) +- Fixed regression in ``.groupby(..).rolling(..)`` where a segfault would occur with ``center=True`` and an odd number of values (:issue:`35552`) .. --------------------------------------------------------------------------- diff --git a/pandas/core/window/indexers.py b/pandas/core/window/indexers.py index 0898836ed2e0e..bc36bdca982e8 100644 --- a/pandas/core/window/indexers.py +++ b/pandas/core/window/indexers.py @@ -319,4 +319,10 @@ def get_window_bounds( end_arrays.append(window_indicies.take(end)) start = np.concatenate(start_arrays) end = np.concatenate(end_arrays) + # GH 35552: Need to adjust start and end based on the nans appended to values + # when center=True + if num_values > len(start): + offset = num_values - len(start) + start = np.concatenate([start, np.array([end[-1]] * offset)]) + end = np.concatenate([end, np.array([end[-1]] * offset)]) return start, end diff --git a/pandas/tests/window/test_grouper.py b/pandas/tests/window/test_grouper.py index ca5a9eccea4f5..5241b9548a442 100644 --- a/pandas/tests/window/test_grouper.py +++ b/pandas/tests/window/test_grouper.py @@ -215,6 +215,71 @@ def foo(x): ) tm.assert_series_equal(result, expected) + def test_groupby_rolling_center_center(self): + # GH 35552 + series = Series(range(1, 6)) + result = series.groupby(series).rolling(center=True, window=3).mean() + expected = Series( + [np.nan] * 5, + index=pd.MultiIndex.from_tuples(((1, 0), (2, 1), (3, 2), (4, 3), (5, 4))), + ) + tm.assert_series_equal(result, expected) + + series = Series(range(1, 5)) + result = series.groupby(series).rolling(center=True, window=3).mean() + expected = Series( + [np.nan] * 4, + index=pd.MultiIndex.from_tuples(((1, 0), (2, 1), (3, 2), (4, 3))), + ) + tm.assert_series_equal(result, expected) + + df = pd.DataFrame({"a": ["a"] * 5 + ["b"] * 6, "b": range(11)}) + result = df.groupby("a").rolling(center=True, window=3).mean() + expected = pd.DataFrame( + [np.nan, 1, 2, 3, np.nan, np.nan, 6, 7, 8, 9, np.nan], + index=pd.MultiIndex.from_tuples( + ( + ("a", 0), + ("a", 1), + ("a", 2), + ("a", 3), + ("a", 4), + ("b", 5), + ("b", 6), + ("b", 7), + ("b", 8), + ("b", 9), + ("b", 10), + ), + names=["a", None], + ), + columns=["b"], + ) + tm.assert_frame_equal(result, expected) + + df = pd.DataFrame({"a": ["a"] * 5 + ["b"] * 5, "b": range(10)}) + result = df.groupby("a").rolling(center=True, window=3).mean() + expected = pd.DataFrame( + [np.nan, 1, 2, 3, np.nan, np.nan, 6, 7, 8, np.nan], + index=pd.MultiIndex.from_tuples( + ( + ("a", 0), + ("a", 1), + ("a", 2), + ("a", 3), + ("a", 4), + ("b", 5), + ("b", 6), + ("b", 7), + ("b", 8), + ("b", 9), + ), + names=["a", None], + ), + columns=["b"], + ) + tm.assert_frame_equal(result, expected) + def test_groupby_subselect_rolling(self): # GH 35486 df = DataFrame( From 23dcab58412900954de7dee1ab1bbdf257c903c9 Mon Sep 17 00:00:00 2001 From: Deepak Pandey <40865954+freakypandit@users.noreply.github.com> Date: Sat, 8 Aug 2020 00:46:39 +0530 Subject: [PATCH 232/632] DOC: Docstring updated for DataFrame.equals (#34508) Co-authored-by: Joris Van den Bossche --- pandas/core/generic.py | 15 +++++---------- 1 file changed, 5 insertions(+), 10 deletions(-) diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 843b602a12823..87f25f578c3c6 100644 --- a/pandas/core/generic.py +++ 
b/pandas/core/generic.py @@ -1201,9 +1201,11 @@ def equals(self, other): This function allows two Series or DataFrames to be compared against each other to see if they have the same shape and elements. NaNs in - the same location are considered equal. The column headers do not - need to have the same type, but the elements within the columns must - be the same dtype. + the same location are considered equal. + + The row/column indexes do not need to have the same type, as long + as the values are considered equal. Corresponding columns must be of + the same dtype. Parameters ---------- @@ -1232,13 +1234,6 @@ def equals(self, other): numpy.array_equal : Return True if two arrays have the same shape and elements, False otherwise. - Notes - ----- - This function requires that the elements have the same dtype as their - respective elements in the other Series or DataFrame. However, the - column labels do not need to have the same type, as long as they are - still considered equal. - Examples -------- >>> df = pd.DataFrame({1: [10], 2: [20]}) From 319a6d3f0d93c372bd53498168b957aaf5daa174 Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Fri, 7 Aug 2020 12:35:19 -0700 Subject: [PATCH 233/632] REF: use unpack_zerodim_and_defer on EA methods (#34042) --- pandas/core/arrays/base.py | 4 ++-- pandas/core/arrays/boolean.py | 19 ++++--------------- pandas/core/arrays/numpy_.py | 9 +++------ pandas/core/arrays/string_.py | 8 +++----- pandas/tests/extension/base/ops.py | 15 +++++++++++---- pandas/tests/extension/test_period.py | 6 +++++- 6 files changed, 28 insertions(+), 33 deletions(-) diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py index 2553a65aed07b..921927325a144 100644 --- a/pandas/core/arrays/base.py +++ b/pandas/core/arrays/base.py @@ -22,7 +22,7 @@ from pandas.core.dtypes.cast import maybe_cast_to_extension_array from pandas.core.dtypes.common import is_array_like, is_list_like, pandas_dtype from pandas.core.dtypes.dtypes import ExtensionDtype -from pandas.core.dtypes.generic import ABCIndexClass, ABCSeries +from pandas.core.dtypes.generic import ABCDataFrame, ABCIndexClass, ABCSeries from pandas.core.dtypes.missing import isna from pandas.core import ops @@ -1273,7 +1273,7 @@ def convert_values(param): ovalues = [param] * len(self) return ovalues - if isinstance(other, (ABCSeries, ABCIndexClass)): + if isinstance(other, (ABCSeries, ABCIndexClass, ABCDataFrame)): # rely on pandas to unbox and dispatch to us return NotImplemented diff --git a/pandas/core/arrays/boolean.py b/pandas/core/arrays/boolean.py index dbce71b77a425..bd4bdc5ecb46f 100644 --- a/pandas/core/arrays/boolean.py +++ b/pandas/core/arrays/boolean.py @@ -20,7 +20,6 @@ pandas_dtype, ) from pandas.core.dtypes.dtypes import register_extension_dtype -from pandas.core.dtypes.generic import ABCDataFrame, ABCIndexClass, ABCSeries from pandas.core.dtypes.missing import isna from pandas.core import ops @@ -559,13 +558,10 @@ def all(self, skipna: bool = True, **kwargs): @classmethod def _create_logical_method(cls, op): + @ops.unpack_zerodim_and_defer(op.__name__) def logical_method(self, other): - if isinstance(other, (ABCDataFrame, ABCSeries, ABCIndexClass)): - # Rely on pandas to unbox and dispatch to us.
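# For context, a minimal sketch of the deferral pattern that
# ops.unpack_zerodim_and_defer applies (the name `defer_to_pandas` and this
# simplified body are illustrative only, not pandas' actual implementation):
#
#     import numpy as np
#     import pandas as pd
#
#     def defer_to_pandas(method):
#         def wrapper(self, other):
#             if isinstance(other, (pd.Series, pd.DataFrame, pd.Index)):
#                 # let the container unbox/align and re-dispatch to the EA
#                 return NotImplemented
#             if isinstance(other, np.ndarray) and other.ndim == 0:
#                 other = other[()]  # unpack zero-dimensional ndarrays
#             return method(self, other)
#         return wrapper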
- return NotImplemented assert op.__name__ in {"or_", "ror_", "and_", "rand_", "xor", "rxor"} - other = lib.item_from_zerodim(other) other_is_booleanarray = isinstance(other, BooleanArray) other_is_scalar = lib.is_scalar(other) mask = None @@ -605,16 +601,14 @@ def logical_method(self, other): @classmethod def _create_comparison_method(cls, op): + @ops.unpack_zerodim_and_defer(op.__name__) def cmp_method(self, other): from pandas.arrays import IntegerArray - if isinstance( - other, (ABCDataFrame, ABCSeries, ABCIndexClass, IntegerArray) - ): + if isinstance(other, IntegerArray): # Rely on pandas to unbox and dispatch to us. return NotImplemented - other = lib.item_from_zerodim(other) mask = None if isinstance(other, BooleanArray): @@ -693,13 +687,8 @@ def _maybe_mask_result(self, result, mask, other, op_name: str): def _create_arithmetic_method(cls, op): op_name = op.__name__ + @ops.unpack_zerodim_and_defer(op_name) def boolean_arithmetic_method(self, other): - - if isinstance(other, (ABCDataFrame, ABCSeries, ABCIndexClass)): - # Rely on pandas to unbox and dispatch to us. - return NotImplemented - - other = lib.item_from_zerodim(other) mask = None if isinstance(other, BooleanArray): diff --git a/pandas/core/arrays/numpy_.py b/pandas/core/arrays/numpy_.py index f6dfb1f0f1e62..05f901518d82f 100644 --- a/pandas/core/arrays/numpy_.py +++ b/pandas/core/arrays/numpy_.py @@ -11,12 +11,11 @@ from pandas.util._validators import validate_fillna_kwargs from pandas.core.dtypes.dtypes import ExtensionDtype -from pandas.core.dtypes.generic import ABCIndexClass, ABCSeries from pandas.core.dtypes.inference import is_array_like from pandas.core.dtypes.missing import isna from pandas import compat -from pandas.core import nanops +from pandas.core import nanops, ops from pandas.core.algorithms import searchsorted from pandas.core.array_algos import masked_reductions from pandas.core.arrays._mixins import NDArrayBackedExtensionArray @@ -436,11 +435,9 @@ def __invert__(self): @classmethod def _create_arithmetic_method(cls, op): + @ops.unpack_zerodim_and_defer(op.__name__) def arithmetic_method(self, other): - if isinstance(other, (ABCIndexClass, ABCSeries)): - return NotImplemented - - elif isinstance(other, cls): + if isinstance(other, cls): other = other._ndarray with np.errstate(all="ignore"): diff --git a/pandas/core/arrays/string_.py b/pandas/core/arrays/string_.py index fddd3af858f77..bb55c3cdea45c 100644 --- a/pandas/core/arrays/string_.py +++ b/pandas/core/arrays/string_.py @@ -7,7 +7,6 @@ from pandas.core.dtypes.base import ExtensionDtype, register_extension_dtype from pandas.core.dtypes.common import pandas_dtype -from pandas.core.dtypes.generic import ABCDataFrame, ABCIndexClass, ABCSeries from pandas.core.dtypes.inference import is_array_like from pandas import compat @@ -312,15 +311,14 @@ def memory_usage(self, deep=False): @classmethod def _create_arithmetic_method(cls, op): # Note: this handles both arithmetic and comparison methods. 
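# Rough illustration of the dispatch this enables (behaviour assumed for
# this era of pandas): the array dunder defers to pandas containers, so
# Python falls back to the container's own op, which unboxes, aligns, and
# calls back into the array.
#
#     import pandas as pd
#
#     arr = pd.array(["a", "b"], dtype="string")
#     ser = pd.Series(arr)
#     type(arr == arr)   # BooleanArray: the EA handles EA/scalar operands
#     type(arr == ser)   # Series: the EA returns NotImplemented, then
#                        # Series.__eq__ takes over and re-dispatches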
+ + @ops.unpack_zerodim_and_defer(op.__name__) def method(self, other): from pandas.arrays import BooleanArray assert op.__name__ in ops.ARITHMETIC_BINOPS | ops.COMPARISON_BINOPS - if isinstance(other, (ABCIndexClass, ABCSeries, ABCDataFrame)): - return NotImplemented - - elif isinstance(other, cls): + if isinstance(other, cls): other = other._ndarray mask = isna(self) | isna(other) diff --git a/pandas/tests/extension/base/ops.py b/pandas/tests/extension/base/ops.py index 359acf230ce14..c93603398977e 100644 --- a/pandas/tests/extension/base/ops.py +++ b/pandas/tests/extension/base/ops.py @@ -114,10 +114,13 @@ def test_error(self, data, all_arithmetic_operators): with pytest.raises(AttributeError): getattr(data, op_name) - def test_direct_arith_with_series_returns_not_implemented(self, data): - # EAs should return NotImplemented for ops with Series. + @pytest.mark.parametrize("box", [pd.Series, pd.DataFrame]) + def test_direct_arith_with_ndframe_returns_not_implemented(self, data, box): + # EAs should return NotImplemented for ops with Series/DataFrame # Pandas takes care of unboxing the series and calling the EA's op. other = pd.Series(data) + if box is pd.DataFrame: + other = other.to_frame() if hasattr(data, "__add__"): result = data.__add__(other) assert result is NotImplemented @@ -156,10 +159,14 @@ def test_compare_array(self, data, all_compare_operators): other = pd.Series([data[0]] * len(data)) self._compare_other(s, data, op_name, other) - def test_direct_arith_with_series_returns_not_implemented(self, data): - # EAs should return NotImplemented for ops with Series. + @pytest.mark.parametrize("box", [pd.Series, pd.DataFrame]) + def test_direct_arith_with_ndframe_returns_not_implemented(self, data, box): + # EAs should return NotImplemented for ops with Series/DataFrame # Pandas takes care of unboxing the series and calling the EA's op. 
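# A self-contained rendering of the contract these tests exercise
# ("Int64" is picked here as an arbitrary concrete EA dtype, not the
# fixture under test):
#
#     import pandas as pd
#
#     data = pd.array([1, 2, 3], dtype="Int64")
#     frame = pd.Series(data).to_frame()
#     assert data.__add__(frame) is NotImplemented   # EA defers to pandas
#     assert data.__eq__(frame) is NotImplemented    # comparisons likewise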
other = pd.Series(data) + if box is pd.DataFrame: + other = other.to_frame() + if hasattr(data, "__eq__"): result = data.__eq__(other) assert result is NotImplemented diff --git a/pandas/tests/extension/test_period.py b/pandas/tests/extension/test_period.py index b1eb276bfc227..817881e00fa99 100644 --- a/pandas/tests/extension/test_period.py +++ b/pandas/tests/extension/test_period.py @@ -126,9 +126,13 @@ def test_add_series_with_extension_array(self, data): def test_error(self): pass - def test_direct_arith_with_series_returns_not_implemented(self, data): + @pytest.mark.parametrize("box", [pd.Series, pd.DataFrame]) + def test_direct_arith_with_ndframe_returns_not_implemented(self, data, box): # Override to use __sub__ instead of __add__ other = pd.Series(data) + if box is pd.DataFrame: + other = other.to_frame() + result = data.__sub__(other) assert result is NotImplemented From 067f86f659db3f7021957f935dd5d00bf6eec1aa Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Fri, 7 Aug 2020 14:03:00 -0700 Subject: [PATCH 234/632] PERF: BlockManager.equals blockwise (#35357) --- pandas/core/dtypes/missing.py | 14 ++++++- pandas/core/internals/managers.py | 27 ++------------ pandas/core/internals/ops.py | 61 ++++++++++++++++++++++--------- 3 files changed, 61 insertions(+), 41 deletions(-) diff --git a/pandas/core/dtypes/missing.py b/pandas/core/dtypes/missing.py index 8551ce9f14e6c..f59bb31af2828 100644 --- a/pandas/core/dtypes/missing.py +++ b/pandas/core/dtypes/missing.py @@ -10,7 +10,7 @@ from pandas._libs import lib import pandas._libs.missing as libmissing from pandas._libs.tslibs import NaT, iNaT -from pandas._typing import DtypeObj +from pandas._typing import ArrayLike, DtypeObj from pandas.core.dtypes.common import ( DT64NS_DTYPE, @@ -484,6 +484,18 @@ def _array_equivalent_object(left, right, strict_nan): return True +def array_equals(left: ArrayLike, right: ArrayLike) -> bool: + """ + ExtensionArray-compatible implementation of array_equivalent. 
+ """ + if not is_dtype_equal(left.dtype, right.dtype): + return False + elif isinstance(left, ABCExtensionArray): + return left.equals(right) + else: + return array_equivalent(left, right, dtype_equal=True) + + def _infer_fill_value(val): """ infer the fill value for the nan/NaT from the provided diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py index 4693cc193c27c..4b85f92391dce 100644 --- a/pandas/core/internals/managers.py +++ b/pandas/core/internals/managers.py @@ -19,7 +19,6 @@ from pandas.core.dtypes.common import ( DT64NS_DTYPE, is_datetimelike_v_numeric, - is_dtype_equal, is_extension_array_dtype, is_list_like, is_numeric_v_string_like, @@ -28,10 +27,9 @@ from pandas.core.dtypes.concat import concat_compat from pandas.core.dtypes.dtypes import ExtensionDtype from pandas.core.dtypes.generic import ABCDataFrame, ABCSeries -from pandas.core.dtypes.missing import array_equivalent, isna +from pandas.core.dtypes.missing import array_equals, isna import pandas.core.algorithms as algos -from pandas.core.arrays import ExtensionArray from pandas.core.arrays.sparse import SparseDtype from pandas.core.base import PandasObject import pandas.core.common as com @@ -49,7 +47,7 @@ get_block_type, make_block, ) -from pandas.core.internals.ops import operate_blockwise +from pandas.core.internals.ops import blockwise_all, operate_blockwise # TODO: flexible with index=None and/or items=None @@ -1449,26 +1447,9 @@ def equals(self, other: "BlockManager") -> bool: return False left = self.blocks[0].values right = other.blocks[0].values - if not is_dtype_equal(left.dtype, right.dtype): - return False - elif isinstance(left, ExtensionArray): - return left.equals(right) - else: - return array_equivalent(left, right) + return array_equals(left, right) - for i in range(len(self.items)): - # Check column-wise, return False if any column doesn't match - left = self.iget_values(i) - right = other.iget_values(i) - if not is_dtype_equal(left.dtype, right.dtype): - return False - elif isinstance(left, ExtensionArray): - if not left.equals(right): - return False - else: - if not array_equivalent(left, right, dtype_equal=True): - return False - return True + return blockwise_all(self, other, array_equals) def unstack(self, unstacker, fill_value) -> "BlockManager": """ diff --git a/pandas/core/internals/ops.py b/pandas/core/internals/ops.py index 6eedf72726acb..ae4892c720d5b 100644 --- a/pandas/core/internals/ops.py +++ b/pandas/core/internals/ops.py @@ -1,4 +1,5 @@ -from typing import TYPE_CHECKING, List, Tuple +from collections import namedtuple +from typing import TYPE_CHECKING, Iterator, List, Tuple import numpy as np @@ -9,13 +10,17 @@ from pandas.core.internals.managers import BlockManager # noqa:F401 -def operate_blockwise( - left: "BlockManager", right: "BlockManager", array_op -) -> "BlockManager": +BlockPairInfo = namedtuple( + "BlockPairInfo", ["lvals", "rvals", "locs", "left_ea", "right_ea", "rblk"], +) + + +def _iter_block_pairs( + left: "BlockManager", right: "BlockManager" +) -> Iterator[BlockPairInfo]: # At this point we have already checked the parent DataFrames for # assert rframe._indexed_same(lframe) - res_blks: List["Block"] = [] for n, blk in enumerate(left.blocks): locs = blk.mgr_locs blk_vals = blk.values @@ -34,21 +39,32 @@ def operate_blockwise( right_ea = not isinstance(rblk.values, np.ndarray) lvals, rvals = _get_same_shape_values(blk, rblk, left_ea, right_ea) + info = BlockPairInfo(lvals, rvals, locs, left_ea, right_ea, rblk) + yield info - res_values = 
array_op(lvals, rvals) - if left_ea and not right_ea and hasattr(res_values, "reshape"): - res_values = res_values.reshape(1, -1) - nbs = rblk._split_op_result(res_values) - # Assertions are disabled for performance, but should hold: - # if right_ea or left_ea: - # assert len(nbs) == 1 - # else: - # assert res_values.shape == lvals.shape, (res_values.shape, lvals.shape) +def operate_blockwise( + left: "BlockManager", right: "BlockManager", array_op +) -> "BlockManager": + # At this point we have already checked the parent DataFrames for + # assert rframe._indexed_same(lframe) + + res_blks: List["Block"] = [] + for lvals, rvals, locs, left_ea, right_ea, rblk in _iter_block_pairs(left, right): + res_values = array_op(lvals, rvals) + if left_ea and not right_ea and hasattr(res_values, "reshape"): + res_values = res_values.reshape(1, -1) + nbs = rblk._split_op_result(res_values) + + # Assertions are disabled for performance, but should hold: + # if right_ea or left_ea: + # assert len(nbs) == 1 + # else: + # assert res_values.shape == lvals.shape, (res_values.shape, lvals.shape) - _reset_block_mgr_locs(nbs, locs) + _reset_block_mgr_locs(nbs, locs) - res_blks.extend(nbs) + res_blks.extend(nbs) # Assertions are disabled for performance, but should hold: # slocs = {y for nb in res_blks for y in nb.mgr_locs.as_array} @@ -85,7 +101,7 @@ def _get_same_shape_values( # Require that the indexing into lvals be slice-like assert rblk.mgr_locs.is_slice_like, rblk.mgr_locs - # TODO(EA2D): with 2D EAs pnly this first clause would be needed + # TODO(EA2D): with 2D EAs only this first clause would be needed if not (left_ea or right_ea): lvals = lvals[rblk.mgr_locs.indexer, :] assert lvals.shape == rvals.shape, (lvals.shape, rvals.shape) @@ -102,3 +118,14 @@ def _get_same_shape_values( rvals = rvals[0, :] return lvals, rvals + + +def blockwise_all(left: "BlockManager", right: "BlockManager", op) -> bool: + """ + Blockwise `all` reduction. + """ + for info in _iter_block_pairs(left, right): + res = op(info.lvals, info.rvals) + if not res: + return False + return True From f194094f51ce44f3a759058720c7b63291f419af Mon Sep 17 00:00:00 2001 From: Andrew Wieteska <48889395+arw2019@users.noreply.github.com> Date: Fri, 7 Aug 2020 17:33:05 -0400 Subject: [PATCH 235/632] BUG: DataFrameGroupBy.__getitem__ fails to propagate dropna (#35078) --- doc/source/whatsnew/v1.2.0.rst | 2 +- pandas/core/groupby/generic.py | 18 +++++------ pandas/tests/groupby/test_groupby_dropna.py | 34 +++++++++++++++++++++ 3 files changed, 43 insertions(+), 11 deletions(-) diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index 15616f4a6f27c..74ef5178eb004 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -128,7 +128,7 @@ Indexing Missing ^^^^^^^ -- +- Bug in :meth:`SeriesGroupBy.transform` now correctly handles missing values for `dropna=False` (:issue:`35014`) - MultiIndex diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index 740463f0cf356..1fed193dba02c 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -35,11 +35,11 @@ from pandas.util._decorators import Appender, Substitution, doc from pandas.core.dtypes.cast import ( + find_common_type, maybe_cast_result, maybe_cast_result_dtype, maybe_convert_objects, maybe_downcast_numeric, - maybe_downcast_to_dtype, ) from pandas.core.dtypes.common import ( ensure_int64, @@ -513,7 +513,6 @@ def _transform_general( """ Transform with a non-str `func`. 
""" - if maybe_use_numba(engine): numba_func, cache_key = generate_numba_func( func, engine_kwargs, kwargs, "groupby_transform" @@ -535,24 +534,23 @@ def _transform_general( if isinstance(res, (ABCDataFrame, ABCSeries)): res = res._values - indexer = self._get_index(name) - ser = klass(res, indexer) - results.append(ser) + results.append(klass(res, index=group.index)) # check for empty "results" to avoid concat ValueError if results: from pandas.core.reshape.concat import concat - result = concat(results).sort_index() + concatenated = concat(results) + result = self._set_result_index_ordered(concatenated) else: result = self.obj._constructor(dtype=np.float64) - # we will only try to coerce the result type if # we have a numeric dtype, as these are *always* user-defined funcs # the cython take a different path (and casting) - dtype = self._selected_obj.dtype - if is_numeric_dtype(dtype): - result = maybe_downcast_to_dtype(result, dtype) + if is_numeric_dtype(result.dtype): + common_dtype = find_common_type([self._selected_obj.dtype, result.dtype]) + if common_dtype is result.dtype: + result = maybe_downcast_numeric(result, self._selected_obj.dtype) result.name = self._selected_obj.name result.index = self._selected_obj.index diff --git a/pandas/tests/groupby/test_groupby_dropna.py b/pandas/tests/groupby/test_groupby_dropna.py index 1a525d306e9f5..adf62c4723526 100644 --- a/pandas/tests/groupby/test_groupby_dropna.py +++ b/pandas/tests/groupby/test_groupby_dropna.py @@ -162,6 +162,40 @@ def test_groupby_dropna_series_by(dropna, expected): tm.assert_series_equal(result, expected) +@pytest.mark.parametrize( + "dropna,df_expected,s_expected", + [ + pytest.param( + True, + pd.DataFrame({"B": [2, 2, 1]}), + pd.Series(data=[2, 2, 1], name="B"), + marks=pytest.mark.xfail(raises=ValueError), + ), + ( + False, + pd.DataFrame({"B": [2, 2, 1, 1]}), + pd.Series(data=[2, 2, 1, 1], name="B"), + ), + ], +) +def test_slice_groupby_then_transform(dropna, df_expected, s_expected): + # GH35014 + + df = pd.DataFrame({"A": [0, 0, 1, None], "B": [1, 2, 3, None]}) + gb = df.groupby("A", dropna=dropna) + + res = gb.transform(len) + tm.assert_frame_equal(res, df_expected) + + gb_slice = gb[["B"]] + res = gb_slice.transform(len) + tm.assert_frame_equal(res, df_expected) + + gb_slice = gb["B"] + res = gb["B"].transform(len) + tm.assert_series_equal(res, s_expected) + + @pytest.mark.parametrize( "dropna, tuples, outputs", [ From 16bd49d8a7e4e28acac3b64f1923bd07694bc963 Mon Sep 17 00:00:00 2001 From: attack68 <24256554+attack68@users.noreply.github.com> Date: Fri, 7 Aug 2020 23:35:21 +0200 Subject: [PATCH 236/632] BUG: fix styler cell_ids arg so that blank style is ignored on False (#35588) --- doc/source/whatsnew/v1.1.1.rst | 1 + pandas/io/formats/style.py | 2 +- pandas/tests/io/formats/test_style.py | 6 ++++++ 3 files changed, 8 insertions(+), 1 deletion(-) diff --git a/doc/source/whatsnew/v1.1.1.rst b/doc/source/whatsnew/v1.1.1.rst index a044a4aab284e..ade88a6127014 100644 --- a/doc/source/whatsnew/v1.1.1.rst +++ b/doc/source/whatsnew/v1.1.1.rst @@ -27,6 +27,7 @@ Fixed regressions Bug fixes ~~~~~~~~~ +- Bug in ``Styler`` whereby `cell_ids` argument had no effect due to other recent changes (:issue:`35588`). 
Categorical ^^^^^^^^^^^ diff --git a/pandas/io/formats/style.py index fd1efa2d1b668..584f42a6cab12 100644 --- a/pandas/io/formats/style.py +++ b/pandas/io/formats/style.py @@ -390,7 +390,7 @@ def format_attr(pair): "is_visible": (c not in hidden_columns), } # only add an id if the cell has a style - if self.cell_ids or not (len(ctx[r, c]) == 1 and ctx[r, c][0] == ""): + if self.cell_ids or (r, c) in ctx: row_dict["id"] = "_".join(cs[1:]) row_es.append(row_dict) props = [] diff --git a/pandas/tests/io/formats/test_style.py index 9c6910637fa7e..3ef5157655e78 100644 --- a/pandas/tests/io/formats/test_style.py +++ b/pandas/tests/io/formats/test_style.py @@ -1682,6 +1682,12 @@ def f(a, b, styler): result = styler.pipe((f, "styler"), a=1, b=2) assert result == (1, 2, styler) + def test_no_cell_ids(self): + # GH 35588 + df = pd.DataFrame(data=[[0]]) + s = Styler(df, uuid="_", cell_ids=False).render() + assert s.find('<td  class="data row0 col0" >') != -1 + @td.skip_if_no_mpl class TestStylerMatplotlibDep: From b1d3897c2e3d3174a648721ffe4ad045bfa7d51f Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Tue, 8 Sep 2020 17:55:21 -0700 Subject: [PATCH 488/632] REF: pass setitem to unbox_scalar to de-duplicate validation (#36234) --- pandas/core/arrays/datetimelike.py | 20 +++++++++++--------- pandas/core/arrays/datetimes.py | 4 ++-- pandas/core/arrays/period.py | 6 ++++-- pandas/core/arrays/timedeltas.py | 4 ++-- pandas/tests/arrays/test_datetimelike.py | 2 +- 5 files changed, 20 insertions(+), 16 deletions(-) diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index ba5bfc108f16b..bb40cf78ea006 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -183,7 +183,7 @@ def _rebox_native(cls, value: int) -> Union[int, np.datetime64, np.timedelta64]: """ raise AbstractMethodError(cls) - def _unbox_scalar(self, value: DTScalarOrNaT) -> int: + def _unbox_scalar(self, value: DTScalarOrNaT, setitem: bool = False) -> int: """ Unbox the integer value of a scalar `value`. @@ -191,6 +191,8 @@ def _unbox_scalar(self, value: DTScalarOrNaT) -> int: ---------- value : Period, Timestamp, Timedelta, or NaT Depending on subclass. + setitem : bool, default False + Whether to check compatibility with setitem strictness. Returns ------- @@ -841,6 +843,7 @@ def _validate_listlike( if is_dtype_equal(value.categories.dtype, self.dtype): # TODO: do we need equal dtype or just comparable? value = value._internal_get_values() + value = extract_array(value, extract_numpy=True) if allow_object and is_object_dtype(value.dtype): pass @@ -875,8 +878,7 @@ def _validate_setitem_value(self, value): # TODO: cast_str for consistency? value = self._validate_scalar(value, msg, cast_str=False) - self._check_compatible_with(value, setitem=True) - return self._unbox(value) + return self._unbox(value, setitem=True) def _validate_insert_value(self, value): msg = f"cannot insert {type(self).__name__} with incompatible label" @@ -886,6 +888,8 @@ def _validate_insert_value(self, value): # TODO: if we dont have compat, should we raise or astype(object)? # PeriodIndex does astype(object) return value + # Note: we do not unbox here because the caller needs boxed value + # to check for freq.
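# Sketch of the stricter checking that `setitem=True` routes through
# (assumed behaviour for this era of pandas; the message text is
# approximate): assigning a scalar with a mismatched timezone into a
# tz-aware DatetimeArray is rejected.
#
#     import pandas as pd
#
#     arr = pd.date_range("2020-01-01", periods=3, tz="UTC")._data  # DatetimeArray
#     arr[0] = pd.Timestamp("2020-06-01", tz="UTC")        # compatible: accepted
#     arr[1] = pd.Timestamp("2020-06-01", tz="US/Eastern")
#     # ValueError: Timezones don't match. 'UTC' != 'US/Eastern'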
def _validate_where_value(self, other): msg = f"Where requires matching dtype, not {type(other)}" @@ -893,20 +897,18 @@ def _validate_where_value(self, other): other = self._validate_scalar(other, msg) else: other = self._validate_listlike(other, "where") - self._check_compatible_with(other, setitem=True) - self._check_compatible_with(other, setitem=True) - return self._unbox(other) + return self._unbox(other, setitem=True) - def _unbox(self, other) -> Union[np.int64, np.ndarray]: + def _unbox(self, other, setitem: bool = False) -> Union[np.int64, np.ndarray]: """ Unbox either a scalar with _unbox_scalar or an instance of our own type. """ if lib.is_scalar(other): - other = self._unbox_scalar(other) + other = self._unbox_scalar(other, setitem=setitem) else: # same type as self - self._check_compatible_with(other) + self._check_compatible_with(other, setitem=setitem) other = other.view("i8") return other diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index 9f10cc84dcfcc..56e0a9861548f 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -451,11 +451,11 @@ def _generate_range( def _rebox_native(cls, value: int) -> np.datetime64: return np.int64(value).view("M8[ns]") - def _unbox_scalar(self, value): + def _unbox_scalar(self, value, setitem: bool = False): if not isinstance(value, self._scalar_type) and value is not NaT: raise ValueError("'value' should be a Timestamp.") if not isna(value): - self._check_compatible_with(value) + self._check_compatible_with(value, setitem=setitem) return value.value def _scalar_from_string(self, value): diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py index eea11bde77030..865b1680c008a 100644 --- a/pandas/core/arrays/period.py +++ b/pandas/core/arrays/period.py @@ -257,11 +257,13 @@ def _generate_range(cls, start, end, periods, freq, fields): def _rebox_native(cls, value: int) -> np.int64: return np.int64(value) - def _unbox_scalar(self, value: Union[Period, NaTType]) -> int: + def _unbox_scalar( + self, value: Union[Period, NaTType], setitem: bool = False + ) -> int: if value is NaT: return value.value elif isinstance(value, self._scalar_type): - self._check_compatible_with(value) + self._check_compatible_with(value, setitem=setitem) return value.ordinal else: raise ValueError(f"'value' should be a Period. 
Got '{value}' instead.") diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py index 5e3c0f2b8d876..3eaf428bc64b2 100644 --- a/pandas/core/arrays/timedeltas.py +++ b/pandas/core/arrays/timedeltas.py @@ -283,10 +283,10 @@ def _generate_range(cls, start, end, periods, freq, closed=None): def _rebox_native(cls, value: int) -> np.timedelta64: return np.int64(value).view("m8[ns]") - def _unbox_scalar(self, value): + def _unbox_scalar(self, value, setitem: bool = False): if not isinstance(value, self._scalar_type) and value is not NaT: raise ValueError("'value' should be a Timedelta.") - self._check_compatible_with(value) + self._check_compatible_with(value, setitem=setitem) return value.value def _scalar_from_string(self, value): diff --git a/pandas/tests/arrays/test_datetimelike.py b/pandas/tests/arrays/test_datetimelike.py index d2d3766959fbf..9d316c38082af 100644 --- a/pandas/tests/arrays/test_datetimelike.py +++ b/pandas/tests/arrays/test_datetimelike.py @@ -244,7 +244,7 @@ def test_searchsorted(self): # GH#29884 match numpy convention on whether NaT goes # at the end or the beginning result = arr.searchsorted(pd.NaT) - if _np_version_under1p18 or self.array_cls is PeriodArray: + if np_version_under1p18 or self.array_cls is PeriodArray: # Following numpy convention, NaT goes at the beginning # (unlike NaN which goes at the end) assert result == 0 From b5a5268dabb2a4dea1c3c543a1ddff501b87a447 Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Tue, 8 Sep 2020 18:33:41 -0700 Subject: [PATCH 489/632] STY: De-privatize imported names (#36235) --- pandas/_libs/interval.pyx | 4 +- pandas/core/arrays/_arrow_utils.py | 4 +- pandas/core/arrays/interval.py | 12 +++-- pandas/core/arrays/sparse/__init__.py | 2 +- pandas/core/arrays/sparse/array.py | 4 +- pandas/core/computation/engines.py | 2 +- pandas/core/computation/eval.py | 14 ++--- pandas/core/computation/expr.py | 4 +- pandas/core/config_init.py | 4 +- pandas/core/groupby/generic.py | 10 ++-- pandas/core/groupby/groupby.py | 20 +++---- pandas/core/indexes/base.py | 4 +- pandas/core/indexes/interval.py | 1 - pandas/core/reshape/merge.py | 4 +- pandas/io/formats/printing.py | 2 +- pandas/tests/arrays/sparse/test_libsparse.py | 56 +++++++++++--------- pandas/tests/computation/test_compat.py | 6 +-- pandas/tests/computation/test_eval.py | 12 ++--- 18 files changed, 88 insertions(+), 77 deletions(-) diff --git a/pandas/_libs/interval.pyx b/pandas/_libs/interval.pyx index 931ad8326c371..f8bcbcfb158b5 100644 --- a/pandas/_libs/interval.pyx +++ b/pandas/_libs/interval.pyx @@ -46,7 +46,7 @@ from pandas._libs.tslibs.util cimport ( is_timedelta64_object, ) -_VALID_CLOSED = frozenset(['left', 'right', 'both', 'neither']) +VALID_CLOSED = frozenset(['left', 'right', 'both', 'neither']) cdef class IntervalMixin: @@ -318,7 +318,7 @@ cdef class Interval(IntervalMixin): self._validate_endpoint(left) self._validate_endpoint(right) - if closed not in _VALID_CLOSED: + if closed not in VALID_CLOSED: raise ValueError(f"invalid option for 'closed': {closed}") if not left <= right: raise ValueError("left side of interval must be <= right side") diff --git a/pandas/core/arrays/_arrow_utils.py b/pandas/core/arrays/_arrow_utils.py index 4a33e0e841f7f..c89f5554d0715 100644 --- a/pandas/core/arrays/_arrow_utils.py +++ b/pandas/core/arrays/_arrow_utils.py @@ -4,7 +4,7 @@ import numpy as np import pyarrow -from pandas.core.arrays.interval import _VALID_CLOSED +from pandas.core.arrays.interval import VALID_CLOSED _pyarrow_version_ge_015 = 
LooseVersion(pyarrow.__version__) >= LooseVersion("0.15") @@ -83,7 +83,7 @@ class ArrowIntervalType(pyarrow.ExtensionType): def __init__(self, subtype, closed): # attributes need to be set first before calling # super init (as that calls serialize) - assert closed in _VALID_CLOSED + assert closed in VALID_CLOSED self._closed = closed if not isinstance(subtype, pyarrow.DataType): subtype = pyarrow.type_for_alias(str(subtype)) diff --git a/pandas/core/arrays/interval.py b/pandas/core/arrays/interval.py index d76e0fd628a48..1dbd3cfc6dca6 100644 --- a/pandas/core/arrays/interval.py +++ b/pandas/core/arrays/interval.py @@ -5,7 +5,12 @@ from pandas._config import get_option -from pandas._libs.interval import Interval, IntervalMixin, intervals_to_interval_bounds +from pandas._libs.interval import ( + VALID_CLOSED, + Interval, + IntervalMixin, + intervals_to_interval_bounds, +) from pandas.compat.numpy import function as nv from pandas.util._decorators import Appender @@ -42,7 +47,6 @@ from pandas.core.indexers import check_array_indexer from pandas.core.indexes.base import ensure_index -_VALID_CLOSED = {"left", "right", "both", "neither"} _interval_shared_docs = {} _shared_docs_kwargs = dict( @@ -475,7 +479,7 @@ def _validate(self): * left and right have the same missing values * left is always below right """ - if self.closed not in _VALID_CLOSED: + if self.closed not in VALID_CLOSED: msg = f"invalid option for 'closed': {self.closed}" raise ValueError(msg) if len(self.left) != len(self.right): @@ -1012,7 +1016,7 @@ def closed(self): ) ) def set_closed(self, closed): - if closed not in _VALID_CLOSED: + if closed not in VALID_CLOSED: msg = f"invalid option for 'closed': {closed}" raise ValueError(msg) diff --git a/pandas/core/arrays/sparse/__init__.py b/pandas/core/arrays/sparse/__init__.py index e928db499a771..e9ff4b7d4ffc2 100644 --- a/pandas/core/arrays/sparse/__init__.py +++ b/pandas/core/arrays/sparse/__init__.py @@ -5,6 +5,6 @@ BlockIndex, IntIndex, SparseArray, - _make_index, + make_sparse_index, ) from pandas.core.arrays.sparse.dtype import SparseDtype diff --git a/pandas/core/arrays/sparse/array.py b/pandas/core/arrays/sparse/array.py index 47c960dc969d6..853f7bb0b0d81 100644 --- a/pandas/core/arrays/sparse/array.py +++ b/pandas/core/arrays/sparse/array.py @@ -1556,7 +1556,7 @@ def make_sparse(arr: np.ndarray, kind="block", fill_value=None, dtype=None, copy else: indices = mask.nonzero()[0].astype(np.int32) - index = _make_index(length, indices, kind) + index = make_sparse_index(length, indices, kind) sparsified_values = arr[mask] if dtype is not None: sparsified_values = astype_nansafe(sparsified_values, dtype=dtype) @@ -1564,7 +1564,7 @@ def make_sparse(arr: np.ndarray, kind="block", fill_value=None, dtype=None, copy return sparsified_values, index, fill_value -def _make_index(length, indices, kind): +def make_sparse_index(length, indices, kind): if kind == "block" or isinstance(kind, BlockIndex): locs, lens = splib.get_blocks(indices) diff --git a/pandas/core/computation/engines.py b/pandas/core/computation/engines.py index 0cdc0f530a7f3..77a378369ca34 100644 --- a/pandas/core/computation/engines.py +++ b/pandas/core/computation/engines.py @@ -130,7 +130,7 @@ def _evaluate(self) -> None: pass -_engines: Dict[str, Type[AbstractEngine]] = { +ENGINES: Dict[str, Type[AbstractEngine]] = { "numexpr": NumExprEngine, "python": PythonEngine, } diff --git a/pandas/core/computation/eval.py b/pandas/core/computation/eval.py index f6a7935142a32..630606b4d8111 100644 --- 
a/pandas/core/computation/eval.py +++ b/pandas/core/computation/eval.py @@ -9,8 +9,8 @@ from pandas._libs.lib import no_default from pandas.util._validators import validate_bool_kwarg -from pandas.core.computation.engines import _engines -from pandas.core.computation.expr import Expr, _parsers +from pandas.core.computation.engines import ENGINES +from pandas.core.computation.expr import PARSERS, Expr from pandas.core.computation.parsing import tokenize_string from pandas.core.computation.scope import ensure_scope @@ -43,8 +43,8 @@ def _check_engine(engine: Optional[str]) -> str: if engine is None: engine = "numexpr" if NUMEXPR_INSTALLED else "python" - if engine not in _engines: - valid_engines = list(_engines.keys()) + if engine not in ENGINES: + valid_engines = list(ENGINES.keys()) raise KeyError( f"Invalid engine '{engine}' passed, valid engines are {valid_engines}" ) @@ -75,9 +75,9 @@ def _check_parser(parser: str): KeyError * If an invalid parser is passed """ - if parser not in _parsers: + if parser not in PARSERS: raise KeyError( - f"Invalid parser '{parser}' passed, valid parsers are {_parsers.keys()}" + f"Invalid parser '{parser}' passed, valid parsers are {PARSERS.keys()}" ) @@ -341,7 +341,7 @@ def eval( parsed_expr = Expr(expr, engine=engine, parser=parser, env=env) # construct the engine and evaluate the parsed expression - eng = _engines[engine] + eng = ENGINES[engine] eng_inst = eng(parsed_expr) ret = eng_inst.evaluate() diff --git a/pandas/core/computation/expr.py b/pandas/core/computation/expr.py index 8cff6abc071ca..f5897277d83bf 100644 --- a/pandas/core/computation/expr.py +++ b/pandas/core/computation/expr.py @@ -782,7 +782,7 @@ def __init__( self.env = env or Scope(level=level + 1) self.engine = engine self.parser = parser - self._visitor = _parsers[parser](self.env, self.engine, self.parser) + self._visitor = PARSERS[parser](self.env, self.engine, self.parser) self.terms = self.parse() @property @@ -814,4 +814,4 @@ def names(self): return frozenset(term.name for term in com.flatten(self.terms)) -_parsers = {"python": PythonExprVisitor, "pandas": PandasExprVisitor} +PARSERS = {"python": PythonExprVisitor, "pandas": PandasExprVisitor} diff --git a/pandas/core/config_init.py b/pandas/core/config_init.py index 0c23f1b4bcdf2..bfe20551cbcfc 100644 --- a/pandas/core/config_init.py +++ b/pandas/core/config_init.py @@ -314,9 +314,9 @@ def use_numba_cb(key): def table_schema_cb(key): - from pandas.io.formats.printing import _enable_data_resource_formatter + from pandas.io.formats.printing import enable_data_resource_formatter - _enable_data_resource_formatter(cf.get_option(key)) + enable_data_resource_formatter(cf.get_option(key)) def is_terminal() -> bool: diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index 72003eab24b29..e870187fc7952 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -70,9 +70,9 @@ GroupBy, _agg_template, _apply_docs, - _group_selection_context, _transform_template, get_groupby, + group_selection_context, ) from pandas.core.groupby.numba_ import generate_numba_func, split_for_numba from pandas.core.indexes.api import Index, MultiIndex, all_indexes_same @@ -230,7 +230,7 @@ def aggregate(self, func=None, *args, engine=None, engine_kwargs=None, **kwargs) raise NotImplementedError( "Numba engine can only be used with a single function." 
) - with _group_selection_context(self): + with group_selection_context(self): data = self._selected_obj result, index = self._aggregate_with_numba( data.to_frame(), func, *args, engine_kwargs=engine_kwargs, **kwargs @@ -685,7 +685,7 @@ def value_counts( self, normalize=False, sort=True, ascending=False, bins=None, dropna=True ): - from pandas.core.reshape.merge import _get_join_indexers + from pandas.core.reshape.merge import get_join_indexers from pandas.core.reshape.tile import cut if bins is not None and not np.iterable(bins): @@ -787,7 +787,7 @@ def value_counts( right = [diff.cumsum() - 1, codes[-1]] - _, idx = _get_join_indexers(left, right, sort=False, how="left") + _, idx = get_join_indexers(left, right, sort=False, how="left") out = np.where(idx != -1, out[idx], 0) if sort: @@ -942,7 +942,7 @@ def aggregate(self, func=None, *args, engine=None, engine_kwargs=None, **kwargs) raise NotImplementedError( "Numba engine can only be used with a single function." ) - with _group_selection_context(self): + with group_selection_context(self): data = self._selected_obj result, index = self._aggregate_with_numba( data, func, *args, engine_kwargs=engine_kwargs, **kwargs diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index 6ef2e67030881..1e3e56f4ff09f 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -459,9 +459,9 @@ def f(self): @contextmanager -def _group_selection_context(groupby: "_GroupBy"): +def group_selection_context(groupby: "_GroupBy"): """ - Set / reset the _group_selection_context. + Set / reset the group_selection_context. """ groupby._set_group_selection() try: @@ -737,7 +737,7 @@ def pipe(self, func, *args, **kwargs): def _make_wrapper(self, name: str) -> Callable: assert name in self._apply_allowlist - with _group_selection_context(self): + with group_selection_context(self): # need to setup the selection # as are not passed directly but in the grouper f = getattr(self._obj_with_exclusions, name) @@ -868,7 +868,7 @@ def f(g): # fails on *some* columns, e.g. 
a numeric operation # on a string grouper column - with _group_selection_context(self): + with group_selection_context(self): return self._python_apply_general(f, self._selected_obj) return result @@ -994,7 +994,7 @@ def _agg_general( alias: str, npfunc: Callable, ): - with _group_selection_context(self): + with group_selection_context(self): # try a cython aggregation if we can try: return self._cython_agg_general( @@ -1499,7 +1499,7 @@ def var(self, ddof: int = 1): ) else: func = lambda x: x.var(ddof=ddof) - with _group_selection_context(self): + with group_selection_context(self): return self._python_agg_general(func) @Substitution(name="groupby") @@ -1658,7 +1658,7 @@ def ohlc(self) -> DataFrame: @doc(DataFrame.describe) def describe(self, **kwargs): - with _group_selection_context(self): + with group_selection_context(self): result = self.apply(lambda x: x.describe(**kwargs)) if self.axis == 1: return result.T @@ -1963,7 +1963,7 @@ def nth(self, n: Union[int, List[int]], dropna: Optional[str] = None) -> DataFra nth_values = list(set(n)) nth_array = np.array(nth_values, dtype=np.intp) - with _group_selection_context(self): + with group_selection_context(self): mask_left = np.in1d(self._cumcount_array(), nth_array) mask_right = np.in1d( @@ -2226,7 +2226,7 @@ def ngroup(self, ascending: bool = True): 5 0 dtype: int64 """ - with _group_selection_context(self): + with group_selection_context(self): index = self._selected_obj.index result = self._obj_1d_constructor(self.grouper.group_info[0], index) if not ascending: @@ -2287,7 +2287,7 @@ def cumcount(self, ascending: bool = True): 5 0 dtype: int64 """ - with _group_selection_context(self): + with group_selection_context(self): index = self._selected_obj.index cumcounts = self._cumcount_array(ascending=ascending) return self._obj_1d_constructor(cumcounts, index) diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 526dae7e256b7..8014b16d07b01 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -3660,7 +3660,7 @@ def _join_multi(self, other, how, return_indexers=True): return result def _join_non_unique(self, other, how="left", return_indexers=False): - from pandas.core.reshape.merge import _get_join_indexers + from pandas.core.reshape.merge import get_join_indexers # We only get here if dtypes match assert self.dtype == other.dtype @@ -3668,7 +3668,7 @@ def _join_non_unique(self, other, how="left", return_indexers=False): lvalues = self._get_engine_target() rvalues = other._get_engine_target() - left_idx, right_idx = _get_join_indexers( + left_idx, right_idx = get_join_indexers( [lvalues], [rvalues], how=how, sort=True ) diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index 3f72577c9420e..154f41bf07928 100644 --- a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -59,7 +59,6 @@ if TYPE_CHECKING: from pandas import CategoricalIndex # noqa:F401 -_VALID_CLOSED = {"left", "right", "both", "neither"} _index_doc_kwargs = dict(ibase._index_doc_kwargs) _index_doc_kwargs.update( diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py index 030dec369c2be..9f19ea9aefe09 100644 --- a/pandas/core/reshape/merge.py +++ b/pandas/core/reshape/merge.py @@ -859,7 +859,7 @@ def _maybe_add_join_keys(self, result, left_indexer, right_indexer): def _get_join_indexers(self): """ return the join indexers """ - return _get_join_indexers( + return get_join_indexers( self.left_join_keys, self.right_join_keys, sort=self.sort, how=self.how ) @@ 
-1298,7 +1298,7 @@ def _validate(self, validate: str): raise ValueError("Not a valid argument for validate") -def _get_join_indexers( +def get_join_indexers( left_keys, right_keys, sort: bool = False, how: str = "inner", **kwargs ): """ diff --git a/pandas/io/formats/printing.py b/pandas/io/formats/printing.py index edc6fbfff61d7..0d2ca83f1012e 100644 --- a/pandas/io/formats/printing.py +++ b/pandas/io/formats/printing.py @@ -243,7 +243,7 @@ def pprint_thing_encoded( return value.encode(encoding, errors) -def _enable_data_resource_formatter(enable: bool) -> None: +def enable_data_resource_formatter(enable: bool) -> None: if "IPython" not in sys.modules: # definitely not in IPython return diff --git a/pandas/tests/arrays/sparse/test_libsparse.py b/pandas/tests/arrays/sparse/test_libsparse.py index a2f861d378e67..2d6e657debdb2 100644 --- a/pandas/tests/arrays/sparse/test_libsparse.py +++ b/pandas/tests/arrays/sparse/test_libsparse.py @@ -8,7 +8,7 @@ from pandas import Series import pandas._testing as tm -from pandas.core.arrays.sparse import BlockIndex, IntIndex, _make_index +from pandas.core.arrays.sparse import BlockIndex, IntIndex, make_sparse_index TEST_LENGTH = 20 @@ -273,41 +273,43 @@ def test_intersect_identical(self): class TestSparseIndexCommon: def test_int_internal(self): - idx = _make_index(4, np.array([2, 3], dtype=np.int32), kind="integer") + idx = make_sparse_index(4, np.array([2, 3], dtype=np.int32), kind="integer") assert isinstance(idx, IntIndex) assert idx.npoints == 2 tm.assert_numpy_array_equal(idx.indices, np.array([2, 3], dtype=np.int32)) - idx = _make_index(4, np.array([], dtype=np.int32), kind="integer") + idx = make_sparse_index(4, np.array([], dtype=np.int32), kind="integer") assert isinstance(idx, IntIndex) assert idx.npoints == 0 tm.assert_numpy_array_equal(idx.indices, np.array([], dtype=np.int32)) - idx = _make_index(4, np.array([0, 1, 2, 3], dtype=np.int32), kind="integer") + idx = make_sparse_index( + 4, np.array([0, 1, 2, 3], dtype=np.int32), kind="integer" + ) assert isinstance(idx, IntIndex) assert idx.npoints == 4 tm.assert_numpy_array_equal(idx.indices, np.array([0, 1, 2, 3], dtype=np.int32)) def test_block_internal(self): - idx = _make_index(4, np.array([2, 3], dtype=np.int32), kind="block") + idx = make_sparse_index(4, np.array([2, 3], dtype=np.int32), kind="block") assert isinstance(idx, BlockIndex) assert idx.npoints == 2 tm.assert_numpy_array_equal(idx.blocs, np.array([2], dtype=np.int32)) tm.assert_numpy_array_equal(idx.blengths, np.array([2], dtype=np.int32)) - idx = _make_index(4, np.array([], dtype=np.int32), kind="block") + idx = make_sparse_index(4, np.array([], dtype=np.int32), kind="block") assert isinstance(idx, BlockIndex) assert idx.npoints == 0 tm.assert_numpy_array_equal(idx.blocs, np.array([], dtype=np.int32)) tm.assert_numpy_array_equal(idx.blengths, np.array([], dtype=np.int32)) - idx = _make_index(4, np.array([0, 1, 2, 3], dtype=np.int32), kind="block") + idx = make_sparse_index(4, np.array([0, 1, 2, 3], dtype=np.int32), kind="block") assert isinstance(idx, BlockIndex) assert idx.npoints == 4 tm.assert_numpy_array_equal(idx.blocs, np.array([0], dtype=np.int32)) tm.assert_numpy_array_equal(idx.blengths, np.array([4], dtype=np.int32)) - idx = _make_index(4, np.array([0, 2, 3], dtype=np.int32), kind="block") + idx = make_sparse_index(4, np.array([0, 2, 3], dtype=np.int32), kind="block") assert isinstance(idx, BlockIndex) assert idx.npoints == 3 tm.assert_numpy_array_equal(idx.blocs, np.array([0, 2], dtype=np.int32)) @@ -315,7 +317,7 @@ 
def test_block_internal(self): def test_lookup(self): for kind in ["integer", "block"]: - idx = _make_index(4, np.array([2, 3], dtype=np.int32), kind=kind) + idx = make_sparse_index(4, np.array([2, 3], dtype=np.int32), kind=kind) assert idx.lookup(-1) == -1 assert idx.lookup(0) == -1 assert idx.lookup(1) == -1 @@ -323,12 +325,14 @@ def test_lookup(self): assert idx.lookup(3) == 1 assert idx.lookup(4) == -1 - idx = _make_index(4, np.array([], dtype=np.int32), kind=kind) + idx = make_sparse_index(4, np.array([], dtype=np.int32), kind=kind) for i in range(-1, 5): assert idx.lookup(i) == -1 - idx = _make_index(4, np.array([0, 1, 2, 3], dtype=np.int32), kind=kind) + idx = make_sparse_index( + 4, np.array([0, 1, 2, 3], dtype=np.int32), kind=kind + ) assert idx.lookup(-1) == -1 assert idx.lookup(0) == 0 assert idx.lookup(1) == 1 @@ -336,7 +340,7 @@ def test_lookup(self): assert idx.lookup(3) == 3 assert idx.lookup(4) == -1 - idx = _make_index(4, np.array([0, 2, 3], dtype=np.int32), kind=kind) + idx = make_sparse_index(4, np.array([0, 2, 3], dtype=np.int32), kind=kind) assert idx.lookup(-1) == -1 assert idx.lookup(0) == 0 assert idx.lookup(1) == -1 @@ -346,7 +350,7 @@ def test_lookup(self): def test_lookup_array(self): for kind in ["integer", "block"]: - idx = _make_index(4, np.array([2, 3], dtype=np.int32), kind=kind) + idx = make_sparse_index(4, np.array([2, 3], dtype=np.int32), kind=kind) res = idx.lookup_array(np.array([-1, 0, 2], dtype=np.int32)) exp = np.array([-1, -1, 0], dtype=np.int32) @@ -356,11 +360,13 @@ def test_lookup_array(self): exp = np.array([-1, 0, -1, 1], dtype=np.int32) tm.assert_numpy_array_equal(res, exp) - idx = _make_index(4, np.array([], dtype=np.int32), kind=kind) + idx = make_sparse_index(4, np.array([], dtype=np.int32), kind=kind) res = idx.lookup_array(np.array([-1, 0, 2, 4], dtype=np.int32)) exp = np.array([-1, -1, -1, -1], dtype=np.int32) - idx = _make_index(4, np.array([0, 1, 2, 3], dtype=np.int32), kind=kind) + idx = make_sparse_index( + 4, np.array([0, 1, 2, 3], dtype=np.int32), kind=kind + ) res = idx.lookup_array(np.array([-1, 0, 2], dtype=np.int32)) exp = np.array([-1, 0, 2], dtype=np.int32) tm.assert_numpy_array_equal(res, exp) @@ -369,7 +375,7 @@ def test_lookup_array(self): exp = np.array([-1, 2, 1, 3], dtype=np.int32) tm.assert_numpy_array_equal(res, exp) - idx = _make_index(4, np.array([0, 2, 3], dtype=np.int32), kind=kind) + idx = make_sparse_index(4, np.array([0, 2, 3], dtype=np.int32), kind=kind) res = idx.lookup_array(np.array([2, 1, 3, 0], dtype=np.int32)) exp = np.array([1, -1, 2, 0], dtype=np.int32) tm.assert_numpy_array_equal(res, exp) @@ -402,25 +408,25 @@ def _check(index): class TestBlockIndex: def test_block_internal(self): - idx = _make_index(4, np.array([2, 3], dtype=np.int32), kind="block") + idx = make_sparse_index(4, np.array([2, 3], dtype=np.int32), kind="block") assert isinstance(idx, BlockIndex) assert idx.npoints == 2 tm.assert_numpy_array_equal(idx.blocs, np.array([2], dtype=np.int32)) tm.assert_numpy_array_equal(idx.blengths, np.array([2], dtype=np.int32)) - idx = _make_index(4, np.array([], dtype=np.int32), kind="block") + idx = make_sparse_index(4, np.array([], dtype=np.int32), kind="block") assert isinstance(idx, BlockIndex) assert idx.npoints == 0 tm.assert_numpy_array_equal(idx.blocs, np.array([], dtype=np.int32)) tm.assert_numpy_array_equal(idx.blengths, np.array([], dtype=np.int32)) - idx = _make_index(4, np.array([0, 1, 2, 3], dtype=np.int32), kind="block") + idx = make_sparse_index(4, np.array([0, 1, 2, 3], dtype=np.int32), 
kind="block") assert isinstance(idx, BlockIndex) assert idx.npoints == 4 tm.assert_numpy_array_equal(idx.blocs, np.array([0], dtype=np.int32)) tm.assert_numpy_array_equal(idx.blengths, np.array([4], dtype=np.int32)) - idx = _make_index(4, np.array([0, 2, 3], dtype=np.int32), kind="block") + idx = make_sparse_index(4, np.array([0, 2, 3], dtype=np.int32), kind="block") assert isinstance(idx, BlockIndex) assert idx.npoints == 3 tm.assert_numpy_array_equal(idx.blocs, np.array([0, 2], dtype=np.int32)) @@ -428,7 +434,7 @@ def test_block_internal(self): def test_make_block_boundary(self): for i in [5, 10, 100, 101]: - idx = _make_index(i, np.arange(0, i, 2, dtype=np.int32), kind="block") + idx = make_sparse_index(i, np.arange(0, i, 2, dtype=np.int32), kind="block") exp = np.arange(0, i, 2, dtype=np.int32) tm.assert_numpy_array_equal(idx.blocs, exp) @@ -514,17 +520,19 @@ def test_check_integrity(self): IntIndex(length=5, indices=[1, 3, 3]) def test_int_internal(self): - idx = _make_index(4, np.array([2, 3], dtype=np.int32), kind="integer") + idx = make_sparse_index(4, np.array([2, 3], dtype=np.int32), kind="integer") assert isinstance(idx, IntIndex) assert idx.npoints == 2 tm.assert_numpy_array_equal(idx.indices, np.array([2, 3], dtype=np.int32)) - idx = _make_index(4, np.array([], dtype=np.int32), kind="integer") + idx = make_sparse_index(4, np.array([], dtype=np.int32), kind="integer") assert isinstance(idx, IntIndex) assert idx.npoints == 0 tm.assert_numpy_array_equal(idx.indices, np.array([], dtype=np.int32)) - idx = _make_index(4, np.array([0, 1, 2, 3], dtype=np.int32), kind="integer") + idx = make_sparse_index( + 4, np.array([0, 1, 2, 3], dtype=np.int32), kind="integer" + ) assert isinstance(idx, IntIndex) assert idx.npoints == 4 tm.assert_numpy_array_equal(idx.indices, np.array([0, 1, 2, 3], dtype=np.int32)) diff --git a/pandas/tests/computation/test_compat.py b/pandas/tests/computation/test_compat.py index ead102f532a20..9fc3ed4800d09 100644 --- a/pandas/tests/computation/test_compat.py +++ b/pandas/tests/computation/test_compat.py @@ -5,7 +5,7 @@ from pandas.compat._optional import VERSIONS import pandas as pd -from pandas.core.computation.engines import _engines +from pandas.core.computation.engines import ENGINES import pandas.core.computation.expr as expr @@ -26,8 +26,8 @@ def test_compat(): pytest.skip("not testing numexpr version compat") -@pytest.mark.parametrize("engine", _engines) -@pytest.mark.parametrize("parser", expr._parsers) +@pytest.mark.parametrize("engine", ENGINES) +@pytest.mark.parametrize("parser", expr.PARSERS) def test_invalid_numexpr_version(engine, parser): def testit(): a, b = 1, 2 # noqa diff --git a/pandas/tests/computation/test_eval.py b/pandas/tests/computation/test_eval.py index 72dc04e68c154..cca64a6bf487c 100644 --- a/pandas/tests/computation/test_eval.py +++ b/pandas/tests/computation/test_eval.py @@ -19,7 +19,7 @@ import pandas._testing as tm from pandas.core.computation import pytables from pandas.core.computation.check import NUMEXPR_VERSION -from pandas.core.computation.engines import NumExprClobberingError, _engines +from pandas.core.computation.engines import ENGINES, NumExprClobberingError import pandas.core.computation.expr as expr from pandas.core.computation.expr import ( BaseExprVisitor, @@ -46,14 +46,14 @@ f"installed->{NUMEXPR_INSTALLED}", ), ) - for engine in _engines + for engine in ENGINES ) ) # noqa def engine(request): return request.param -@pytest.fixture(params=expr._parsers) +@pytest.fixture(params=expr.PARSERS) def parser(request): 
return request.param @@ -77,7 +77,7 @@ def unary_fns_for_ne(): def engine_has_neg_frac(engine): - return _engines[engine].has_neg_frac + return ENGINES[engine].has_neg_frac def _eval_single_bin(lhs, cmp1, rhs, engine): @@ -168,7 +168,7 @@ def setup_ops(self): def setup_method(self, method): self.setup_ops() self.setup_data() - self.current_engines = (engine for engine in _engines if engine != self.engine) + self.current_engines = (engine for engine in ENGINES if engine != self.engine) def teardown_method(self, method): del self.lhses, self.rhses, self.scalar_rhses, self.scalar_lhses @@ -1921,7 +1921,7 @@ def test_invalid_parser(): } -@pytest.mark.parametrize("engine", _engines) +@pytest.mark.parametrize("engine", ENGINES) @pytest.mark.parametrize("parser", _parsers) def test_disallowed_nodes(engine, parser): VisitorClass = _parsers[parser] From 44e933a765f943c03d72580c05685064ef67e581 Mon Sep 17 00:00:00 2001 From: Maxim Ivanov <41443370+ivanovmg@users.noreply.github.com> Date: Wed, 9 Sep 2020 17:33:10 +0700 Subject: [PATCH 490/632] REF: simplify CSVFormatter (#36046) * REF: extract properties cols and has_mi_columns * REF: extract property chunksize * REF: extract property quotechar * REF: extract properties data_index and nlevels * REF: refactor _save_chunk * REF: refactor _save * REF: extract method _save_body * REF: reorder _save-like methods * REF: extract compression property * REF: Extract property index_label * REF: extract helper properties * REF: delete local variables in _save_header * REF: extract method _get_header_rows * REF: move check for header into _save function * TYP: add several type annotations * FIX: fix index labels * FIX: fix multiindex * FIX: fix test failures on compression Needed to eliminate compression setter due to the interdependencies between ioargs and compression. * REF: eliminate preallocation of self.data * REF: extract method _convert_to_native_types * REF: rename regular -> flat as reviewed * TYP: add type annotations as reviewed * REF: refactor number formatting Replace _convert_to_native_types method in favor of a number formatting dictionary. * FIX: mypy error with index_label * FIX: reorder if-statements in index_label To make sure that the newer mypy (v0.782) passes. * TYP: move IndexLabel to pandas._typing This eliminates repetition of the type annotations for index label in multiple places. * TYP: quotechar, has_mi_columns, _need_to_save... * TYP: chunksize, but ignored assignment check For some reason mypy would not recognize that chunksize turns from Optional[int] to int inside the setter. Even setting an intentional assertion ``assert chunksize is not None`` does not help. * TYP: cols property Limitations: - ignore type[assignment] error. - Created additional method _refine_cols to allow conversion from Optional[Sequence[Label]] to Sequence[Label]. * TYP: nlevels and _has_aliases * CLN: move GH21227 check to pandas/io/common.py * TYP: remove redundant bool from IndexLabel type * TYP: add to _get_index_label... 
methods * TYP: use Iterator instead of Generator * TYP: explicitly use List type * TYP: correct dict typing * TYP: remaining properties --- pandas/_typing.py | 2 + pandas/core/generic.py | 3 +- pandas/io/common.py | 15 ++ pandas/io/formats/csvs.py | 361 +++++++++++++++++++------------------- 4 files changed, 202 insertions(+), 179 deletions(-) diff --git a/pandas/_typing.py b/pandas/_typing.py index b237013ac7805..7aef5c02e290f 100644 --- a/pandas/_typing.py +++ b/pandas/_typing.py @@ -15,6 +15,7 @@ List, Mapping, Optional, + Sequence, Type, TypeVar, Union, @@ -82,6 +83,7 @@ Axis = Union[str, int] Label = Optional[Hashable] +IndexLabel = Optional[Union[Label, Sequence[Label]]] Level = Union[Label, int] Ordered = Optional[bool] JSONSerializable = Optional[Union[PythonScalar, List, Dict]] diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 40f0c6200e835..fffd2e068ebcf 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -40,6 +40,7 @@ CompressionOptions, FilePathOrBuffer, FrameOrSeries, + IndexLabel, JSONSerializable, Label, Level, @@ -3160,7 +3161,7 @@ def to_csv( columns: Optional[Sequence[Label]] = None, header: Union[bool_t, List[str]] = True, index: bool_t = True, - index_label: Optional[Union[bool_t, str, Sequence[Label]]] = None, + index_label: IndexLabel = None, mode: str = "w", encoding: Optional[str] = None, compression: CompressionOptions = "infer", diff --git a/pandas/io/common.py b/pandas/io/common.py index 3f130401558dd..f177e08ac0089 100644 --- a/pandas/io/common.py +++ b/pandas/io/common.py @@ -208,6 +208,21 @@ def get_filepath_or_buffer( # handle compression dict compression_method, compression = get_compression_method(compression) compression_method = infer_compression(filepath_or_buffer, compression_method) + + # GH21227 internal compression is not used for non-binary handles. 
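+ # (a handle already opened in text mode cannot accept the compressed
+ # bytes, so compression is dropped below and a RuntimeWarning is raised)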
+ if ( + compression_method + and hasattr(filepath_or_buffer, "write") + and mode + and "b" not in mode + ): + warnings.warn( + "compression has no effect when passing a non-binary object as input.", + RuntimeWarning, + stacklevel=2, + ) + compression_method = None + compression = dict(compression, method=compression_method) # bz2 and xz do not write the byte order mark for utf-16 and utf-32 diff --git a/pandas/io/formats/csvs.py b/pandas/io/formats/csvs.py index 15cd5c026c6b6..90ab6f61f4d74 100644 --- a/pandas/io/formats/csvs.py +++ b/pandas/io/formats/csvs.py @@ -5,13 +5,18 @@ import csv as csvlib from io import StringIO, TextIOWrapper import os -from typing import Hashable, List, Optional, Sequence, Union -import warnings +from typing import Any, Dict, Hashable, Iterator, List, Optional, Sequence, Union import numpy as np from pandas._libs import writers as libwriters -from pandas._typing import CompressionOptions, FilePathOrBuffer, StorageOptions +from pandas._typing import ( + CompressionOptions, + FilePathOrBuffer, + IndexLabel, + Label, + StorageOptions, +) from pandas.core.dtypes.generic import ( ABCDatetimeIndex, @@ -21,6 +26,8 @@ ) from pandas.core.dtypes.missing import notna +from pandas.core.indexes.api import Index + from pandas.io.common import get_filepath_or_buffer, get_handle @@ -32,10 +39,10 @@ def __init__( sep: str = ",", na_rep: str = "", float_format: Optional[str] = None, - cols=None, + cols: Optional[Sequence[Label]] = None, header: Union[bool, Sequence[Hashable]] = True, index: bool = True, - index_label: Optional[Union[bool, Hashable, Sequence[Hashable]]] = None, + index_label: IndexLabel = None, mode: str = "w", encoding: Optional[str] = None, errors: str = "strict", @@ -43,7 +50,7 @@ def __init__( quoting: Optional[int] = None, line_terminator="\n", chunksize: Optional[int] = None, - quotechar='"', + quotechar: Optional[str] = '"', date_format: Optional[str] = None, doublequote: bool = True, escapechar: Optional[str] = None, @@ -52,16 +59,19 @@ def __init__( ): self.obj = obj + self.encoding = encoding or "utf-8" + if path_or_buf is None: path_or_buf = StringIO() ioargs = get_filepath_or_buffer( path_or_buf, - encoding=encoding, + encoding=self.encoding, compression=compression, mode=mode, storage_options=storage_options, ) + self.compression = ioargs.compression.pop("method") self.compression_args = ioargs.compression self.path_or_buf = ioargs.filepath_or_buffer @@ -72,46 +82,79 @@ def __init__( self.na_rep = na_rep self.float_format = float_format self.decimal = decimal - self.header = header self.index = index self.index_label = index_label - if encoding is None: - encoding = "utf-8" - self.encoding = encoding self.errors = errors + self.quoting = quoting or csvlib.QUOTE_MINIMAL + self.quotechar = quotechar + self.doublequote = doublequote + self.escapechar = escapechar + self.line_terminator = line_terminator or os.linesep + self.date_format = date_format + self.cols = cols # type: ignore[assignment] + self.chunksize = chunksize # type: ignore[assignment] + + @property + def index_label(self) -> IndexLabel: + return self._index_label + + @index_label.setter + def index_label(self, index_label: IndexLabel) -> None: + if index_label is not False: + if index_label is None: + index_label = self._get_index_label_from_obj() + elif not isinstance(index_label, (list, tuple, np.ndarray, ABCIndexClass)): + # given a string for a DF with Index + index_label = [index_label] + self._index_label = index_label + + def _get_index_label_from_obj(self) -> List[str]: + if 
isinstance(self.obj.index, ABCMultiIndex): + return self._get_index_label_multiindex() + else: + return self._get_index_label_flat() + + def _get_index_label_multiindex(self) -> List[str]: + return [name or "" for name in self.obj.index.names] - if quoting is None: - quoting = csvlib.QUOTE_MINIMAL - self.quoting = quoting + def _get_index_label_flat(self) -> List[str]: + index_label = self.obj.index.name + return [""] if index_label is None else [index_label] - if quoting == csvlib.QUOTE_NONE: + @property + def quotechar(self) -> Optional[str]: + if self.quoting != csvlib.QUOTE_NONE: # prevents crash in _csv - quotechar = None - self.quotechar = quotechar + return self._quotechar + return None - self.doublequote = doublequote - self.escapechar = escapechar + @quotechar.setter + def quotechar(self, quotechar: Optional[str]) -> None: + self._quotechar = quotechar - self.line_terminator = line_terminator or os.linesep + @property + def has_mi_columns(self) -> bool: + return bool(isinstance(self.obj.columns, ABCMultiIndex)) - self.date_format = date_format + @property + def cols(self) -> Sequence[Label]: + return self._cols - self.has_mi_columns = isinstance(obj.columns, ABCMultiIndex) + @cols.setter + def cols(self, cols: Optional[Sequence[Label]]) -> None: + self._cols = self._refine_cols(cols) + def _refine_cols(self, cols: Optional[Sequence[Label]]) -> Sequence[Label]: # validate mi options if self.has_mi_columns: if cols is not None: - raise TypeError("cannot specify cols with a MultiIndex on the columns") + msg = "cannot specify cols with a MultiIndex on the columns" + raise TypeError(msg) if cols is not None: if isinstance(cols, ABCIndexClass): - cols = cols.to_native_types( - na_rep=na_rep, - float_format=float_format, - date_format=date_format, - quoting=self.quoting, - ) + cols = cols.to_native_types(**self._number_format) else: cols = list(cols) self.obj = self.obj.loc[:, cols] @@ -120,58 +163,90 @@ def __init__( # and make sure cols is just a list of labels cols = self.obj.columns if isinstance(cols, ABCIndexClass): - cols = cols.to_native_types( - na_rep=na_rep, - float_format=float_format, - date_format=date_format, - quoting=self.quoting, - ) + return cols.to_native_types(**self._number_format) else: - cols = list(cols) + assert isinstance(cols, Sequence) + return list(cols) - # save it - self.cols = cols + @property + def _number_format(self) -> Dict[str, Any]: + """Dictionary used for storing number formatting settings.""" + return dict( + na_rep=self.na_rep, + float_format=self.float_format, + date_format=self.date_format, + quoting=self.quoting, + decimal=self.decimal, + ) - # preallocate data 2d list - ncols = self.obj.shape[-1] - self.data = [None] * ncols + @property + def chunksize(self) -> int: + return self._chunksize + @chunksize.setter + def chunksize(self, chunksize: Optional[int]) -> None: if chunksize is None: chunksize = (100000 // (len(self.cols) or 1)) or 1 - self.chunksize = int(chunksize) + assert chunksize is not None + self._chunksize = int(chunksize) - self.data_index = obj.index + @property + def data_index(self) -> Index: + data_index = self.obj.index if ( - isinstance(self.data_index, (ABCDatetimeIndex, ABCPeriodIndex)) - and date_format is not None + isinstance(data_index, (ABCDatetimeIndex, ABCPeriodIndex)) + and self.date_format is not None ): - from pandas import Index - - self.data_index = Index( - [x.strftime(date_format) if notna(x) else "" for x in self.data_index] + data_index = Index( + [x.strftime(self.date_format) if notna(x) else ""
for x in data_index] ) + return data_index + + @property + def nlevels(self) -> int: + if self.index: + return getattr(self.data_index, "nlevels", 1) + else: + return 0 + + @property + def _has_aliases(self) -> bool: + return isinstance(self.header, (tuple, list, np.ndarray, ABCIndexClass)) + + @property + def _need_to_save_header(self) -> bool: + return bool(self._has_aliases or self.header) + + @property + def write_cols(self) -> Sequence[Label]: + if self._has_aliases: + assert not isinstance(self.header, bool) + if len(self.header) != len(self.cols): + raise ValueError( + f"Writing {len(self.cols)} cols but got {len(self.header)} aliases" + ) + else: + return self.header + else: + return self.cols + + @property + def encoded_labels(self) -> List[Label]: + encoded_labels: List[Label] = [] + + if self.index and self.index_label: + assert isinstance(self.index_label, Sequence) + encoded_labels = list(self.index_label) - self.nlevels = getattr(self.data_index, "nlevels", 1) - if not index: - self.nlevels = 0 + if not self.has_mi_columns or self._has_aliases: + encoded_labels += list(self.write_cols) + + return encoded_labels def save(self) -> None: """ Create the writer & save. """ - # GH21227 internal compression is not used for non-binary handles. - if ( - self.compression - and hasattr(self.path_or_buf, "write") - and "b" not in self.mode - ): - warnings.warn( - "compression has no effect when passing a non-binary object as input.", - RuntimeWarning, - stacklevel=2, - ) - self.compression = None - # get a handle or wrap an existing handle to take care of 1) compression and # 2) text -> byte conversion f, handles = get_handle( @@ -215,133 +290,63 @@ def save(self) -> None: for _fh in handles: _fh.close() - def _save_header(self): - writer = self.writer - obj = self.obj - index_label = self.index_label - cols = self.cols - has_mi_columns = self.has_mi_columns - header = self.header - encoded_labels: List[str] = [] - - has_aliases = isinstance(header, (tuple, list, np.ndarray, ABCIndexClass)) - if not (has_aliases or self.header): - return - if has_aliases: - if len(header) != len(cols): - raise ValueError( - f"Writing {len(cols)} cols but got {len(header)} aliases" - ) - else: - write_cols = header - else: - write_cols = cols - - if self.index: - # should write something for index label - if index_label is not False: - if index_label is None: - if isinstance(obj.index, ABCMultiIndex): - index_label = [] - for i, name in enumerate(obj.index.names): - if name is None: - name = "" - index_label.append(name) - else: - index_label = obj.index.name - if index_label is None: - index_label = [""] - else: - index_label = [index_label] - elif not isinstance( - index_label, (list, tuple, np.ndarray, ABCIndexClass) - ): - # given a string for a DF with Index - index_label = [index_label] - - encoded_labels = list(index_label) - else: - encoded_labels = [] - - if not has_mi_columns or has_aliases: - encoded_labels += list(write_cols) - writer.writerow(encoded_labels) - else: - # write out the mi - columns = obj.columns - - # write out the names for each level, then ALL of the values for - # each level - for i in range(columns.nlevels): - - # we need at least 1 index column to write our col names - col_line = [] - if self.index: - - # name is the first column - col_line.append(columns.names[i]) - - if isinstance(index_label, list) and len(index_label) > 1: - col_line.extend([""] * (len(index_label) - 1)) - - col_line.extend(columns._get_level_values(i)) - - writer.writerow(col_line) - - # Write out the 
index line if it's not empty. - # Otherwise, we will print out an extraneous - # blank line between the mi and the data rows. - if encoded_labels and set(encoded_labels) != {""}: - encoded_labels.extend([""] * len(columns)) - writer.writerow(encoded_labels) - def _save(self) -> None: - self._save_header() + if self._need_to_save_header: + self._save_header() + self._save_body() + def _save_header(self) -> None: + if not self.has_mi_columns or self._has_aliases: + self.writer.writerow(self.encoded_labels) + else: + for row in self._generate_multiindex_header_rows(): + self.writer.writerow(row) + + def _generate_multiindex_header_rows(self) -> Iterator[List[Label]]: + columns = self.obj.columns + for i in range(columns.nlevels): + # we need at least 1 index column to write our col names + col_line = [] + if self.index: + # name is the first column + col_line.append(columns.names[i]) + + if isinstance(self.index_label, list) and len(self.index_label) > 1: + col_line.extend([""] * (len(self.index_label) - 1)) + + col_line.extend(columns._get_level_values(i)) + yield col_line + + # Write out the index line if it's not empty. + # Otherwise, we will print out an extraneous + # blank line between the mi and the data rows. + if self.encoded_labels and set(self.encoded_labels) != {""}: + yield self.encoded_labels + [""] * len(columns) + + def _save_body(self) -> None: nrows = len(self.data_index) - - # write in chunksize bites - chunksize = self.chunksize - chunks = int(nrows / chunksize) + 1 - + chunks = int(nrows / self.chunksize) + 1 for i in range(chunks): - start_i = i * chunksize - end_i = min((i + 1) * chunksize, nrows) + start_i = i * self.chunksize + end_i = min(start_i + self.chunksize, nrows) if start_i >= end_i: break - self._save_chunk(start_i, end_i) def _save_chunk(self, start_i: int, end_i: int) -> None: - data_index = self.data_index + ncols = self.obj.shape[-1] + data = [None] * ncols # create the data for a chunk slicer = slice(start_i, end_i) df = self.obj.iloc[slicer] - blocks = df._mgr.blocks - - for i in range(len(blocks)): - b = blocks[i] - d = b.to_native_types( - na_rep=self.na_rep, - float_format=self.float_format, - decimal=self.decimal, - date_format=self.date_format, - quoting=self.quoting, - ) - for col_loc, col in zip(b.mgr_locs, d): - # self.data is a preallocated list - self.data[col_loc] = col + for block in df._mgr.blocks: + d = block.to_native_types(**self._number_format) - ix = data_index.to_native_types( - slicer=slicer, - na_rep=self.na_rep, - float_format=self.float_format, - decimal=self.decimal, - date_format=self.date_format, - quoting=self.quoting, - ) + for col_loc, col in zip(block.mgr_locs, d): + data[col_loc] = col - libwriters.write_csv_rows(self.data, ix, self.nlevels, self.cols, self.writer) + ix = self.data_index.to_native_types(slicer=slicer, **self._number_format) + libwriters.write_csv_rows(data, ix, self.nlevels, self.cols, self.writer) From faae2f0c8e4aab3086d690bab248d1a2ab44fcff Mon Sep 17 00:00:00 2001 From: Satrio H Wicaksono <44076327+satrio-hw@users.noreply.github.com> Date: Wed, 9 Sep 2020 20:24:14 +0700 Subject: [PATCH 491/632] CLN: remove unnecessary trailing commas on issues #35925 (#36193) --- pandas/tests/arrays/categorical/test_replace.py | 8 ++------ pandas/tests/arrays/test_array.py | 8 ++++---- pandas/tests/arrays/test_timedeltas.py | 2 +- pandas/tests/base/test_conversion.py | 7 ++----- pandas/tests/dtypes/test_missing.py | 2 +- pandas/tests/extension/base/methods.py | 8 ++------ pandas/tests/extension/test_sparse.py | 4 +--- 
pandas/tests/frame/indexing/test_setitem.py | 8 ++------ 8 files changed, 15 insertions(+), 32 deletions(-) diff --git a/pandas/tests/arrays/categorical/test_replace.py b/pandas/tests/arrays/categorical/test_replace.py index b9ac3ce9a37ae..8b784fde1d3c5 100644 --- a/pandas/tests/arrays/categorical/test_replace.py +++ b/pandas/tests/arrays/categorical/test_replace.py @@ -43,9 +43,5 @@ def test_replace(to_replace, value, expected, flip_categories): # the replace call loses categorical dtype expected = pd.Series(np.asarray(expected)) - tm.assert_series_equal( - expected, result, check_category_order=False, - ) - tm.assert_series_equal( - expected, s, check_category_order=False, - ) + tm.assert_series_equal(expected, result, check_category_order=False) + tm.assert_series_equal(expected, s, check_category_order=False) diff --git a/pandas/tests/arrays/test_array.py b/pandas/tests/arrays/test_array.py index a0525aa511ee2..304e1c80a3f77 100644 --- a/pandas/tests/arrays/test_array.py +++ b/pandas/tests/arrays/test_array.py @@ -35,7 +35,7 @@ np.dtype("float32"), PandasArray(np.array([1.0, 2.0], dtype=np.dtype("float32"))), ), - (np.array([1, 2], dtype="int64"), None, IntegerArray._from_sequence([1, 2]),), + (np.array([1, 2], dtype="int64"), None, IntegerArray._from_sequence([1, 2])), # String alias passes through to NumPy ([1, 2], "float32", PandasArray(np.array([1, 2], dtype="float32"))), # Period alias @@ -120,10 +120,10 @@ (pd.Series([1, 2]), None, PandasArray(np.array([1, 2], dtype=np.int64))), # String (["a", None], "string", StringArray._from_sequence(["a", None])), - (["a", None], pd.StringDtype(), StringArray._from_sequence(["a", None]),), + (["a", None], pd.StringDtype(), StringArray._from_sequence(["a", None])), # Boolean ([True, None], "boolean", BooleanArray._from_sequence([True, None])), - ([True, None], pd.BooleanDtype(), BooleanArray._from_sequence([True, None]),), + ([True, None], pd.BooleanDtype(), BooleanArray._from_sequence([True, None])), # Index (pd.Index([1, 2]), None, PandasArray(np.array([1, 2], dtype=np.int64))), # Series[EA] returns the EA @@ -174,7 +174,7 @@ def test_array_copy(): period_array(["2000", "2001"], freq="D"), ), # interval - ([pd.Interval(0, 1), pd.Interval(1, 2)], IntervalArray.from_breaks([0, 1, 2]),), + ([pd.Interval(0, 1), pd.Interval(1, 2)], IntervalArray.from_breaks([0, 1, 2])), # datetime ( [pd.Timestamp("2000"), pd.Timestamp("2001")], diff --git a/pandas/tests/arrays/test_timedeltas.py b/pandas/tests/arrays/test_timedeltas.py index c86b4f71ee592..a32529cb58ba3 100644 --- a/pandas/tests/arrays/test_timedeltas.py +++ b/pandas/tests/arrays/test_timedeltas.py @@ -46,7 +46,7 @@ def test_incorrect_dtype_raises(self): TimedeltaArray(np.array([1, 2, 3], dtype="i8"), dtype="category") with pytest.raises( - ValueError, match=r"dtype int64 cannot be converted to timedelta64\[ns\]", + ValueError, match=r"dtype int64 cannot be converted to timedelta64\[ns\]" ): TimedeltaArray(np.array([1, 2, 3], dtype="i8"), dtype=np.dtype("int64")) diff --git a/pandas/tests/base/test_conversion.py b/pandas/tests/base/test_conversion.py index b688a048cbe8e..b5595ba220a15 100644 --- a/pandas/tests/base/test_conversion.py +++ b/pandas/tests/base/test_conversion.py @@ -183,7 +183,7 @@ def test_iter_box(self): PeriodArray, pd.core.dtypes.dtypes.PeriodDtype("A-DEC"), ), - (pd.IntervalIndex.from_breaks([0, 1, 2]), IntervalArray, "interval",), + (pd.IntervalIndex.from_breaks([0, 1, 2]), IntervalArray, "interval"), # This test is currently failing for datetime64[ns] and timedelta64[ns]. 
# The NumPy type system is sufficient for representing these types, so # we just use NumPy for Series / DataFrame columns of these types (so @@ -285,10 +285,7 @@ def test_array_multiindex_raises(): pd.core.arrays.period_array(["2000", "2001"], freq="D"), np.array([pd.Period("2000", freq="D"), pd.Period("2001", freq="D")]), ), - ( - pd.core.arrays.integer_array([0, np.nan]), - np.array([0, pd.NA], dtype=object), - ), + (pd.core.arrays.integer_array([0, np.nan]), np.array([0, pd.NA], dtype=object)), ( IntervalArray.from_breaks([0, 1, 2]), np.array([pd.Interval(0, 1), pd.Interval(1, 2)], dtype=object), diff --git a/pandas/tests/dtypes/test_missing.py b/pandas/tests/dtypes/test_missing.py index 04dde08de082d..a642b23379c6f 100644 --- a/pandas/tests/dtypes/test_missing.py +++ b/pandas/tests/dtypes/test_missing.py @@ -373,7 +373,7 @@ def test_array_equivalent(dtype_equal): ) # The rest are not dtype_equal assert not array_equivalent( - DatetimeIndex([0, np.nan]), DatetimeIndex([0, np.nan], tz="US/Eastern"), + DatetimeIndex([0, np.nan]), DatetimeIndex([0, np.nan], tz="US/Eastern") ) assert not array_equivalent( DatetimeIndex([0, np.nan], tz="CET"), diff --git a/pandas/tests/extension/base/methods.py b/pandas/tests/extension/base/methods.py index 5e1cf30efd534..23e20a2c0903a 100644 --- a/pandas/tests/extension/base/methods.py +++ b/pandas/tests/extension/base/methods.py @@ -92,18 +92,14 @@ def test_argmin_argmax(self, data_for_sorting, data_missing_for_sorting, na_valu assert data_missing_for_sorting.argmax() == 0 assert data_missing_for_sorting.argmin() == 2 - @pytest.mark.parametrize( - "method", ["argmax", "argmin"], - ) + @pytest.mark.parametrize("method", ["argmax", "argmin"]) def test_argmin_argmax_empty_array(self, method, data): # GH 24382 err_msg = "attempt to get" with pytest.raises(ValueError, match=err_msg): getattr(data[:0], method)() - @pytest.mark.parametrize( - "method", ["argmax", "argmin"], - ) + @pytest.mark.parametrize("method", ["argmax", "argmin"]) def test_argmin_argmax_all_na(self, method, data, na_value): # all missing with skipna=True is the same as emtpy err_msg = "attempt to get" diff --git a/pandas/tests/extension/test_sparse.py b/pandas/tests/extension/test_sparse.py index b411ca1c482a4..d11cfd219a443 100644 --- a/pandas/tests/extension/test_sparse.py +++ b/pandas/tests/extension/test_sparse.py @@ -316,9 +316,7 @@ def test_shift_0_periods(self, data): data._sparse_values[0] = data._sparse_values[1] assert result._sparse_values[0] != result._sparse_values[1] - @pytest.mark.parametrize( - "method", ["argmax", "argmin"], - ) + @pytest.mark.parametrize("method", ["argmax", "argmin"]) def test_argmin_argmax_all_na(self, method, data, na_value): # overriding because Sparse[int64, 0] cannot handle na_value self._check_unsupported(data) diff --git a/pandas/tests/frame/indexing/test_setitem.py b/pandas/tests/frame/indexing/test_setitem.py index c5945edfd3127..8313ab0b99bac 100644 --- a/pandas/tests/frame/indexing/test_setitem.py +++ b/pandas/tests/frame/indexing/test_setitem.py @@ -108,7 +108,7 @@ def test_setitem_timestamp_empty_columns(self): df["now"] = Timestamp("20130101", tz="UTC") expected = DataFrame( - [[Timestamp("20130101", tz="UTC")]] * 3, index=[0, 1, 2], columns=["now"], + [[Timestamp("20130101", tz="UTC")]] * 3, index=[0, 1, 2], columns=["now"] ) tm.assert_frame_equal(df, expected) @@ -158,11 +158,7 @@ def test_setitem_dict_preserves_dtypes(self): } ) for idx, b in enumerate([1, 2, 3]): - df.loc[df.shape[0]] = { - "a": int(idx), - "b": float(b), - "c": float(b), 
- } + df.loc[df.shape[0]] = {"a": int(idx), "b": float(b), "c": float(b)} tm.assert_frame_equal(df, expected) @pytest.mark.parametrize( From 1003a5cc9c7497f0b14079016bb0da941f41ae2b Mon Sep 17 00:00:00 2001 From: danchev <12420863+danchev@users.noreply.github.com> Date: Wed, 9 Sep 2020 10:12:05 -0500 Subject: [PATCH 492/632] Fixed a broken JSON Table Schema link (#36246) --- doc/source/user_guide/io.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/user_guide/io.rst b/doc/source/user_guide/io.rst index 43030d76d945a..bf6575a8836f5 100644 --- a/doc/source/user_guide/io.rst +++ b/doc/source/user_guide/io.rst @@ -2412,7 +2412,7 @@ indicate missing values and the subsequent read cannot distinguish the intent. os.remove('test.json') -.. _Table Schema: https://specs.frictionlessdata.io/json-table-schema/ +.. _Table Schema: https://specs.frictionlessdata.io/table-schema/ HTML ---- From 6aa311db41896f9fcdc07e6610be8dd7117a5f27 Mon Sep 17 00:00:00 2001 From: Nikhil Choudhary <49715980+Nikhil1O1@users.noreply.github.com> Date: Thu, 10 Sep 2020 03:55:31 +0530 Subject: [PATCH 493/632] DOC: Improve Index docstrings (#36239) --- pandas/core/indexes/base.py | 2 +- pandas/core/indexes/numeric.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 8014b16d07b01..67456096e8681 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -195,7 +195,7 @@ def _new_Index(cls, d): class Index(IndexOpsMixin, PandasObject): """ - Immutable ndarray implementing an ordered, sliceable set. The basic object + Immutable sequence used for indexing and alignment. The basic object storing axis labels for all pandas objects. Parameters diff --git a/pandas/core/indexes/numeric.py b/pandas/core/indexes/numeric.py index 079f43cb2c66b..125602ef2054a 100644 --- a/pandas/core/indexes/numeric.py +++ b/pandas/core/indexes/numeric.py @@ -189,7 +189,7 @@ def _union(self, other, sort): _num_index_shared_docs[ "class_descr" ] = """ - Immutable ndarray implementing an ordered, sliceable set. The basic object + Immutable sequence used for indexing and alignment. The basic object storing axis labels for all pandas objects. %(klass)s is a special case of `Index` with purely %(ltype)s labels. %(extra)s. From 03c704087ec02ce2f889ac0037b5082924df7fca Mon Sep 17 00:00:00 2001 From: Matthias Bussonnier Date: Thu, 10 Sep 2020 11:53:40 -0700 Subject: [PATCH 494/632] DOC: Rst Formatting, make sure continuation prompt are used. (#35317) --- pandas/core/indexing.py | 2 +- pandas/io/excel/_base.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index fe2fec1c52063..51031d9ab1153 100644 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -59,7 +59,7 @@ class _IndexSlice: >>> midx = pd.MultiIndex.from_product([['A0','A1'], ['B0','B1','B2','B3']]) >>> columns = ['foo', 'bar'] >>> dfmi = pd.DataFrame(np.arange(16).reshape((len(midx), len(columns))), - index=midx, columns=columns) + ... 
index=midx, columns=columns) Using the default slice command: diff --git a/pandas/io/excel/_base.py b/pandas/io/excel/_base.py index 87343c22ad4e9..d597731ed0ac4 100644 --- a/pandas/io/excel/_base.py +++ b/pandas/io/excel/_base.py @@ -601,8 +601,8 @@ class ExcelWriter(metaclass=abc.ABCMeta): You can set the date format or datetime format: >>> with ExcelWriter('path_to_file.xlsx', - date_format='YYYY-MM-DD', - datetime_format='YYYY-MM-DD HH:MM:SS') as writer: + ... date_format='YYYY-MM-DD', + ... datetime_format='YYYY-MM-DD HH:MM:SS') as writer: ... df.to_excel(writer) You can also append to an existing Excel file: From a09259b5699b0f2667761356cb3b22ef43ef37f0 Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Thu, 10 Sep 2020 18:11:43 -0700 Subject: [PATCH 495/632] BUG: DataFrame.any with axis=1 and bool_only=True (#36106) --- doc/source/whatsnew/v1.2.0.rst | 1 + pandas/core/frame.py | 14 +++++--------- pandas/tests/reductions/test_reductions.py | 7 +++++++ 3 files changed, 13 insertions(+), 9 deletions(-) diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index 2aac2596c18cb..ba556c8dcca54 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -246,6 +246,7 @@ Timezones Numeric ^^^^^^^ - Bug in :func:`to_numeric` where float precision was incorrect (:issue:`31364`) +- Bug in :meth:`DataFrame.any` with ``axis=1`` and ``bool_only=True`` ignoring the ``bool_only`` keyword (:issue:`32432`) - Conversion diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 59cf4c0e2f81d..3eed10917843b 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -8639,15 +8639,12 @@ def func(values): else: return op(values, axis=axis, skipna=skipna, **kwds) - def _get_data(axis_matters: bool) -> DataFrame: + def _get_data() -> DataFrame: if filter_type is None: data = self._get_numeric_data() elif filter_type == "bool": - if axis_matters: - # GH#25101, GH#24434 - data = self._get_bool_data() if axis == 0 else self - else: - data = self._get_bool_data() + # GH#25101, GH#24434 + data = self._get_bool_data() else: # pragma: no cover msg = ( f"Generating numeric_only data with filter_type {filter_type} " @@ -8659,7 +8656,7 @@ def _get_data(axis_matters: bool) -> DataFrame: if numeric_only is not None: df = self if numeric_only is True: - df = _get_data(axis_matters=True) + df = _get_data() if axis == 1: df = df.T axis = 0 @@ -8720,8 +8717,7 @@ def blk_func(values): except TypeError: # e.g. in nanops trying to convert strs to float - # TODO: why doesnt axis matter here? 
- data = _get_data(axis_matters=False) + data = _get_data() labels = data._get_agg_axis(axis) values = data.values diff --git a/pandas/tests/reductions/test_reductions.py b/pandas/tests/reductions/test_reductions.py index a112bc80b60b0..bbf2d9f1f0784 100644 --- a/pandas/tests/reductions/test_reductions.py +++ b/pandas/tests/reductions/test_reductions.py @@ -914,6 +914,13 @@ def test_all_any_boolean(self): tm.assert_series_equal(s.all(level=0), Series([False, True, False])) tm.assert_series_equal(s.any(level=0), Series([False, True, True])) + def test_any_axis1_bool_only(self): + # GH#32432 + df = pd.DataFrame({"A": [True, False], "B": [1, 2]}) + result = df.any(axis=1, bool_only=True) + expected = pd.Series([True, False]) + tm.assert_series_equal(result, expected) + def test_timedelta64_analytics(self): # index min/max From 6c8b923db4a5de5caf3fc05ab861e2c52b0ab8c5 Mon Sep 17 00:00:00 2001 From: Justin Essert Date: Fri, 11 Sep 2020 09:03:02 -0400 Subject: [PATCH 496/632] BUG: instantiation using a dict with a period scalar (#35966) --- doc/source/whatsnew/v1.2.0.rst | 2 +- pandas/core/construction.py | 2 +- pandas/core/dtypes/cast.py | 1 - pandas/core/indexes/interval.py | 2 ++ pandas/tests/dtypes/cast/test_infer_dtype.py | 4 +-- pandas/tests/frame/test_constructors.py | 18 ++++++++++++ pandas/tests/series/test_constructors.py | 29 +++++++++++++++++++- 7 files changed, 51 insertions(+), 7 deletions(-) diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index ba556c8dcca54..f2f56ee81b8d4 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -334,7 +334,7 @@ Sparse ExtensionArray ^^^^^^^^^^^^^^ -- +- Fixed bug where :class:`DataFrame` column set to scalar extension type via a dict instantiation was considered an object type rather than the extension type (:issue:`35965`) - diff --git a/pandas/core/construction.py b/pandas/core/construction.py index 3812c306b8eb4..0993328aef8de 100644 --- a/pandas/core/construction.py +++ b/pandas/core/construction.py @@ -472,7 +472,7 @@ def sanitize_array( # figure out the dtype from the value (upcast if necessary) if dtype is None: - dtype, value = infer_dtype_from_scalar(value) + dtype, value = infer_dtype_from_scalar(value, pandas_dtype=True) else: # need to possibly convert the value here value = maybe_cast_to_datetime(value, dtype) diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index 8f9c0cf7a01db..ba1b0b075936d 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -709,7 +709,6 @@ def infer_dtype_from_scalar(val, pandas_dtype: bool = False) -> Tuple[DtypeObj, elif pandas_dtype: if lib.is_period(val): dtype = PeriodDtype(freq=val.freq) - val = val.ordinal elif lib.is_interval(val): subtype = infer_dtype_from_scalar(val.left, pandas_dtype=True)[0] dtype = IntervalDtype(subtype=subtype) diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index 154f41bf07928..ad0a7ea32a1cc 100644 --- a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -589,6 +589,8 @@ def _maybe_convert_i8(self, key): if scalar: # Timestamp/Timedelta key_dtype, key_i8 = infer_dtype_from_scalar(key, pandas_dtype=True) + if lib.is_period(key): + key_i8 = key.ordinal else: # DatetimeIndex/TimedeltaIndex key_dtype, key_i8 = key.dtype, Index(key.asi8) diff --git a/pandas/tests/dtypes/cast/test_infer_dtype.py b/pandas/tests/dtypes/cast/test_infer_dtype.py index 70d38aad951cc..157adacbdfdf7 100644 --- a/pandas/tests/dtypes/cast/test_infer_dtype.py +++
b/pandas/tests/dtypes/cast/test_infer_dtype.py @@ -84,13 +84,11 @@ def test_infer_dtype_from_period(freq, pandas_dtype): if pandas_dtype: exp_dtype = f"period[{freq}]" - exp_val = p.ordinal else: exp_dtype = np.object_ - exp_val = p assert dtype == exp_dtype - assert val == exp_val + assert val == p @pytest.mark.parametrize( diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py index 0d1004809f7f1..eb334e811c5a4 100644 --- a/pandas/tests/frame/test_constructors.py +++ b/pandas/tests/frame/test_constructors.py @@ -717,6 +717,24 @@ def test_constructor_period_dict(self): assert df["a"].dtype == a.dtype assert df["b"].dtype == b.dtype + @pytest.mark.parametrize( + "data,dtype", + [ + (pd.Period("2012-01", freq="M"), "period[M]"), + (pd.Period("2012-02-01", freq="D"), "period[D]"), + (Interval(left=0, right=5), IntervalDtype("int64")), + (Interval(left=0.1, right=0.5), IntervalDtype("float64")), + ], + ) + def test_constructor_period_dict_scalar(self, data, dtype): + # scalar periods + df = DataFrame({"a": data}, index=[0]) + assert df["a"].dtype == dtype + + expected = DataFrame(index=[0], columns=["a"], data=data) + + tm.assert_frame_equal(df, expected) + @pytest.mark.parametrize( "data,dtype", [ diff --git a/pandas/tests/series/test_constructors.py b/pandas/tests/series/test_constructors.py index ce078059479b4..0fb8c5955a2e7 100644 --- a/pandas/tests/series/test_constructors.py +++ b/pandas/tests/series/test_constructors.py @@ -8,16 +8,23 @@ from pandas._libs import iNaT, lib from pandas.core.dtypes.common import is_categorical_dtype, is_datetime64tz_dtype -from pandas.core.dtypes.dtypes import CategoricalDtype +from pandas.core.dtypes.dtypes import ( + CategoricalDtype, + DatetimeTZDtype, + IntervalDtype, + PeriodDtype, +) import pandas as pd from pandas import ( Categorical, DataFrame, Index, + Interval, IntervalIndex, MultiIndex, NaT, + Period, Series, Timestamp, date_range, @@ -1075,6 +1082,26 @@ def test_constructor_dict_order(self): expected = Series([1, 0, 2], index=list("bac")) tm.assert_series_equal(result, expected) + @pytest.mark.parametrize( + "data,dtype", + [ + (Period("2020-01"), PeriodDtype("M")), + (Interval(left=0, right=5), IntervalDtype("int64")), + ( + Timestamp("2011-01-01", tz="US/Eastern"), + DatetimeTZDtype(tz="US/Eastern"), + ), + ], + ) + def test_constructor_dict_extension(self, data, dtype): + d = {"a": data} + result = Series(d, index=["a"]) + expected = Series(data, index=["a"], dtype=dtype) + + assert result.dtype == dtype + + tm.assert_series_equal(result, expected) + @pytest.mark.parametrize("value", [2, np.nan, None, float("nan")]) def test_constructor_dict_nan_key(self, value): # GH 18480 From f8d5fba2eb2962b5bd4d65daeffecca37700ecc5 Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Fri, 11 Sep 2020 06:04:37 -0700 Subject: [PATCH 497/632] REF: share more EA methods (#36209) --- pandas/core/arrays/_mixins.py | 11 +++++++-- pandas/core/arrays/categorical.py | 19 +++------------ pandas/core/arrays/datetimelike.py | 38 ++++-------------------------- pandas/core/arrays/numpy_.py | 17 ++++++------- 4 files changed, 26 insertions(+), 59 deletions(-) diff --git a/pandas/core/arrays/_mixins.py b/pandas/core/arrays/_mixins.py index 8b79f8ce66756..e9d8671b69c78 100644 --- a/pandas/core/arrays/_mixins.py +++ b/pandas/core/arrays/_mixins.py @@ -6,7 +6,7 @@ from pandas.errors import AbstractMethodError from pandas.util._decorators import cache_readonly, doc -from pandas.core.algorithms import searchsorted, take, unique +from 
pandas.core.algorithms import take, unique from pandas.core.array_algos.transforms import shift from pandas.core.arrays.base import ExtensionArray @@ -102,6 +102,9 @@ def T(self: _T) -> _T: # ------------------------------------------------------------------------ + def _values_for_argsort(self): + return self._ndarray + def copy(self: _T) -> _T: new_data = self._ndarray.copy() return self._from_backing_data(new_data) @@ -135,7 +138,11 @@ def _concat_same_type(cls, to_concat, axis: int = 0): @doc(ExtensionArray.searchsorted) def searchsorted(self, value, side="left", sorter=None): - return searchsorted(self._ndarray, value, side=side, sorter=sorter) + value = self._validate_searchsorted_value(value) + return self._ndarray.searchsorted(value, side=side, sorter=sorter) + + def _validate_searchsorted_value(self, value): + return value @doc(ExtensionArray.shift) def shift(self, periods=1, fill_value=None, axis=0): diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index a2b5b54c55490..66d917b07305c 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -12,7 +12,7 @@ from pandas._libs import NaT, algos as libalgos, hashtable as htable, lib from pandas._typing import ArrayLike, Dtype, Ordered, Scalar from pandas.compat.numpy import function as nv -from pandas.util._decorators import cache_readonly, deprecate_kwarg, doc +from pandas.util._decorators import cache_readonly, deprecate_kwarg from pandas.util._validators import validate_bool_kwarg, validate_fillna_kwargs from pandas.core.dtypes.cast import ( @@ -45,12 +45,7 @@ import pandas.core.algorithms as algorithms from pandas.core.algorithms import _get_data_algo, factorize, take_1d, unique1d from pandas.core.arrays._mixins import NDArrayBackedExtensionArray -from pandas.core.base import ( - ExtensionArray, - NoNewAttributesMixin, - PandasObject, - _shared_docs, -) +from pandas.core.base import ExtensionArray, NoNewAttributesMixin, PandasObject import pandas.core.common as com from pandas.core.construction import array, extract_array, sanitize_array from pandas.core.indexers import check_array_indexer, deprecate_ndim_indexing @@ -1315,11 +1310,6 @@ def memory_usage(self, deep=False): """ return self._codes.nbytes + self.dtype.categories.memory_usage(deep=deep) - @doc(_shared_docs["searchsorted"], klass="Categorical") - def searchsorted(self, value, side="left", sorter=None): - value = self._validate_searchsorted_value(value) - return self.codes.searchsorted(value, side=side, sorter=sorter) - def isna(self): """ Detect missing values @@ -1428,9 +1418,6 @@ def check_for_ordered(self, op): "Categorical to an ordered one\n" ) - def _values_for_argsort(self): - return self._codes - def argsort(self, ascending=True, kind="quicksort", **kwargs): """ Return the indices that would sort the Categorical. 
@@ -1879,7 +1866,7 @@ def __getitem__(self, key): if result.ndim > 1: deprecate_ndim_indexing(result) return result - return self._constructor(result, dtype=self.dtype, fastpath=True) + return self._from_backing_data(result) def __setitem__(self, key, value): """ diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index bb40cf78ea006..6302b48cb1978 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -545,15 +545,18 @@ def __getitem__(self, key): result = self._ndarray[key] if self.ndim == 1: return self._box_func(result) - return self._simple_new(result, dtype=self.dtype) + return self._from_backing_data(result) key = self._validate_getitem_key(key) result = self._ndarray[key] if lib.is_scalar(result): return self._box_func(result) + result = self._from_backing_data(result) + freq = self._get_getitem_freq(key) - return self._simple_new(result, dtype=self.dtype, freq=freq) + result._freq = freq + return result def _validate_getitem_key(self, key): if com.is_bool_indexer(key): @@ -714,9 +717,6 @@ def _values_for_factorize(self): def _from_factorized(cls, values, original): return cls(values, dtype=original.dtype) - def _values_for_argsort(self): - return self._ndarray - # ------------------------------------------------------------------ # Validation Methods # TODO: try to de-duplicate these, ensure identical behavior @@ -917,34 +917,6 @@ def _unbox(self, other, setitem: bool = False) -> Union[np.int64, np.ndarray]: # These are not part of the EA API, but we implement them because # pandas assumes they're there. - def searchsorted(self, value, side="left", sorter=None): - """ - Find indices where elements should be inserted to maintain order. - - Find the indices into a sorted array `self` such that, if the - corresponding elements in `value` were inserted before the indices, - the order of `self` would be preserved. - - Parameters - ---------- - value : array_like - Values to insert into `self`. - side : {'left', 'right'}, optional - If 'left', the index of the first suitable location found is given. - If 'right', return the last such index. If there is no suitable - index, return either 0 or N (where N is the length of `self`). - sorter : 1-D array_like, optional - Optional array of integer indices that sort `self` into ascending - order. They are typically the result of ``np.argsort``. - - Returns - ------- - indices : array of ints - Array of insertion points with the same shape as `value`. - """ - value = self._validate_searchsorted_value(value) - return self._data.searchsorted(value, side=side, sorter=sorter) - def value_counts(self, dropna=False): """ Return a Series containing counts of unique values. 
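The methods consolidated in this commit follow a single template-method pattern: the shared backing-ndarray base class implements each operation once, and a subclass overrides only a small _validate_* hook (here _validate_searchsorted_value; the numpy_.py diff below does the same with _validate_setitem_key and _validate_setitem_value). A minimal sketch of the idea, with illustrative class names and simplified bodies rather than the actual pandas implementations:

import numpy as np

class NDArrayBacked:
    # base class: owns the ndarray-facing implementation
    def __init__(self, values):
        self._ndarray = np.asarray(values)

    def _validate_searchsorted_value(self, value):
        # hook: the base class passes values through unchanged
        return value

    def searchsorted(self, value, side="left", sorter=None):
        value = self._validate_searchsorted_value(value)
        return self._ndarray.searchsorted(value, side=side, sorter=sorter)

class CoercingArray(NDArrayBacked):
    # subclass: customizes only the validation hook
    def _validate_searchsorted_value(self, value):
        # e.g. coerce inputs to the backing dtype before the ndarray call
        return np.asarray(value, dtype=self._ndarray.dtype)

Keeping the ndarray logic in one place means a fix or optimization lands for every backed array type at once, which is the motivation for removing the per-class searchsorted implementations above.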
diff --git a/pandas/core/arrays/numpy_.py b/pandas/core/arrays/numpy_.py index 588d68514649a..d3fa87d5ea7ff 100644 --- a/pandas/core/arrays/numpy_.py +++ b/pandas/core/arrays/numpy_.py @@ -260,15 +260,19 @@ def __getitem__(self, item): return result def __setitem__(self, key, value) -> None: - value = extract_array(value, extract_numpy=True) + key = self._validate_setitem_key(key) + value = self._validate_setitem_value(value) + self._ndarray[key] = value - key = check_array_indexer(self, key) - scalar_value = lib.is_scalar(value) + def _validate_setitem_value(self, value): + value = extract_array(value, extract_numpy=True) - if not scalar_value: + if not lib.is_scalar(value): value = np.asarray(value, dtype=self._ndarray.dtype) + return value - self._ndarray[key] = value + def _validate_setitem_key(self, key): + return check_array_indexer(self, key) def isna(self) -> np.ndarray: return isna(self._ndarray) @@ -308,9 +312,6 @@ def _validate_fill_value(self, fill_value): fill_value = self.dtype.na_value return fill_value - def _values_for_argsort(self) -> np.ndarray: - return self._ndarray - def _values_for_factorize(self) -> Tuple[np.ndarray, int]: return self._ndarray, -1 From 49b342b90d6a8a940d260803e31a5f189a336909 Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Fri, 11 Sep 2020 06:12:11 -0700 Subject: [PATCH 498/632] CLN: simplify Categorical comparisons (#36237) --- pandas/core/arrays/categorical.py | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index 66d917b07305c..81f9456502bf0 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -58,6 +58,7 @@ def _cat_compare_op(op): opname = f"__{op.__name__}__" + fill_value = True if op is operator.ne else False @unpack_zerodim_and_defer(opname) def func(self, other): @@ -92,26 +93,23 @@ def func(self, other): else: other_codes = other._codes - f = getattr(self._codes, opname) - ret = f(other_codes) + ret = op(self._codes, other_codes) mask = (self._codes == -1) | (other_codes == -1) if mask.any(): - # In other series, the leads to False, so do that here too - if opname == "__ne__": - ret[(self._codes == -1) & (other_codes == -1)] = True - else: - ret[mask] = False + ret[mask] = fill_value return ret if is_scalar(other): if other in self.categories: i = self.categories.get_loc(other) - ret = getattr(self._codes, opname)(i) + ret = op(self._codes, i) if opname not in {"__eq__", "__ge__", "__gt__"}: - # check for NaN needed if we are not equal or larger + # GH#29820 performance trick; get_loc will always give i>=0, + # so in the cases (__ne__, __le__, __lt__) the setting + # here is a no-op, so can be skipped. mask = self._codes == -1 - ret[mask] = False + ret[mask] = fill_value return ret else: return ops.invalid_comparison(self, other, op) From ddf2f05e25ca94794e295cf173e3fbc351581a78 Mon Sep 17 00:00:00 2001 From: Nikhil Choudhary <49715980+Nikhil1O1@users.noreply.github.com> Date: Fri, 11 Sep 2020 22:17:13 +0530 Subject: [PATCH 499/632] DOC: Update groupby.rst (#36238) --- doc/source/user_guide/groupby.rst | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/doc/source/user_guide/groupby.rst b/doc/source/user_guide/groupby.rst index ddba3dc452e28..f745dab00bab8 100644 --- a/doc/source/user_guide/groupby.rst +++ b/doc/source/user_guide/groupby.rst @@ -87,11 +87,9 @@ The mapping can be specified many different ways: * A Python function, to be called on each of the axis labels. 
* A list or NumPy array of the same length as the selected axis. * A dict or ``Series``, providing a ``label -> group name`` mapping. -* For ``DataFrame`` objects, a string indicating a column to be used to group. - Of course ``df.groupby('A')`` is just syntactic sugar for - ``df.groupby(df['A'])``, but it makes life simpler. -* For ``DataFrame`` objects, a string indicating an index level to be used to - group. +* For ``DataFrame`` objects, a string indicating either a column name or + an index level name to be used to group. +* ``df.groupby('A')`` is just syntactic sugar for ``df.groupby(df['A'])``. * A list of any of the above things. Collectively we refer to the grouping objects as the **keys**. For example, From b2dda5a3c653d04b15a23cc3bc9603f5d026977c Mon Sep 17 00:00:00 2001 From: Joel Nothman Date: Sat, 12 Sep 2020 03:40:11 +1000 Subject: [PATCH 500/632] ENH add na_action to DataFrame.applymap (#35704) --- doc/source/whatsnew/v1.2.0.rst | 2 +- pandas/_libs/lib.pyx | 8 ++++++- pandas/core/frame.py | 25 +++++++++++++++++--- pandas/tests/frame/apply/test_frame_apply.py | 16 +++++++++++++ 4 files changed, 46 insertions(+), 5 deletions(-) diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index f2f56ee81b8d4..bce6a735b7b07 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -100,8 +100,8 @@ For example: Other enhancements ^^^^^^^^^^^^^^^^^^ - - Added :meth:`~DataFrame.set_flags` for setting table-wide flags on a ``Series`` or ``DataFrame`` (:issue:`28394`) +- :meth:`DataFrame.applymap` now supports ``na_action`` (:issue:`23803`) - :class:`Index` with object dtype supports division and multiplication (:issue:`34160`) - :meth:`DataFrame.explode` and :meth:`Series.explode` now support exploding of sets (:issue:`35614`) - diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx index eadfcefaac73d..7464fafee2b94 100644 --- a/pandas/_libs/lib.pyx +++ b/pandas/_libs/lib.pyx @@ -2377,7 +2377,7 @@ def map_infer_mask(ndarray arr, object f, const uint8_t[:] mask, bint convert=Tr @cython.boundscheck(False) @cython.wraparound(False) -def map_infer(ndarray arr, object f, bint convert=True): +def map_infer(ndarray arr, object f, bint convert=True, bint ignore_na=False): """ Substitute for np.vectorize with pandas-friendly dtype inference. @@ -2385,6 +2385,9 @@ def map_infer(ndarray arr, object f, bint convert=True): ---------- arr : ndarray f : function + convert : bint + ignore_na : bint + If True, NA values will not have f applied Returns ------- @@ -2398,6 +2401,9 @@ def map_infer(ndarray arr, object f, bint convert=True): n = len(arr) result = np.empty(n, dtype=object) for i in range(n): + if ignore_na and checknull(arr[i]): + result[i] = arr[i] + continue val = f(arr[i]) if cnp.PyArray_IsZeroDim(val): diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 3eed10917843b..b03593ad8afe1 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -7619,7 +7619,7 @@ def apply(self, func, axis=0, raw=False, result_type=None, args=(), **kwds): ) return op.get_result() - def applymap(self, func) -> DataFrame: + def applymap(self, func, na_action: Optional[str] = None) -> DataFrame: """ Apply a function to a DataFrame elementwise. @@ -7630,6 +7630,10 @@ def applymap(self, func) -> DataFrame: ---------- func : callable Python function, returns a single value from a single value. + na_action : {None, 'ignore'}, default None + If 'ignore', propagate NaN values, without passing them to func. + + ..
versionadded:: 1.2

 Returns
 -------
@@ -7653,6 +7657,15 @@ def applymap(self, func) -> DataFrame:
 0 3 4
 1 5 5

+ Like Series.map, NA values can be ignored:
+
+ >>> df_copy = df.copy()
+ >>> df_copy.iloc[0, 0] = pd.NA
+ >>> df_copy.applymap(lambda x: len(str(x)), na_action='ignore')
+ 0 1
+ 0 <NA> 4
+ 1 5 5
+
 Note that a vectorized version of `func` often exists, which will
 be much faster. You could square each number elementwise.

@@ -7668,11 +7681,17 @@ def applymap(self, func) -> DataFrame:
 0 1.000000 4.494400
 1 11.262736 20.857489
 """
+ if na_action not in {"ignore", None}:
+ raise ValueError(
+ f"na_action must be 'ignore' or None. Got {repr(na_action)}"
+ )
+ ignore_na = na_action == "ignore"
+
 # if we have a dtype == 'M8[ns]', provide boxed values
 def infer(x):
 if x.empty:
- return lib.map_infer(x, func)
- return lib.map_infer(x.astype(object)._values, func)
+ return lib.map_infer(x, func, ignore_na=ignore_na)
+ return lib.map_infer(x.astype(object)._values, func, ignore_na=ignore_na)

 return self.apply(infer)

diff --git a/pandas/tests/frame/apply/test_frame_apply.py b/pandas/tests/frame/apply/test_frame_apply.py
index bc09501583e2c..1662f9e2fff56 100644
--- a/pandas/tests/frame/apply/test_frame_apply.py
+++ b/pandas/tests/frame/apply/test_frame_apply.py
@@ -630,6 +630,22 @@ def test_applymap(self, float_frame):
 result = frame.applymap(func)
 tm.assert_frame_equal(result, frame)

+ def test_applymap_na_ignore(self, float_frame):
+ # GH 23803
+ strlen_frame = float_frame.applymap(lambda x: len(str(x)))
+ float_frame_with_na = float_frame.copy()
+ mask = np.random.randint(0, 2, size=float_frame.shape, dtype=bool)
+ float_frame_with_na[mask] = pd.NA
+ strlen_frame_na_ignore = float_frame_with_na.applymap(
+ lambda x: len(str(x)), na_action="ignore"
+ )
+ strlen_frame_with_na = strlen_frame.copy()
+ strlen_frame_with_na[mask] = pd.NA
+ tm.assert_frame_equal(strlen_frame_na_ignore, strlen_frame_with_na)
+
+ with pytest.raises(ValueError, match="na_action must be .*Got 'abc'"):
+ float_frame_with_na.applymap(lambda x: len(str(x)), na_action="abc")
+
 def test_applymap_box_timestamps(self):
 # GH 2689, GH 2627
 ser = pd.Series(date_range("1/1/2000", periods=10))

From dc1c849f0cf4f1b8c5602a54f5231f6b57d1d913 Mon Sep 17 00:00:00 2001
From: jbrockmendel
Date: Fri, 11 Sep 2020 12:18:23 -0700
Subject: [PATCH 501/632] CI: xfail failing parquet test (#36272)

---
 pandas/tests/io/test_parquet.py | 16 ++++++++++++++--
 1 file changed, 14 insertions(+), 2 deletions(-)

diff --git a/pandas/tests/io/test_parquet.py b/pandas/tests/io/test_parquet.py
index 15f9837176315..35a400cba8671 100644
--- a/pandas/tests/io/test_parquet.py
+++ b/pandas/tests/io/test_parquet.py
@@ -8,6 +8,7 @@
 import numpy as np
 import pytest

+from pandas.compat import PY38
 import pandas.util._test_decorators as td

 import pandas as pd
@@ -564,8 +565,19 @@ def test_s3_roundtrip(self, df_compat, s3_resource, pa, s3so):
 write_kwargs=s3so,
 )

- @td.skip_if_no("s3fs")
- @pytest.mark.parametrize("partition_col", [["A"], []])
+ @td.skip_if_no("s3fs") # also requires flask
+ @pytest.mark.parametrize(
+ "partition_col",
+ [
+ pytest.param(
+ ["A"],
+ marks=pytest.mark.xfail(
+ PY38, reason="Getting back empty DataFrame", raises=AssertionError,
+ ),
+ ),
+ [],
+ ],
+ )
 def test_s3_roundtrip_for_dir(
 self, df_compat, s3_resource, pa, partition_col, s3so
 ):

From da82aef120a221fd10ad74e11a8a0ee731a2b584 Mon Sep 17 00:00:00 2001
From: jbrockmendel
Date: Fri, 11 Sep 2020 13:51:54 -0700
Subject: [PATCH 502/632] de-privatize (#36259)

---
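This patch drops the leading underscore from a number of internal names
(``get_data_algo``, the Excel reader/writer classes, the Series/DataFrame
operator wrappers). For the Excel writers the old private spellings stay
importable, because ``pandas/io/excel/__init__.py`` re-imports the new names
under ``as``-aliases, as the diff below shows. A doctest-style check of that
aliasing (illustrative only, not part of the patch itself):

 >>> from pandas.io.excel import _OpenpyxlWriter           # old underscore alias
 >>> from pandas.io.excel._openpyxl import OpenpyxlWriter  # new public name
 >>> _OpenpyxlWriter is OpenpyxlWriter
 True
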
pandas/compat/numpy/function.py | 4 ++-- pandas/core/algorithms.py | 6 +++--- pandas/core/arrays/categorical.py | 6 +++--- pandas/core/ops/__init__.py | 20 +++++++++--------- pandas/core/ops/methods.py | 34 +++++++++++++++---------------- pandas/io/excel/__init__.py | 8 ++++---- pandas/io/excel/_base.py | 16 +++++++-------- pandas/io/excel/_odfreader.py | 2 +- pandas/io/excel/_odswriter.py | 2 +- pandas/io/excel/_openpyxl.py | 4 ++-- pandas/io/excel/_pyxlsb.py | 2 +- pandas/io/excel/_xlrd.py | 2 +- pandas/io/excel/_xlsxwriter.py | 2 +- pandas/io/excel/_xlwt.py | 2 +- pandas/util/_test_decorators.py | 5 +++-- 15 files changed, 58 insertions(+), 57 deletions(-) diff --git a/pandas/compat/numpy/function.py b/pandas/compat/numpy/function.py index d7a14c28cc9ca..5f627aeade47c 100644 --- a/pandas/compat/numpy/function.py +++ b/pandas/compat/numpy/function.py @@ -21,7 +21,7 @@ from distutils.version import LooseVersion from typing import Any, Dict, Optional, Union -from numpy import __version__ as _np_version, ndarray +from numpy import __version__, ndarray from pandas._libs.lib import is_bool, is_integer from pandas.errors import UnsupportedFunctionCall @@ -122,7 +122,7 @@ def validate_argmax_with_skipna(skipna, args, kwargs): ARGSORT_DEFAULTS["kind"] = "quicksort" ARGSORT_DEFAULTS["order"] = None -if LooseVersion(_np_version) >= LooseVersion("1.17.0"): +if LooseVersion(__version__) >= LooseVersion("1.17.0"): # GH-26361. NumPy added radix sort and changed default to None. ARGSORT_DEFAULTS["kind"] = None diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py index 57e63daff29e4..872c51c7dfa75 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py @@ -262,7 +262,7 @@ def _get_values_for_rank(values): return values -def _get_data_algo(values): +def get_data_algo(values): values = _get_values_for_rank(values) ndtype = _check_object_for_strings(values) @@ -491,7 +491,7 @@ def factorize_array( codes : ndarray uniques : ndarray """ - hash_klass, values = _get_data_algo(values) + hash_klass, values = get_data_algo(values) table = hash_klass(size_hint or len(values)) uniques, codes = table.factorize( @@ -2086,7 +2086,7 @@ def sort_mixed(values): if sorter is None: # mixed types - hash_klass, values = _get_data_algo(values) + hash_klass, values = get_data_algo(values) t = hash_klass(len(values)) t.map_locations(values) sorter = ensure_platform_int(t.lookup(ordered)) diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index 81f9456502bf0..e73a1404c6434 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -43,7 +43,7 @@ from pandas.core import ops from pandas.core.accessor import PandasDelegate, delegate_names import pandas.core.algorithms as algorithms -from pandas.core.algorithms import _get_data_algo, factorize, take_1d, unique1d +from pandas.core.algorithms import factorize, get_data_algo, take_1d, unique1d from pandas.core.arrays._mixins import NDArrayBackedExtensionArray from pandas.core.base import ExtensionArray, NoNewAttributesMixin, PandasObject import pandas.core.common as com @@ -2531,8 +2531,8 @@ def _get_codes_for_values(values, categories): # Only hit here when we've already coerced to object dtypee. 
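 # (Sketch of the surrounding mechanics, not part of this patch:
 # get_data_algo returns a hashtable class matched to the values' dtype
 # together with the prepared values; map_locations fills the table with
 # the categories, and lookup then translates each value into its
 # category's position -- the resulting codes -- with -1 for values that
 # are absent from the categories.)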
- hash_klass, vals = _get_data_algo(values) - _, cats = _get_data_algo(categories) + hash_klass, vals = get_data_algo(values) + _, cats = get_data_algo(categories) t = hash_klass(len(cats)) t.map_locations(cats) return coerce_indexer_dtype(t.lookup(vals), cats) diff --git a/pandas/core/ops/__init__.py b/pandas/core/ops/__init__.py index 8fcbee6a20ac3..6763db1e2b138 100644 --- a/pandas/core/ops/__init__.py +++ b/pandas/core/ops/__init__.py @@ -324,12 +324,12 @@ def _align_method_SERIES(left: "Series", right, align_asobject: bool = False): return left, right -def _arith_method_SERIES(cls, op, special): +def arith_method_SERIES(cls, op, special): """ Wrapper function for Series arithmetic operations, to avoid code duplication. """ - assert special # non-special uses _flex_method_SERIES + assert special # non-special uses flex_method_SERIES op_name = _get_op_name(op, special) @unpack_zerodim_and_defer(op_name) @@ -348,12 +348,12 @@ def wrapper(left, right): return wrapper -def _comp_method_SERIES(cls, op, special): +def comp_method_SERIES(cls, op, special): """ Wrapper function for Series arithmetic operations, to avoid code duplication. """ - assert special # non-special uses _flex_method_SERIES + assert special # non-special uses flex_method_SERIES op_name = _get_op_name(op, special) @unpack_zerodim_and_defer(op_name) @@ -375,12 +375,12 @@ def wrapper(self, other): return wrapper -def _bool_method_SERIES(cls, op, special): +def bool_method_SERIES(cls, op, special): """ Wrapper function for Series arithmetic operations, to avoid code duplication. """ - assert special # non-special uses _flex_method_SERIES + assert special # non-special uses flex_method_SERIES op_name = _get_op_name(op, special) @unpack_zerodim_and_defer(op_name) @@ -398,7 +398,7 @@ def wrapper(self, other): return wrapper -def _flex_method_SERIES(cls, op, special): +def flex_method_SERIES(cls, op, special): assert not special # "special" also means "not flex" name = _get_op_name(op, special) doc = _make_flex_doc(name, "series") @@ -614,7 +614,7 @@ def _maybe_align_series_as_frame(frame: "DataFrame", series: "Series", axis: int return type(frame)(rvalues, index=frame.index, columns=frame.columns) -def _arith_method_FRAME(cls: Type["DataFrame"], op, special: bool): +def arith_method_FRAME(cls: Type["DataFrame"], op, special: bool): # This is the only function where `special` can be either True or False op_name = _get_op_name(op, special) default_axis = _get_frame_op_default_axis(op_name) @@ -666,7 +666,7 @@ def f(self, other, axis=default_axis, level=None, fill_value=None): return f -def _flex_comp_method_FRAME(cls: Type["DataFrame"], op, special: bool): +def flex_comp_method_FRAME(cls: Type["DataFrame"], op, special: bool): assert not special # "special" also means "not flex" op_name = _get_op_name(op, special) default_axis = _get_frame_op_default_axis(op_name) @@ -690,7 +690,7 @@ def f(self, other, axis=default_axis, level=None): return f -def _comp_method_FRAME(cls: Type["DataFrame"], op, special: bool): +def comp_method_FRAME(cls: Type["DataFrame"], op, special: bool): assert special # "special" also means "not flex" op_name = _get_op_name(op, special) diff --git a/pandas/core/ops/methods.py b/pandas/core/ops/methods.py index a4694a6e5134f..e04db92b58c36 100644 --- a/pandas/core/ops/methods.py +++ b/pandas/core/ops/methods.py @@ -44,28 +44,28 @@ def _get_method_wrappers(cls): # TODO: make these non-runtime imports once the relevant functions # are no longer in __init__ from pandas.core.ops import ( - _arith_method_FRAME, - 
_arith_method_SERIES, - _bool_method_SERIES, - _comp_method_FRAME, - _comp_method_SERIES, - _flex_comp_method_FRAME, - _flex_method_SERIES, + arith_method_FRAME, + arith_method_SERIES, + bool_method_SERIES, + comp_method_FRAME, + comp_method_SERIES, + flex_comp_method_FRAME, + flex_method_SERIES, ) if issubclass(cls, ABCSeries): # Just Series - arith_flex = _flex_method_SERIES - comp_flex = _flex_method_SERIES - arith_special = _arith_method_SERIES - comp_special = _comp_method_SERIES - bool_special = _bool_method_SERIES + arith_flex = flex_method_SERIES + comp_flex = flex_method_SERIES + arith_special = arith_method_SERIES + comp_special = comp_method_SERIES + bool_special = bool_method_SERIES elif issubclass(cls, ABCDataFrame): - arith_flex = _arith_method_FRAME - comp_flex = _flex_comp_method_FRAME - arith_special = _arith_method_FRAME - comp_special = _comp_method_FRAME - bool_special = _arith_method_FRAME + arith_flex = arith_method_FRAME + comp_flex = flex_comp_method_FRAME + arith_special = arith_method_FRAME + comp_special = comp_method_FRAME + bool_special = arith_method_FRAME return arith_flex, comp_flex, arith_special, comp_special, bool_special diff --git a/pandas/io/excel/__init__.py b/pandas/io/excel/__init__.py index d035223957a76..3bad493dee388 100644 --- a/pandas/io/excel/__init__.py +++ b/pandas/io/excel/__init__.py @@ -1,9 +1,9 @@ from pandas.io.excel._base import ExcelFile, ExcelWriter, read_excel -from pandas.io.excel._odswriter import _ODSWriter -from pandas.io.excel._openpyxl import _OpenpyxlWriter +from pandas.io.excel._odswriter import ODSWriter as _ODSWriter +from pandas.io.excel._openpyxl import OpenpyxlWriter as _OpenpyxlWriter from pandas.io.excel._util import register_writer -from pandas.io.excel._xlsxwriter import _XlsxWriter -from pandas.io.excel._xlwt import _XlwtWriter +from pandas.io.excel._xlsxwriter import XlsxWriter as _XlsxWriter +from pandas.io.excel._xlwt import XlwtWriter as _XlwtWriter __all__ = ["read_excel", "ExcelWriter", "ExcelFile"] diff --git a/pandas/io/excel/_base.py b/pandas/io/excel/_base.py index d597731ed0ac4..e9634ff0e9a05 100644 --- a/pandas/io/excel/_base.py +++ b/pandas/io/excel/_base.py @@ -844,16 +844,16 @@ class ExcelFile: - ``pyxlsb`` supports Binary Excel files. """ - from pandas.io.excel._odfreader import _ODFReader - from pandas.io.excel._openpyxl import _OpenpyxlReader - from pandas.io.excel._pyxlsb import _PyxlsbReader - from pandas.io.excel._xlrd import _XlrdReader + from pandas.io.excel._odfreader import ODFReader + from pandas.io.excel._openpyxl import OpenpyxlReader + from pandas.io.excel._pyxlsb import PyxlsbReader + from pandas.io.excel._xlrd import XlrdReader _engines: Mapping[str, Any] = { - "xlrd": _XlrdReader, - "openpyxl": _OpenpyxlReader, - "odf": _ODFReader, - "pyxlsb": _PyxlsbReader, + "xlrd": XlrdReader, + "openpyxl": OpenpyxlReader, + "odf": ODFReader, + "pyxlsb": PyxlsbReader, } def __init__( diff --git a/pandas/io/excel/_odfreader.py b/pandas/io/excel/_odfreader.py index 02575ab878f6e..ffb599cdfaaf8 100644 --- a/pandas/io/excel/_odfreader.py +++ b/pandas/io/excel/_odfreader.py @@ -10,7 +10,7 @@ from pandas.io.excel._base import BaseExcelReader -class _ODFReader(BaseExcelReader): +class ODFReader(BaseExcelReader): """ Read tables out of OpenDocument formatted files. 
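
With the ``_engines`` mapping above, engine dispatch in ``ExcelFile`` is a
plain dictionary lookup on the renamed reader classes. A usage sketch (the
file name is hypothetical):

 >>> import pandas as pd
 >>> xl = pd.ExcelFile("book.ods", engine="odf")  # routed to ODFReader  # doctest: +SKIP
 >>> xl.parse(0)  # doctest: +SKIP
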
diff --git a/pandas/io/excel/_odswriter.py b/pandas/io/excel/_odswriter.py index e7684012c1d4c..cbac60dfabaa7 100644 --- a/pandas/io/excel/_odswriter.py +++ b/pandas/io/excel/_odswriter.py @@ -9,7 +9,7 @@ from pandas.io.formats.excel import ExcelCell -class _ODSWriter(ExcelWriter): +class ODSWriter(ExcelWriter): engine = "odf" supported_extensions = (".ods",) diff --git a/pandas/io/excel/_openpyxl.py b/pandas/io/excel/_openpyxl.py index f395127902101..a5cadf4d93389 100644 --- a/pandas/io/excel/_openpyxl.py +++ b/pandas/io/excel/_openpyxl.py @@ -12,7 +12,7 @@ from openpyxl.descriptors.serialisable import Serialisable -class _OpenpyxlWriter(ExcelWriter): +class OpenpyxlWriter(ExcelWriter): engine = "openpyxl" supported_extensions = (".xlsx", ".xlsm") @@ -438,7 +438,7 @@ def write_cells( setattr(xcell, k, v) -class _OpenpyxlReader(BaseExcelReader): +class OpenpyxlReader(BaseExcelReader): def __init__( self, filepath_or_buffer: FilePathOrBuffer, diff --git a/pandas/io/excel/_pyxlsb.py b/pandas/io/excel/_pyxlsb.py index 069c3a2eaa643..ac94f4dd3df74 100644 --- a/pandas/io/excel/_pyxlsb.py +++ b/pandas/io/excel/_pyxlsb.py @@ -6,7 +6,7 @@ from pandas.io.excel._base import BaseExcelReader -class _PyxlsbReader(BaseExcelReader): +class PyxlsbReader(BaseExcelReader): def __init__( self, filepath_or_buffer: FilePathOrBuffer, diff --git a/pandas/io/excel/_xlrd.py b/pandas/io/excel/_xlrd.py index 9057106fb08e5..dfd5dde0329ae 100644 --- a/pandas/io/excel/_xlrd.py +++ b/pandas/io/excel/_xlrd.py @@ -8,7 +8,7 @@ from pandas.io.excel._base import BaseExcelReader -class _XlrdReader(BaseExcelReader): +class XlrdReader(BaseExcelReader): def __init__(self, filepath_or_buffer, storage_options: StorageOptions = None): """ Reader using xlrd engine. diff --git a/pandas/io/excel/_xlsxwriter.py b/pandas/io/excel/_xlsxwriter.py index 53f0c94d12e4c..16c4d377d7610 100644 --- a/pandas/io/excel/_xlsxwriter.py +++ b/pandas/io/excel/_xlsxwriter.py @@ -158,7 +158,7 @@ def convert(cls, style_dict, num_format_str=None): return props -class _XlsxWriter(ExcelWriter): +class XlsxWriter(ExcelWriter): engine = "xlsxwriter" supported_extensions = (".xlsx",) diff --git a/pandas/io/excel/_xlwt.py b/pandas/io/excel/_xlwt.py index faebe526d17bd..3592c2684f5a5 100644 --- a/pandas/io/excel/_xlwt.py +++ b/pandas/io/excel/_xlwt.py @@ -9,7 +9,7 @@ from xlwt import XFStyle -class _XlwtWriter(ExcelWriter): +class XlwtWriter(ExcelWriter): engine = "xlwt" supported_extensions = (".xls",) diff --git a/pandas/util/_test_decorators.py b/pandas/util/_test_decorators.py index e9deaf3fe67de..0e8f6b933cd97 100644 --- a/pandas/util/_test_decorators.py +++ b/pandas/util/_test_decorators.py @@ -33,7 +33,6 @@ def test_foo(): from pandas.compat import IS64, is_platform_windows from pandas.compat._optional import import_optional_dependency -from pandas.compat.numpy import _np_version from pandas.core.computation.expressions import NUMEXPR_INSTALLED, USE_NUMEXPR @@ -205,7 +204,9 @@ def skip_if_no(package: str, min_version: Optional[str] = None): def skip_if_np_lt(ver_str: str, *args, reason: Optional[str] = None): if reason is None: reason = f"NumPy {ver_str} or greater required" - return pytest.mark.skipif(_np_version < LooseVersion(ver_str), *args, reason=reason) + return pytest.mark.skipif( + np.__version__ < LooseVersion(ver_str), *args, reason=reason + ) def parametrize_fixture_doc(*args): From 5450233ec7182f83eafa775d9ef6dbaa1b49e0bd Mon Sep 17 00:00:00 2001 From: patrick <61934744+phofl@users.noreply.github.com> Date: Fri, 11 Sep 2020 23:33:02 +0200 
Subject: [PATCH 503/632] Update deprecation warnings, which were already removed (#36292) --- doc/source/user_guide/indexing.rst | 19 ++++++++++++------- doc/source/user_guide/io.rst | 13 +++---------- doc/source/user_guide/timeseries.rst | 8 ++++---- 3 files changed, 19 insertions(+), 21 deletions(-) diff --git a/doc/source/user_guide/indexing.rst b/doc/source/user_guide/indexing.rst index cac18f5bf39cd..74abbc9503db0 100644 --- a/doc/source/user_guide/indexing.rst +++ b/doc/source/user_guide/indexing.rst @@ -313,8 +313,10 @@ Selection by label .. warning:: - Starting in 0.21.0, pandas will show a ``FutureWarning`` if indexing with a list with missing labels. In the future - this will raise a ``KeyError``. See :ref:`list-like Using loc with missing keys in a list is Deprecated `. + .. versionchanged:: 1.0.0 + + Pandas will raise a ``KeyError`` if indexing with a list with missing labels. See :ref:`list-like Using loc with + missing keys in a list is Deprecated `. pandas provides a suite of methods in order to have **purely label based indexing**. This is a strict inclusion based protocol. Every label asked for must be in the index, or a ``KeyError`` will be raised. @@ -578,8 +580,9 @@ IX indexer is deprecated .. warning:: - Starting in 0.20.0, the ``.ix`` indexer is deprecated, in favor of the more strict ``.iloc`` - and ``.loc`` indexers. + .. versionchanged:: 1.0.0 + + The ``.ix`` indexer was removed, in favor of the more strict ``.iloc`` and ``.loc`` indexers. ``.ix`` offers a lot of magic on the inference of what the user wants to do. To wit, ``.ix`` can decide to index *positionally* OR via *labels* depending on the data type of the index. This has caused quite a @@ -636,11 +639,13 @@ Indexing with list with missing labels is deprecated .. warning:: - Starting in 0.21.0, using ``.loc`` or ``[]`` with a list with one or more missing labels, is deprecated, in favor of ``.reindex``. + .. versionchanged:: 1.0.0 + + Using ``.loc`` or ``[]`` with a list with one or more missing labels will no longer reindex, in favor of ``.reindex``. In prior versions, using ``.loc[list-of-labels]`` would work as long as *at least 1* of the keys was found (otherwise it -would raise a ``KeyError``). This behavior is deprecated and will show a warning message pointing to this section. The -recommended alternative is to use ``.reindex()``. +would raise a ``KeyError``). This behavior was changed and will now raise a ``KeyError`` if at least one label is missing. +The recommended alternative is to use ``.reindex()``. For example. diff --git a/doc/source/user_guide/io.rst b/doc/source/user_guide/io.rst index bf6575a8836f5..a1ce2f847d4b8 100644 --- a/doc/source/user_guide/io.rst +++ b/doc/source/user_guide/io.rst @@ -3024,19 +3024,12 @@ It is often the case that users will insert columns to do temporary computations in Excel and you may not want to read in those columns. ``read_excel`` takes a ``usecols`` keyword to allow you to specify a subset of columns to parse. -.. deprecated:: 0.24.0 +.. versionchanged:: 1.0.0 -Passing in an integer for ``usecols`` has been deprecated. Please pass in a list +Passing in an integer for ``usecols`` will no longer work. Please pass in a list of ints from 0 to ``usecols`` inclusive instead. -If ``usecols`` is an integer, then it is assumed to indicate the last column -to be parsed. - -.. 
code-block:: python - - pd.read_excel('path_to_file.xls', 'Sheet1', usecols=2) - -You can also specify a comma-delimited set of Excel columns and ranges as a string: +You can specify a comma-delimited set of Excel columns and ranges as a string: .. code-block:: python diff --git a/doc/source/user_guide/timeseries.rst b/doc/source/user_guide/timeseries.rst index 0bfe9d9b68cdb..71eefb9a76562 100644 --- a/doc/source/user_guide/timeseries.rst +++ b/doc/source/user_guide/timeseries.rst @@ -327,11 +327,11 @@ which can be specified. These are computed from the starting point specified by that was discussed :ref:`above`). The available units are listed on the documentation for :func:`pandas.to_datetime`. +.. versionchanged:: 1.0.0 + Constructing a :class:`Timestamp` or :class:`DatetimeIndex` with an epoch timestamp -with the ``tz`` argument specified will currently localize the epoch timestamps to UTC -first then convert the result to the specified time zone. However, this behavior -is :ref:`deprecated `, and if you have -epochs in wall time in another timezone, it is recommended to read the epochs +with the ``tz`` argument specified will raise a ValueError. If you have +epochs in wall time in another timezone, you can read the epochs as timezone-naive timestamps and then localize to the appropriate timezone: .. ipython:: python From 2067d7e306ae720d455f356e4da21f282a8a762e Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Fri, 11 Sep 2020 18:00:15 -0700 Subject: [PATCH 504/632] CLN: typo cleanups (#36276) * typo cleanups * typo fixup --- pandas/_libs/index.pyx | 2 +- pandas/tests/arrays/categorical/test_indexing.py | 2 +- scripts/validate_unwanted_patterns.py | 6 +++--- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/pandas/_libs/index.pyx b/pandas/_libs/index.pyx index 569562f5b5037..8155e7e6c074a 100644 --- a/pandas/_libs/index.pyx +++ b/pandas/_libs/index.pyx @@ -260,7 +260,7 @@ cdef class IndexEngine: def get_indexer_non_unique(self, targets): """ Return an indexer suitable for taking from a non unique index - return the labels in the same order ast the target + return the labels in the same order as the target and a missing indexer into the targets (which correspond to the -1 indices in the results """ diff --git a/pandas/tests/arrays/categorical/test_indexing.py b/pandas/tests/arrays/categorical/test_indexing.py index abfae189bb4d7..ab8606ef9258d 100644 --- a/pandas/tests/arrays/categorical/test_indexing.py +++ b/pandas/tests/arrays/categorical/test_indexing.py @@ -183,7 +183,7 @@ def test_get_indexer_non_unique(self, idx_values, key_values, key_class): # GH 21448 key = key_class(key_values, categories=range(1, 5)) # Test for flat index and CategoricalIndex with same/different cats: - for dtype in None, "category", key.dtype: + for dtype in [None, "category", key.dtype]: idx = Index(idx_values, dtype=dtype) expected, exp_miss = idx.get_indexer_non_unique(key_values) result, res_miss = idx.get_indexer_non_unique(key) diff --git a/scripts/validate_unwanted_patterns.py b/scripts/validate_unwanted_patterns.py index 1a6d8cc8b9914..2add2b8c62a4e 100755 --- a/scripts/validate_unwanted_patterns.py +++ b/scripts/validate_unwanted_patterns.py @@ -357,9 +357,9 @@ def main( output_format : str Output format of the error message. file_extensions_to_check : str - Coma seperated values of what file extensions to check. + Comma separated values of what file extensions to check. excluded_file_paths : str - Coma seperated values of what file paths to exclude during the check. 
+ Comma separated values of what file paths to exclude during the check. Returns ------- @@ -444,7 +444,7 @@ def main( parser.add_argument( "--included-file-extensions", default="py,pyx,pxd,pxi", - help="Coma seperated file extensions to check.", + help="Comma separated file extensions to check.", ) parser.add_argument( "--excluded-file-paths", From 15fd0e74a4a188332d2484539101ebd171f40d6e Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Sat, 12 Sep 2020 13:31:29 -0700 Subject: [PATCH 505/632] REF: de-duplicate _wrap_joined_index (#36282) --- pandas/core/indexes/base.py | 2 +- pandas/core/indexes/category.py | 3 ++- pandas/core/indexes/datetimelike.py | 4 +++- pandas/core/indexes/multi.py | 2 +- pandas/core/indexes/numeric.py | 9 --------- 5 files changed, 7 insertions(+), 13 deletions(-) diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 67456096e8681..5d15d33e7e215 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -3877,7 +3877,7 @@ def _join_monotonic(self, other, how="left", return_indexers=False): def _wrap_joined_index(self, joined, other): name = get_op_result_name(self, other) - return Index(joined, name=name) + return self._constructor(joined, name=name) # -------------------------------------------------------------------- # Uncategorized Methods diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py index 7509cb35069e8..98ef473a13348 100644 --- a/pandas/core/indexes/category.py +++ b/pandas/core/indexes/category.py @@ -767,7 +767,8 @@ def _wrap_joined_index( self, joined: np.ndarray, other: "CategoricalIndex" ) -> "CategoricalIndex": name = get_op_result_name(self, other) - return self._create_from_codes(joined, name=name) + cat = self._data._from_backing_data(joined) + return type(self)._simple_new(cat, name=name) CategoricalIndex._add_logical_methods_disabled() diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index 54c8ed60b6097..13236f8488ecb 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -603,7 +603,9 @@ def _wrap_joined_index(self, joined: np.ndarray, other): else: self = cast(DatetimeTimedeltaMixin, self) freq = self.freq if self._can_fast_union(other) else None - new_data = type(self._data)._simple_new(joined, dtype=self.dtype, freq=freq) + + new_data = self._data._from_backing_data(joined) + new_data._freq = freq return type(self)._simple_new(new_data, name=name) diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index deeb7ff50b88c..7aceb898f5ccf 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -3637,7 +3637,7 @@ def delete(self, loc): def _wrap_joined_index(self, joined, other): names = self.names if self.names == other.names else None - return MultiIndex.from_tuples(joined, names=names) + return self._constructor(joined, names=names) @doc(Index.isin) def isin(self, values, level=None): diff --git a/pandas/core/indexes/numeric.py b/pandas/core/indexes/numeric.py index 125602ef2054a..e8b7efeee8852 100644 --- a/pandas/core/indexes/numeric.py +++ b/pandas/core/indexes/numeric.py @@ -33,7 +33,6 @@ from pandas.core import algorithms import pandas.core.common as com from pandas.core.indexes.base import Index, maybe_extract_name -from pandas.core.ops import get_op_result_name _num_index_shared_docs = dict() @@ -262,10 +261,6 @@ class Int64Index(IntegerIndex): _engine_type = libindex.Int64Engine _default_dtype = np.dtype(np.int64) - def _wrap_joined_index(self, 
joined, other): - name = get_op_result_name(self, other) - return Int64Index(joined, name=name) - @classmethod def _assert_safe_casting(cls, data, subarr): """ @@ -324,10 +319,6 @@ def _convert_index_indexer(self, keyarr): # ---------------------------------------------------------------- - def _wrap_joined_index(self, joined, other): - name = get_op_result_name(self, other) - return UInt64Index(joined, name=name) - @classmethod def _assert_safe_casting(cls, data, subarr): """ From 21fe97204227df0a641bd363b2a7e6732dad9091 Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Sat, 12 Sep 2020 13:33:52 -0700 Subject: [PATCH 506/632] REF: de-duplicate sort_values (#36301) --- pandas/core/indexes/base.py | 8 +++++--- pandas/core/indexes/datetimelike.py | 17 ----------------- 2 files changed, 5 insertions(+), 20 deletions(-) diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 5d15d33e7e215..0aed08d46657e 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -4444,8 +4444,8 @@ def asof_locs(self, where, mask): def sort_values( self, - return_indexer=False, - ascending=True, + return_indexer: bool = False, + ascending: bool = True, na_position: str_t = "last", key: Optional[Callable] = None, ): @@ -4509,7 +4509,9 @@ def sort_values( # GH 35584. Sort missing values according to na_position kwarg # ignore na_position for MutiIndex - if not isinstance(self, ABCMultiIndex): + if not isinstance( + self, (ABCMultiIndex, ABCDatetimeIndex, ABCTimedeltaIndex, ABCPeriodIndex) + ): _as = nargsort( items=idx, ascending=ascending, na_position=na_position, key=key ) diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index 13236f8488ecb..5ba5732c710f7 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -41,7 +41,6 @@ ) from pandas.core.indexes.numeric import Int64Index from pandas.core.ops import get_op_result_name -from pandas.core.sorting import ensure_key_mapped from pandas.core.tools.timedeltas import to_timedelta _index_doc_kwargs = dict(ibase._index_doc_kwargs) @@ -164,22 +163,6 @@ def __contains__(self, key: Any) -> bool: is_scalar(res) or isinstance(res, slice) or (is_list_like(res) and len(res)) ) - def sort_values(self, return_indexer=False, ascending=True, key=None): - """ - Return sorted copy of Index. - """ - idx = ensure_key_mapped(self, key) - - _as = idx.argsort() - if not ascending: - _as = _as[::-1] - sorted_index = self.take(_as) - - if return_indexer: - return sorted_index, _as - else: - return sorted_index - @Appender(_index_shared_docs["take"] % _index_doc_kwargs) def take(self, indices, axis=0, allow_fill=True, fill_value=None, **kwargs): nv.validate_take(tuple(), kwargs) From 2da7c343abf0c3b94004c537e53fa368051eefdd Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Sat, 12 Sep 2020 13:40:20 -0700 Subject: [PATCH 507/632] PERF: get_dtype_kinds (#36309) --- pandas/core/dtypes/concat.py | 51 ++++++++++++------------------ pandas/tests/dtypes/test_concat.py | 4 +-- 2 files changed, 23 insertions(+), 32 deletions(-) diff --git a/pandas/core/dtypes/concat.py b/pandas/core/dtypes/concat.py index dd005752a4832..07904339b93df 100644 --- a/pandas/core/dtypes/concat.py +++ b/pandas/core/dtypes/concat.py @@ -1,7 +1,7 @@ """ Utility functions related to concat. 
""" -from typing import cast +from typing import Set, cast import numpy as np @@ -9,15 +9,10 @@ from pandas.core.dtypes.cast import find_common_type from pandas.core.dtypes.common import ( - is_bool_dtype, is_categorical_dtype, - is_datetime64_dtype, - is_datetime64tz_dtype, is_dtype_equal, is_extension_array_dtype, - is_object_dtype, is_sparse, - is_timedelta64_dtype, ) from pandas.core.dtypes.generic import ABCCategoricalIndex, ABCRangeIndex, ABCSeries @@ -26,7 +21,7 @@ from pandas.core.construction import array -def get_dtype_kinds(l): +def _get_dtype_kinds(l) -> Set[str]: """ Parameters ---------- @@ -34,34 +29,30 @@ def get_dtype_kinds(l): Returns ------- - a set of kinds that exist in this list of arrays + set[str] + A set of kinds that exist in this list of arrays. """ - typs = set() + typs: Set[str] = set() for arr in l: + # Note: we use dtype.kind checks because they are much more performant + # than is_foo_dtype dtype = arr.dtype - if is_categorical_dtype(dtype): - typ = "category" - elif is_sparse(dtype): - typ = "sparse" + if not isinstance(dtype, np.dtype): + # ExtensionDtype so we get + # e.g. "categorical", "datetime64[ns, US/Central]", "Sparse[itn64, 0]" + typ = str(dtype) elif isinstance(arr, ABCRangeIndex): typ = "range" - elif is_datetime64tz_dtype(dtype): - # if to_concat contains different tz, - # the result must be object dtype - typ = str(dtype) - elif is_datetime64_dtype(dtype): + elif dtype.kind == "M": typ = "datetime" - elif is_timedelta64_dtype(dtype): + elif dtype.kind == "m": typ = "timedelta" - elif is_object_dtype(dtype): - typ = "object" - elif is_bool_dtype(dtype): - typ = "bool" - elif is_extension_array_dtype(dtype): - typ = str(dtype) + elif dtype.kind in ["O", "b"]: + typ = str(dtype) # i.e. "object", "bool" else: typ = dtype.kind + typs.add(typ) return typs @@ -140,7 +131,7 @@ def is_nonempty(x) -> bool: if non_empties and axis == 0: to_concat = non_empties - typs = get_dtype_kinds(to_concat) + typs = _get_dtype_kinds(to_concat) _contains_datetime = any(typ.startswith("datetime") for typ in typs) all_empty = not len(non_empties) @@ -161,13 +152,13 @@ def is_nonempty(x) -> bool: return np.concatenate(to_concat) elif _contains_datetime or "timedelta" in typs: - return concat_datetime(to_concat, axis=axis, typs=typs) + return _concat_datetime(to_concat, axis=axis, typs=typs) elif all_empty: # we have all empties, but may need to coerce the result dtype to # object if we have non-numeric type operands (numpy would otherwise # cast this to float) - typs = get_dtype_kinds(to_concat) + typs = _get_dtype_kinds(to_concat) if len(typs) != 1: if not len(typs - {"i", "u", "f"}) or not len(typs - {"bool", "i", "u"}): @@ -361,7 +352,7 @@ def _concatenate_2d(to_concat, axis: int): return np.concatenate(to_concat, axis=axis) -def concat_datetime(to_concat, axis=0, typs=None): +def _concat_datetime(to_concat, axis=0, typs=None): """ provide concatenation of an datetimelike array of arrays each of which is a single M8[ns], datetime64[ns, tz] or m8[ns] dtype @@ -377,7 +368,7 @@ def concat_datetime(to_concat, axis=0, typs=None): a single array, preserving the combined dtypes """ if typs is None: - typs = get_dtype_kinds(to_concat) + typs = _get_dtype_kinds(to_concat) to_concat = [_wrap_datetimelike(x) for x in to_concat] single_dtype = len({x.dtype for x in to_concat}) == 1 diff --git a/pandas/tests/dtypes/test_concat.py b/pandas/tests/dtypes/test_concat.py index 5a9ad732792ea..53d53e35c6eb5 100644 --- a/pandas/tests/dtypes/test_concat.py +++ 
b/pandas/tests/dtypes/test_concat.py @@ -44,7 +44,7 @@ ) def test_get_dtype_kinds(index_or_series, to_concat, expected): to_concat_klass = [index_or_series(c) for c in to_concat] - result = _concat.get_dtype_kinds(to_concat_klass) + result = _concat._get_dtype_kinds(to_concat_klass) assert result == set(expected) @@ -76,7 +76,7 @@ def test_get_dtype_kinds(index_or_series, to_concat, expected): ], ) def test_get_dtype_kinds_period(to_concat, expected): - result = _concat.get_dtype_kinds(to_concat) + result = _concat._get_dtype_kinds(to_concat) assert result == set(expected) From 4dc58879161bf8eb09d51d488e02e1725be86dbd Mon Sep 17 00:00:00 2001 From: Maxim Ivanov <41443370+ivanovmg@users.noreply.github.com> Date: Sun, 13 Sep 2020 03:54:22 +0700 Subject: [PATCH 508/632] CLN: pandas/io/parsers.py (#36269) --- pandas/io/parsers.py | 53 ++++++++++++++++---------------------------- 1 file changed, 19 insertions(+), 34 deletions(-) diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py index 4c619a636f057..b963d5be69b5f 100644 --- a/pandas/io/parsers.py +++ b/pandas/io/parsers.py @@ -421,10 +421,6 @@ def _read(filepath_or_buffer: FilePathOrBuffer, kwds): kwds["encoding"] = encoding compression = kwds.get("compression", "infer") - # TODO: get_filepath_or_buffer could return - # Union[FilePathOrBuffer, s3fs.S3File, gcsfs.GCSFile] - # though mypy handling of conditional imports is difficult. - # See https://github.com/python/mypy/issues/1297 ioargs = get_filepath_or_buffer( filepath_or_buffer, encoding, compression, storage_options=storage_options ) @@ -914,7 +910,6 @@ def __init__(self, f, engine=None, **kwds): # miscellanea self.engine = engine - self._engine = None self._currow = 0 options = self._get_options_with_defaults(engine) @@ -923,14 +918,13 @@ def __init__(self, f, engine=None, **kwds): self.nrows = options.pop("nrows", None) self.squeeze = options.pop("squeeze", False) - # might mutate self.engine - self.engine = self._check_file_or_buffer(f, engine) + self._check_file_or_buffer(f, engine) self.options, self.engine = self._clean_options(options, engine) if "has_index_names" in kwds: self.options["has_index_names"] = kwds["has_index_names"] - self._make_engine(self.engine) + self._engine = self._make_engine(self.engine) def close(self): self._engine.close() @@ -987,24 +981,21 @@ def _check_file_or_buffer(self, f, engine): msg = "The 'python' engine cannot iterate through this file buffer." 
raise ValueError(msg) - return engine - def _clean_options(self, options, engine): result = options.copy() engine_specified = self._engine_specified fallback_reason = None - sep = options["delimiter"] - delim_whitespace = options["delim_whitespace"] - # C engine not supported yet if engine == "c": if options["skipfooter"] > 0: fallback_reason = "the 'c' engine does not support skipfooter" engine = "python" - encoding = sys.getfilesystemencoding() or "utf-8" + sep = options["delimiter"] + delim_whitespace = options["delim_whitespace"] + if sep is None and not delim_whitespace: if engine == "c": fallback_reason = ( @@ -1029,6 +1020,7 @@ def _clean_options(self, options, engine): result["delimiter"] = r"\s+" elif sep is not None: encodeable = True + encoding = sys.getfilesystemencoding() or "utf-8" try: if len(sep.encode(encoding)) > 1: encodeable = False @@ -1161,29 +1153,26 @@ def __next__(self): raise def _make_engine(self, engine="c"): - if engine == "c": - self._engine = CParserWrapper(self.f, **self.options) + mapping = { + "c": CParserWrapper, + "python": PythonParser, + "python-fwf": FixedWidthFieldParser, + } + try: + klass = mapping[engine] + except KeyError: + raise ValueError( + f"Unknown engine: {engine} (valid options are {mapping.keys()})" + ) else: - if engine == "python": - klass = PythonParser - elif engine == "python-fwf": - klass = FixedWidthFieldParser - else: - raise ValueError( - f"Unknown engine: {engine} (valid options " - 'are "c", "python", or "python-fwf")' - ) - self._engine = klass(self.f, **self.options) + return klass(self.f, **self.options) def _failover_to_python(self): raise AbstractMethodError(self) def read(self, nrows=None): nrows = validate_integer("nrows", nrows) - ret = self._engine.read(nrows) - - # May alter columns / col_dict - index, columns, col_dict = self._create_index(ret) + index, columns, col_dict = self._engine.read(nrows) if index is None: if col_dict: @@ -1203,10 +1192,6 @@ def read(self, nrows=None): return df[df.columns[0]].copy() return df - def _create_index(self, ret): - index, columns, col_dict = ret - return index, columns, col_dict - def get_chunk(self, size=None): if size is None: size = self.chunksize From 06b3f5d9815b8545264c4726a6fa015766da5b03 Mon Sep 17 00:00:00 2001 From: Felix Claessen <30658763+Flix6x@users.noreply.github.com> Date: Sat, 12 Sep 2020 23:07:52 +0200 Subject: [PATCH 509/632] Resample fix dst transition (#36264) --- doc/source/whatsnew/v1.2.0.rst | 1 + pandas/core/resample.py | 6 ++- pandas/tests/resample/test_datetime_index.py | 47 ++++++++++++++++++++ 3 files changed, 53 insertions(+), 1 deletion(-) diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index bce6a735b7b07..f632d87a32a5a 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -308,6 +308,7 @@ Groupby/resample/rolling - Bug in :meth:`DataFrameGroupBy.count` and :meth:`SeriesGroupBy.sum` returning ``NaN`` for missing categories when grouped on multiple ``Categoricals``. 
Now returning ``0`` (:issue:`35028`) - Bug in :meth:`DataFrameGroupBy.apply` that would some times throw an erroneous ``ValueError`` if the grouping axis had duplicate entries (:issue:`16646`) +- Bug in :meth:`DataFrame.resample(...)` that would throw a ``ValueError`` when resampling from "D" to "24H" over a transition into daylight savings time (DST) (:issue:`35219`) - Bug when combining methods :meth:`DataFrame.groupby` with :meth:`DataFrame.resample` and :meth:`DataFrame.interpolate` raising an ``TypeError`` (:issue:`35325`) - Bug in :meth:`DataFrameGroupBy.apply` where a non-nuisance grouping column would be dropped from the output columns if another groupby method was called before ``.apply()`` (:issue:`34656`) - Bug in :meth:`DataFrameGroupby.apply` would drop a :class:`CategoricalIndex` when grouped on. (:issue:`35792`) diff --git a/pandas/core/resample.py b/pandas/core/resample.py index 7b5154756e613..a2bf631959dd1 100644 --- a/pandas/core/resample.py +++ b/pandas/core/resample.py @@ -1087,7 +1087,11 @@ def _upsample(self, method, limit=None, fill_value=None): res_index = self._adjust_binner_for_upsample(binner) # if we have the same frequency as our axis, then we are equal sampling - if limit is None and to_offset(ax.inferred_freq) == self.freq: + if ( + limit is None + and to_offset(ax.inferred_freq) == self.freq + and len(obj) == len(res_index) + ): result = obj.copy() result.index = res_index else: diff --git a/pandas/tests/resample/test_datetime_index.py b/pandas/tests/resample/test_datetime_index.py index 59a0183304c76..9475dcc6981ff 100644 --- a/pandas/tests/resample/test_datetime_index.py +++ b/pandas/tests/resample/test_datetime_index.py @@ -1740,3 +1740,50 @@ def test_resample_apply_product(): columns=["A", "B"], ) tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "first,last,freq_in,freq_out,exp_last", + [ + ( + "2020-03-28", + "2020-03-31", + "D", + "24H", + "2020-03-30 01:00", + ), # includes transition into DST + ( + "2020-03-28", + "2020-10-27", + "D", + "24H", + "2020-10-27 00:00", + ), # includes transition into and out of DST + ( + "2020-10-25", + "2020-10-27", + "D", + "24H", + "2020-10-26 23:00", + ), # includes transition out of DST + ( + "2020-03-28", + "2020-03-31", + "24H", + "D", + "2020-03-30 00:00", + ), # same as above, but from 24H to D + ("2020-03-28", "2020-10-27", "24H", "D", "2020-10-27 00:00"), + ("2020-10-25", "2020-10-27", "24H", "D", "2020-10-26 00:00"), + ], +) +def test_resample_calendar_day_with_dst( + first: str, last: str, freq_in: str, freq_out: str, exp_last: str +): + # GH 35219 + ts = pd.Series(1.0, pd.date_range(first, last, freq=freq_in, tz="Europe/Amsterdam")) + result = ts.resample(freq_out).pad() + expected = pd.Series( + 1.0, pd.date_range(first, exp_last, freq=freq_out, tz="Europe/Amsterdam") + ) + tm.assert_series_equal(result, expected) From 39c5e29ad1be85d2ee19006d4f7eb9e151ff48f1 Mon Sep 17 00:00:00 2001 From: Richard Shadrach <45562402+rhshadrach@users.noreply.github.com> Date: Sat, 12 Sep 2020 17:12:42 -0400 Subject: [PATCH 510/632] CLN: _wrap_applied_output (#36260) --- pandas/core/groupby/generic.py | 37 +++++++++++++++--------------- pandas/tests/groupby/test_apply.py | 10 ++++++++ 2 files changed, 28 insertions(+), 19 deletions(-) diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index e870187fc7952..1552256468ad2 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -1203,16 +1203,27 @@ def _wrap_applied_output(self, keys, values, 
not_indexed_same=False): values = [x if (x is not None) else backup for x in values] - v = values[0] - - if not isinstance(v, (np.ndarray, Index, Series)) and self.as_index: + if isinstance(first_not_none, (np.ndarray, Index)): + # GH#1738: values is list of arrays of unequal lengths + # fall through to the outer else clause + # TODO: sure this is right? we used to do this + # after raising AttributeError above + return self.obj._constructor_sliced( + values, index=key_index, name=self._selection_name + ) + elif not isinstance(first_not_none, Series): # values are not series or array-like but scalars # self._selection_name not passed through to Series as the # result should not take the name of original selection # of columns - return self.obj._constructor_sliced(values, index=key_index) + if self.as_index: + return self.obj._constructor_sliced(values, index=key_index) + else: + result = DataFrame(values, index=key_index, columns=[self._selection]) + self._insert_inaxis_grouper_inplace(result) + return result - if isinstance(v, Series): + else: all_indexed_same = all_indexes_same((x.index for x in values)) # GH3596 @@ -1253,31 +1264,19 @@ def _wrap_applied_output(self, keys, values, not_indexed_same=False): if self.axis == 0: index = key_index - columns = v.index.copy() + columns = first_not_none.index.copy() if columns.name is None: # GH6124 - propagate name of Series when it's consistent names = {v.name for v in values} if len(names) == 1: columns.name = list(names)[0] else: - index = v.index + index = first_not_none.index columns = key_index stacked_values = stacked_values.T result = self.obj._constructor(stacked_values, index=index, columns=columns) - elif not self.as_index: - # We add grouping column below, so create a frame here - result = DataFrame(values, index=key_index, columns=[self._selection]) - else: - # GH#1738: values is list of arrays of unequal lengths - # fall through to the outer else clause - # TODO: sure this is right? 
we used to do this - # after raising AttributeError above - return self.obj._constructor_sliced( - values, index=key_index, name=self._selection_name - ) - # if we have date/time like in the original, then coerce dates # as we are stacking can easily have object dtypes here so = self._selected_obj diff --git a/pandas/tests/groupby/test_apply.py b/pandas/tests/groupby/test_apply.py index 3183305fe2933..db5c4af9c6f53 100644 --- a/pandas/tests/groupby/test_apply.py +++ b/pandas/tests/groupby/test_apply.py @@ -381,6 +381,16 @@ def test_apply_frame_to_series(df): tm.assert_numpy_array_equal(result.values, expected.values) +def test_apply_frame_not_as_index_column_name(df): + # GH 35964 - path within _wrap_applied_output not hit by a test + grouped = df.groupby(["A", "B"], as_index=False) + result = grouped.apply(len) + expected = grouped.count().rename(columns={"C": np.nan}).drop(columns="D") + # TODO: Use assert_frame_equal when column name is not np.nan (GH 36306) + tm.assert_index_equal(result.index, expected.index) + tm.assert_numpy_array_equal(result.values, expected.values) + + def test_apply_frame_concat_series(): def trans(group): return group.groupby("B")["C"].sum().sort_values()[:2] From 7e0bf1c8ca62c402f937d7b31472d22d2854aac3 Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Sat, 12 Sep 2020 14:13:31 -0700 Subject: [PATCH 511/632] REF: implement Categorical._validate_listlike (#36274) --- pandas/core/arrays/categorical.py | 44 ++++++++++++++++++++++--------- pandas/core/dtypes/concat.py | 10 ++----- pandas/core/indexes/category.py | 29 +++----------------- pandas/core/reshape/merge.py | 9 ++----- 4 files changed, 39 insertions(+), 53 deletions(-) diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index e73a1404c6434..803acce29a7e4 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -1716,6 +1716,35 @@ def _box_func(self, i: int): return np.NaN return self.categories[i] + def _validate_listlike(self, target: ArrayLike) -> np.ndarray: + """ + Extract integer codes we can use for comparison. + + Notes + ----- + If a value in target is not present, it gets coded as -1. + """ + + if isinstance(target, Categorical): + # Indexing on codes is more efficient if categories are the same, + # so we can apply some optimizations based on the degree of + # dtype-matching. + if self.categories.equals(target.categories): + # We use the same codes, so can go directly to the engine + codes = target.codes + elif self.is_dtype_equal(target): + # We have the same categories up to a reshuffling of codes. 
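+ # (Illustrative values, not from this patch: with self.categories
+ # ['a', 'b'] and target.categories ['b', 'a'], target codes [0, 1]
+ # spell ['b', 'a'], so recoding against self's order yields [1, 0].)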
+ codes = recode_for_categories( + target.codes, target.categories, self.categories + ) + else: + code_indexer = self.categories.get_indexer(target.categories) + codes = take_1d(code_indexer, target.codes, fill_value=-1) + else: + codes = self.categories.get_indexer(target) + + return codes + # ------------------------------------------------------------------ def take_nd(self, indexer, allow_fill: bool = False, fill_value=None): @@ -1890,11 +1919,8 @@ def _validate_setitem_value(self, value): "Cannot set a Categorical with another, " "without identical categories" ) - if not self.categories.equals(value.categories): - new_codes = recode_for_categories( - value.codes, value.categories, self.categories - ) - value = Categorical.from_codes(new_codes, dtype=self.dtype) + new_codes = self._validate_listlike(value) + value = Categorical.from_codes(new_codes, dtype=self.dtype) rvalue = value if is_list_like(value) else [value] @@ -2164,13 +2190,7 @@ def equals(self, other: object) -> bool: if not isinstance(other, Categorical): return False elif self.is_dtype_equal(other): - if self.categories.equals(other.categories): - # fastpath to avoid re-coding - other_codes = other._codes - else: - other_codes = recode_for_categories( - other.codes, other.categories, self.categories - ) + other_codes = self._validate_listlike(other) return np.array_equal(self._codes, other_codes) return False diff --git a/pandas/core/dtypes/concat.py b/pandas/core/dtypes/concat.py index 07904339b93df..60fd959701821 100644 --- a/pandas/core/dtypes/concat.py +++ b/pandas/core/dtypes/concat.py @@ -301,14 +301,8 @@ def _maybe_unwrap(x): categories = first.categories ordered = first.ordered - if all(first.categories.equals(other.categories) for other in to_union[1:]): - new_codes = np.concatenate([c.codes for c in to_union]) - else: - codes = [first.codes] + [ - recode_for_categories(other.codes, other.categories, first.categories) - for other in to_union[1:] - ] - new_codes = np.concatenate(codes) + all_codes = [first._validate_listlike(x) for x in to_union] + new_codes = np.concatenate(all_codes) if sort_categories and not ignore_order and ordered: raise TypeError("Cannot use sort_categories=True with ordered Categoricals") diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py index 98ef473a13348..19a0910a7a282 100644 --- a/pandas/core/indexes/category.py +++ b/pandas/core/indexes/category.py @@ -23,8 +23,7 @@ from pandas.core.dtypes.missing import is_valid_nat_for_dtype, notna from pandas.core import accessor -from pandas.core.algorithms import take_1d -from pandas.core.arrays.categorical import Categorical, contains, recode_for_categories +from pandas.core.arrays.categorical import Categorical, contains import pandas.core.common as com from pandas.core.construction import extract_array import pandas.core.indexes.base as ibase @@ -558,21 +557,7 @@ def get_indexer(self, target, method=None, limit=None, tolerance=None): "method='nearest' not implemented yet for CategoricalIndex" ) - if isinstance(target, CategoricalIndex) and self._values.is_dtype_equal(target): - if self._values.equals(target._values): - # we have the same codes - codes = target.codes - else: - codes = recode_for_categories( - target.codes, target.categories, self._values.categories - ) - else: - if isinstance(target, CategoricalIndex): - code_indexer = self.categories.get_indexer(target.categories) - codes = take_1d(code_indexer, target.codes, fill_value=-1) - else: - codes = self.categories.get_indexer(target) - + codes = 
self._values._validate_listlike(target._values) indexer, _ = self._engine.get_indexer_non_unique(codes) return ensure_platform_int(indexer) @@ -580,15 +565,7 @@ def get_indexer(self, target, method=None, limit=None, tolerance=None): def get_indexer_non_unique(self, target): target = ibase.ensure_index(target) - if isinstance(target, CategoricalIndex): - # Indexing on codes is more efficient if categories are the same: - if target.categories is self.categories: - target = target.codes - indexer, missing = self._engine.get_indexer_non_unique(target) - return ensure_platform_int(indexer), missing - target = target._values - - codes = self.categories.get_indexer(target) + codes = self._values._validate_listlike(target._values) indexer, missing = self._engine.get_indexer_non_unique(codes) return ensure_platform_int(indexer), missing diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py index 9f19ea9aefe09..d95355589fd0c 100644 --- a/pandas/core/reshape/merge.py +++ b/pandas/core/reshape/merge.py @@ -43,7 +43,6 @@ from pandas import Categorical, Index, MultiIndex from pandas.core import groupby import pandas.core.algorithms as algos -from pandas.core.arrays.categorical import recode_for_categories import pandas.core.common as com from pandas.core.construction import extract_array from pandas.core.frame import _merge_doc @@ -1936,12 +1935,8 @@ def _factorize_keys( ): assert isinstance(lk, Categorical) assert isinstance(rk, Categorical) - if lk.categories.equals(rk.categories): - # if we exactly match in categories, allow us to factorize on codes - rk = rk.codes - else: - # Same categories in different orders -> recode - rk = recode_for_categories(rk.codes, rk.categories, lk.categories) + # Cast rk to encoding so we can compare codes with lk + rk = lk._validate_listlike(rk) lk = ensure_int64(lk.codes) rk = ensure_int64(rk) From 3a6aedc535c4becb5f25c3022a220f279a8f73b9 Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Sat, 12 Sep 2020 14:14:36 -0700 Subject: [PATCH 512/632] CLN: simplify Categorical comparisons (#36250) --- pandas/core/arrays/categorical.py | 10 +--------- pandas/tests/arrays/categorical/test_operators.py | 7 +------ pandas/tests/indexes/categorical/test_category.py | 10 +--------- 3 files changed, 3 insertions(+), 24 deletions(-) diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index 803acce29a7e4..4fa6b73932aa4 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -76,17 +76,9 @@ def func(self, other): # the same (maybe up to ordering, depending on ordered) msg = "Categoricals can only be compared if 'categories' are the same." 
- if len(self.categories) != len(other.categories): - raise TypeError(msg + " Categories are different lengths") - elif self.ordered and not (self.categories == other.categories).all(): - raise TypeError(msg) - elif not set(self.categories) == set(other.categories): + if not self.is_dtype_equal(other): raise TypeError(msg) - if not (self.ordered == other.ordered): - raise TypeError( - "Categoricals can only be compared if 'ordered' is the same" - ) if not self.ordered and not self.categories.equals(other.categories): # both unordered and different order other_codes = _get_codes_for_values(other, self.categories) diff --git a/pandas/tests/arrays/categorical/test_operators.py b/pandas/tests/arrays/categorical/test_operators.py index bc5fb51883b3d..9d118f1ed8753 100644 --- a/pandas/tests/arrays/categorical/test_operators.py +++ b/pandas/tests/arrays/categorical/test_operators.py @@ -79,10 +79,6 @@ def test_comparisons(self): cat_rev_base2 = Categorical(["b", "b", "b"], categories=["c", "b", "a", "d"]) - msg = ( - "Categoricals can only be compared if 'categories' are the same. " - "Categories are different lengths" - ) with pytest.raises(TypeError, match=msg): cat_rev > cat_rev_base2 @@ -90,7 +86,6 @@ def test_comparisons(self): cat_unorderd = cat.set_ordered(False) assert not (cat > cat).any() - msg = "Categoricals can only be compared if 'ordered' is the same" with pytest.raises(TypeError, match=msg): cat > cat_unorderd @@ -321,7 +316,7 @@ def test_compare_different_lengths(self): c1 = Categorical([], categories=["a", "b"]) c2 = Categorical([], categories=["a"]) - msg = "Categories are different lengths" + msg = "Categoricals can only be compared if 'categories' are the same." with pytest.raises(TypeError, match=msg): c1 == c2 diff --git a/pandas/tests/indexes/categorical/test_category.py b/pandas/tests/indexes/categorical/test_category.py index b325edb321ed4..a3a06338a0277 100644 --- a/pandas/tests/indexes/categorical/test_category.py +++ b/pandas/tests/indexes/categorical/test_category.py @@ -402,15 +402,7 @@ def test_equals_categorical(self): with pytest.raises(ValueError, match="Lengths must match"): ci1 == Index(["a", "b", "c"]) - msg = ( - "categorical index comparisons must have the same categories " - "and ordered attributes" - "|" - "Categoricals can only be compared if 'categories' are the same. 
" - "Categories are different lengths" - "|" - "Categoricals can only be compared if 'ordered' is the same" - ) + msg = "Categoricals can only be compared if 'categories' are the same" with pytest.raises(TypeError, match=msg): ci1 == ci2 with pytest.raises(TypeError, match=msg): From 65f78c7628f69e42aea61b4e26ccafe4e5a09741 Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Sat, 12 Sep 2020 14:15:13 -0700 Subject: [PATCH 513/632] searchsorted numpy compat for Period dtype (#36254) --- doc/source/whatsnew/v1.2.0.rst | 2 +- pandas/core/arrays/period.py | 7 +++++++ pandas/tests/arrays/test_datetimelike.py | 2 +- pandas/tests/indexes/period/test_searchsorted.py | 9 ++++++++- 4 files changed, 17 insertions(+), 3 deletions(-) diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index f632d87a32a5a..c746b83c5f526 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -228,7 +228,7 @@ Datetimelike - Bug in :class:`DateOffset` where attributes reconstructed from pickle files differ from original objects when input values exceed normal ranges (e.g months=12) (:issue:`34511`) - Bug in :meth:`DatetimeIndex.get_slice_bound` where ``datetime.date`` objects were not accepted or naive :class:`Timestamp` with a tz-aware :class:`DatetimeIndex` (:issue:`35690`) - Bug in :meth:`DatetimeIndex.slice_locs` where ``datetime.date`` objects were not accepted (:issue:`34077`) -- Bug in :meth:`DatetimeIndex.searchsorted`, :meth:`TimedeltaIndex.searchsorted`, and :meth:`Series.searchsorted` with ``datetime64`` or ``timedelta64`` dtype placement of ``NaT`` values being inconsistent with ``NumPy`` (:issue:`36176`) +- Bug in :meth:`DatetimeIndex.searchsorted`, :meth:`TimedeltaIndex.searchsorted`, :meth:`PeriodIndex.searchsorted`, and :meth:`Series.searchsorted` with ``datetime64``, ``timedelta64`` or ``Period`` dtype placement of ``NaT`` values being inconsistent with ``NumPy`` (:issue:`36176`,:issue:`36254`) Timedelta ^^^^^^^^^ diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py index 865b1680c008a..44c0455018a42 100644 --- a/pandas/core/arrays/period.py +++ b/pandas/core/arrays/period.py @@ -587,6 +587,13 @@ def astype(self, dtype, copy: bool = True): return self.asfreq(dtype.freq) return super().astype(dtype, copy=copy) + def searchsorted(self, value, side="left", sorter=None): + value = self._validate_searchsorted_value(value).view("M8[ns]") + + # Cast to M8 to get datetime-like NaT placement + m8arr = self._ndarray.view("M8[ns]") + return m8arr.searchsorted(value, side=side, sorter=sorter) + # ------------------------------------------------------------------ # Arithmetic Methods diff --git a/pandas/tests/arrays/test_datetimelike.py b/pandas/tests/arrays/test_datetimelike.py index 9d316c38082af..624335fd78b0f 100644 --- a/pandas/tests/arrays/test_datetimelike.py +++ b/pandas/tests/arrays/test_datetimelike.py @@ -244,7 +244,7 @@ def test_searchsorted(self): # GH#29884 match numpy convention on whether NaT goes # at the end or the beginning result = arr.searchsorted(pd.NaT) - if np_version_under1p18 or self.array_cls is PeriodArray: + if np_version_under1p18: # Following numpy convention, NaT goes at the beginning # (unlike NaN which goes at the end) assert result == 0 diff --git a/pandas/tests/indexes/period/test_searchsorted.py b/pandas/tests/indexes/period/test_searchsorted.py index f5a2583bf2e10..f2950b9f6065c 100644 --- a/pandas/tests/indexes/period/test_searchsorted.py +++ b/pandas/tests/indexes/period/test_searchsorted.py @@ -2,6 +2,7 @@ 
import pytest from pandas._libs.tslibs import IncompatibleFrequency +from pandas.compat.numpy import np_version_under1p18 from pandas import NaT, Period, PeriodIndex, Series, array import pandas._testing as tm @@ -21,7 +22,13 @@ def test_searchsorted(self, freq): p2 = Period("2014-01-04", freq=freq) assert pidx.searchsorted(p2) == 3 - assert pidx.searchsorted(NaT) == 0 + if np_version_under1p18: + # GH#36254 + # Following numpy convention, NaT goes at the beginning + # (unlike NaN which goes at the end) + assert pidx.searchsorted(NaT) == 0 + else: + assert pidx.searchsorted(NaT) == 5 msg = "Input has different freq=H from PeriodArray" with pytest.raises(IncompatibleFrequency, match=msg): From a9f8d3c0de14cfbe4c76a695c995fab632cbcfad Mon Sep 17 00:00:00 2001 From: Asish Mahapatra Date: Sat, 12 Sep 2020 17:17:48 -0400 Subject: [PATCH 514/632] BUG: na parameter for str.startswith and str.endswith not propagating for Series with categorical dtype (#36249) --- doc/source/whatsnew/v1.1.3.rst | 2 +- pandas/core/strings.py | 2 +- pandas/tests/test_strings.py | 40 +++++++++++++++++++++++++++------- 3 files changed, 34 insertions(+), 10 deletions(-) diff --git a/doc/source/whatsnew/v1.1.3.rst b/doc/source/whatsnew/v1.1.3.rst index e3161012da5d1..c06990e3f2051 100644 --- a/doc/source/whatsnew/v1.1.3.rst +++ b/doc/source/whatsnew/v1.1.3.rst @@ -22,7 +22,7 @@ Fixed regressions Bug fixes ~~~~~~~~~ -- +- Bug in :meth:`Series.str.startswith` and :meth:`Series.str.endswith` with ``category`` dtype not propagating ``na`` parameter (:issue:`36241`) .. --------------------------------------------------------------------------- diff --git a/pandas/core/strings.py b/pandas/core/strings.py index 6702bf519c52e..4decd86764ccc 100644 --- a/pandas/core/strings.py +++ b/pandas/core/strings.py @@ -2050,7 +2050,7 @@ def wrapper2(self, pat, flags=0, **kwargs): @forbid_nonstring_types(forbidden_types, name=name) def wrapper3(self, pat, na=np.nan): result = f(self._parent, pat, na=na) - return self._wrap_result(result, returns_string=returns_string) + return self._wrap_result(result, returns_string=returns_string, fill_value=na) wrapper = wrapper3 if na else wrapper2 if flags else wrapper1 diff --git a/pandas/tests/test_strings.py b/pandas/tests/test_strings.py index d9396d70f9112..c792a48d3ef08 100644 --- a/pandas/tests/test_strings.py +++ b/pandas/tests/test_strings.py @@ -29,6 +29,8 @@ def assert_series_or_index_equal(left, right): ("decode", ("UTF-8",), {}), ("encode", ("UTF-8",), {}), ("endswith", ("a",), {}), + ("endswith", ("a",), {"na": True}), + ("endswith", ("a",), {"na": False}), ("extract", ("([a-z]*)",), {"expand": False}), ("extract", ("([a-z]*)",), {"expand": True}), ("extractall", ("([a-z]*)",), {}), @@ -58,6 +60,8 @@ def assert_series_or_index_equal(left, right): ("split", (" ",), {"expand": False}), ("split", (" ",), {"expand": True}), ("startswith", ("a",), {}), + ("startswith", ("a",), {"na": True}), + ("startswith", ("a",), {"na": False}), # translating unicode points of "a" to "d" ("translate", ({97: 100},), {}), ("wrap", (2,), {}), @@ -838,15 +842,23 @@ def test_contains_for_object_category(self): expected = Series([True, False, False, True, False]) tm.assert_series_equal(result, expected) - def test_startswith(self): - values = Series(["om", np.nan, "foo_nom", "nom", "bar_foo", np.nan, "foo"]) + @pytest.mark.parametrize("dtype", [None, "category"]) + @pytest.mark.parametrize("null_value", [None, np.nan, pd.NA]) + @pytest.mark.parametrize("na", [True, False]) + def test_startswith(self, dtype, 
null_value, na): + # add category dtype parametrizations for GH-36241 + values = Series( + ["om", null_value, "foo_nom", "nom", "bar_foo", null_value, "foo"], + dtype=dtype, + ) result = values.str.startswith("foo") exp = Series([False, np.nan, True, False, False, np.nan, True]) tm.assert_series_equal(result, exp) - result = values.str.startswith("foo", na=True) - tm.assert_series_equal(result, exp.fillna(True).astype(bool)) + result = values.str.startswith("foo", na=na) + exp = Series([False, na, True, False, False, na, True]) + tm.assert_series_equal(result, exp) # mixed mixed = np.array( @@ -867,15 +879,23 @@ def test_startswith(self): ) tm.assert_series_equal(rs, xp) - def test_endswith(self): - values = Series(["om", np.nan, "foo_nom", "nom", "bar_foo", np.nan, "foo"]) + @pytest.mark.parametrize("dtype", [None, "category"]) + @pytest.mark.parametrize("null_value", [None, np.nan, pd.NA]) + @pytest.mark.parametrize("na", [True, False]) + def test_endswith(self, dtype, null_value, na): + # add category dtype parametrizations for GH-36241 + values = Series( + ["om", null_value, "foo_nom", "nom", "bar_foo", null_value, "foo"], + dtype=dtype, + ) result = values.str.endswith("foo") exp = Series([False, np.nan, False, False, True, np.nan, True]) tm.assert_series_equal(result, exp) - result = values.str.endswith("foo", na=False) - tm.assert_series_equal(result, exp.fillna(False).astype(bool)) + result = values.str.endswith("foo", na=na) + exp = Series([False, na, False, False, True, na, True]) + tm.assert_series_equal(result, exp) # mixed mixed = np.array( @@ -3552,6 +3572,10 @@ def test_string_array(any_string_method): assert result.dtype == "boolean" result = result.astype(object) + elif expected.dtype == "bool": + assert result.dtype == "boolean" + result = result.astype("bool") + elif expected.dtype == "float" and expected.isna().any(): assert result.dtype == "Int64" result = result.astype("float") From cb58dbbb4adbce80a82fa30b853300310617122f Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Sat, 12 Sep 2020 14:20:29 -0700 Subject: [PATCH 515/632] PERF: JoinUnit.is_na (#36312) --- pandas/core/dtypes/missing.py | 39 ++++++++++++++++++++++++++++++++- pandas/core/internals/concat.py | 13 ++++------- 2 files changed, 42 insertions(+), 10 deletions(-) diff --git a/pandas/core/dtypes/missing.py b/pandas/core/dtypes/missing.py index 163500525dbd8..d2e4974741b88 100644 --- a/pandas/core/dtypes/missing.py +++ b/pandas/core/dtypes/missing.py @@ -9,7 +9,7 @@ from pandas._libs import lib import pandas._libs.missing as libmissing -from pandas._libs.tslibs import NaT, iNaT +from pandas._libs.tslibs import NaT, Period, iNaT from pandas._typing import ArrayLike, DtypeObj from pandas.core.dtypes.common import ( @@ -43,6 +43,9 @@ isposinf_scalar = libmissing.isposinf_scalar isneginf_scalar = libmissing.isneginf_scalar +nan_checker = np.isnan +INF_AS_NA = False + def isna(obj): """ @@ -188,6 +191,12 @@ def _use_inf_as_na(key): """ inf_as_na = get_option(key) globals()["_isna"] = partial(_isna, inf_as_na=inf_as_na) + if inf_as_na: + globals()["nan_checker"] = lambda x: ~np.isfinite(x) + globals()["INF_AS_NA"] = True + else: + globals()["nan_checker"] = np.isnan + globals()["INF_AS_NA"] = False def _isna_ndarraylike(obj, inf_as_na: bool = False): @@ -602,3 +611,31 @@ def is_valid_nat_for_dtype(obj, dtype: DtypeObj) -> bool: # must be PeriodDType return not isinstance(obj, (np.datetime64, np.timedelta64)) + + +def isna_all(arr: ArrayLike) -> bool: + """ + Optimized equivalent to isna(arr).all() + """ + 
total_len = len(arr)
+
+    # Usually it's enough to check only a small fraction of values to see if
+    # a block is NOT null; chunks should help in such cases.
+    # Parameters 1000 and 40 were chosen arbitrarily.
+    chunk_len = max(total_len // 40, 1000)
+
+    dtype = arr.dtype
+    if dtype.kind == "f":
+        checker = nan_checker
+
+    elif dtype.kind in ["m", "M"] or dtype.type is Period:
+        checker = lambda x: np.asarray(x.view("i8")) == iNaT
+
+    else:
+        checker = lambda x: _isna_ndarraylike(x, inf_as_na=INF_AS_NA)
+
+    for i in range(0, total_len, chunk_len):
+        if not checker(arr[i : i + chunk_len]).all():
+            return False
+
+    return True
diff --git a/pandas/core/internals/concat.py b/pandas/core/internals/concat.py
index 513c5fed1ca62..f5d0c921e1006 100644
--- a/pandas/core/internals/concat.py
+++ b/pandas/core/internals/concat.py
@@ -21,7 +21,7 @@
     is_timedelta64_dtype,
 )
 from pandas.core.dtypes.concat import concat_compat
-from pandas.core.dtypes.missing import isna
+from pandas.core.dtypes.missing import isna_all
 
 import pandas.core.algorithms as algos
 from pandas.core.arrays import DatetimeArray, ExtensionArray
@@ -223,13 +223,8 @@ def is_na(self):
             values_flat = values
         else:
             values_flat = values.ravel(order="K")
-        total_len = values_flat.shape[0]
-        chunk_len = max(total_len // 40, 1000)
-        for i in range(0, total_len, chunk_len):
-            if not isna(values_flat[i : i + chunk_len]).all():
-                return False
 
-        return True
+        return isna_all(values_flat)
 
     def get_reindexed_values(self, empty_dtype, upcasted_na):
         if upcasted_na is None:
@@ -474,8 +469,8 @@ def _is_uniform_join_units(join_units: List[JoinUnit]) -> bool:
     #  cannot necessarily join
     return (
         # all blocks need to have the same type
-        all(type(ju.block) is type(join_units[0].block) for ju in join_units)
-        and  # noqa
+        all(type(ju.block) is type(join_units[0].block) for ju in join_units)  # noqa
+        and
         # no blocks that would get missing values (can lead to type upcasts)
         # unless we're an extension dtype.
all(not ju.is_na or ju.block.is_extension for ju in join_units) From 6100425aae528542291a5da5864dde0299d0f31a Mon Sep 17 00:00:00 2001 From: Terji Petersen Date: Sat, 12 Sep 2020 22:21:15 +0100 Subject: [PATCH 516/632] PERF: creating string Series/Arrays from sequence with many strings (#36304) --- doc/source/whatsnew/v1.2.0.rst | 1 + pandas/_libs/lib.pyx | 4 ++++ 2 files changed, 5 insertions(+) diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index c746b83c5f526..00cbb248e4690 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -205,6 +205,7 @@ Deprecations Performance improvements ~~~~~~~~~~~~~~~~~~~~~~~~ +- Performance improvements when creating Series with dtype `str` or :class:`StringDtype` from array with many string elements (:issue:`36304`) - Performance improvement in :meth:`GroupBy.agg` with the ``numba`` engine (:issue:`35759`) - diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx index 7464fafee2b94..cc63df90a9a9f 100644 --- a/pandas/_libs/lib.pyx +++ b/pandas/_libs/lib.pyx @@ -655,6 +655,10 @@ cpdef ndarray[object] ensure_string_array( for i in range(n): val = result[i] + + if isinstance(val, str): + continue + if not checknull(val): result[i] = str(val) else: From c6e3af7170f87d387fa11c8cda22aa6da28de495 Mon Sep 17 00:00:00 2001 From: Yanxian Lin Date: Sat, 12 Sep 2020 14:23:20 -0700 Subject: [PATCH 517/632] TST: add test case for sort_index on multiindexed Frame with sparse columns (#36236) --- pandas/tests/frame/methods/test_sort_index.py | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/pandas/tests/frame/methods/test_sort_index.py b/pandas/tests/frame/methods/test_sort_index.py index dcc33428d18a5..a106702aff807 100644 --- a/pandas/tests/frame/methods/test_sort_index.py +++ b/pandas/tests/frame/methods/test_sort_index.py @@ -739,3 +739,18 @@ def test_changes_length_raises(self): df = pd.DataFrame({"A": [1, 2, 3]}) with pytest.raises(ValueError, match="change the shape"): df.sort_index(key=lambda x: x[:1]) + + def test_sort_index_multiindex_sparse_column(self): + # GH 29735, testing that sort_index on a multiindexed frame with sparse + # columns fills with 0. 
+ expected = pd.DataFrame( + { + i: pd.array([0.0, 0.0, 0.0, 0.0], dtype=pd.SparseDtype("float64", 0.0)) + for i in range(0, 4) + }, + index=pd.MultiIndex.from_product([[1, 2], [1, 2]]), + ) + + result = expected.sort_index(level=0) + + tm.assert_frame_equal(result, expected) From c10462245a388655145b9d2fdd2d1765b8057aeb Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Sat, 12 Sep 2020 14:28:00 -0700 Subject: [PATCH 518/632] REF: use BlockManager.apply in csv code (#36150) --- pandas/core/internals/blocks.py | 16 +++++++++------- pandas/io/formats/csvs.py | 9 ++++----- 2 files changed, 13 insertions(+), 12 deletions(-) diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index c8da04fbbf987..eb5b887c8b0cb 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -593,7 +593,7 @@ def astype(self, dtype, copy: bool = False, errors: str = "raise"): # use native type formatting for datetime/tz/timedelta if self.is_datelike: - values = self.to_native_types() + values = self.to_native_types().values # astype formatting else: @@ -684,7 +684,7 @@ def to_native_types(self, na_rep="nan", quoting=None, **kwargs): values = np.array(values, dtype="object") values[mask] = na_rep - return values + return self.make_block(values) # block actions # def copy(self, deep: bool = True): @@ -1774,7 +1774,7 @@ def to_native_types(self, na_rep="nan", quoting=None, **kwargs): # TODO(EA2D): reshape not needed with 2D EAs # we are expected to return a 2-d ndarray - return values.reshape(1, len(values)) + return self.make_block(values) def take_nd( self, indexer, axis: int = 0, new_mgr_locs=None, fill_value=lib.no_default @@ -2021,7 +2021,7 @@ def to_native_types( values = np.array(values, dtype="object") values[mask] = na_rep - return values + return self.make_block(values) from pandas.io.formats.format import FloatArrayFormatter @@ -2033,7 +2033,8 @@ def to_native_types( quoting=quoting, fixed_width=False, ) - return formatter.get_result_as_array() + res = formatter.get_result_as_array() + return self.make_block(res) class ComplexBlock(FloatOrComplexBlock): @@ -2192,7 +2193,7 @@ def to_native_types(self, na_rep="NaT", date_format=None, **kwargs): result = dta._format_native_types( na_rep=na_rep, date_format=date_format, **kwargs ) - return np.atleast_2d(result) + return self.make_block(result) def set(self, locs, values): """ @@ -2408,7 +2409,8 @@ def fillna(self, value, **kwargs): def to_native_types(self, na_rep="NaT", **kwargs): """ convert to our native types format """ tda = self.array_values() - return tda._format_native_types(na_rep, **kwargs) + res = tda._format_native_types(na_rep, **kwargs) + return self.make_block(res) class BoolBlock(NumericBlock): diff --git a/pandas/io/formats/csvs.py b/pandas/io/formats/csvs.py index 90ab6f61f4d74..1bda16d126905 100644 --- a/pandas/io/formats/csvs.py +++ b/pandas/io/formats/csvs.py @@ -341,12 +341,11 @@ def _save_chunk(self, start_i: int, end_i: int) -> None: slicer = slice(start_i, end_i) df = self.obj.iloc[slicer] + mgr = df._mgr - for block in df._mgr.blocks: - d = block.to_native_types(**self._number_format) - - for col_loc, col in zip(block.mgr_locs, d): - data[col_loc] = col + res = mgr.apply("to_native_types", **self._number_format) + for i in range(len(res.items)): + data[i] = res.iget_values(i) ix = self.data_index.to_native_types(slicer=slicer, **self._number_format) libwriters.write_csv_rows(data, ix, self.nlevels, self.cols, self.writer) From 4729d8f6e6e7c0e71fee3808ea8dee5106a6b02d Mon Sep 17 00:00:00 
2001 From: jbrockmendel Date: Sat, 12 Sep 2020 14:30:32 -0700 Subject: [PATCH 519/632] STY/WIP: check for private imports/lookups (#36055) --- Makefile | 6 +++ ci/code_checks.sh | 14 ++++-- pandas/core/arrays/datetimelike.py | 10 ++-- pandas/core/arrays/integer.py | 8 ++-- pandas/core/dtypes/cast.py | 6 ++- pandas/core/groupby/groupby.py | 6 +-- pandas/core/indexes/datetimes.py | 4 +- pandas/core/resample.py | 9 +++- pandas/core/window/ewm.py | 4 +- pandas/core/window/expanding.py | 4 +- pandas/core/window/rolling.py | 6 +-- pandas/io/formats/format.py | 10 ++-- scripts/validate_unwanted_patterns.py | 66 ++++++++++++++++++++++++++- 13 files changed, 119 insertions(+), 34 deletions(-) diff --git a/Makefile b/Makefile index 4a9a48992f92f..b915d8840cd8d 100644 --- a/Makefile +++ b/Makefile @@ -32,3 +32,9 @@ check: --included-file-extensions="py" \ --excluded-file-paths=pandas/tests,asv_bench/,pandas/_vendored \ pandas/ + + python3 scripts/validate_unwanted_patterns.py \ + --validation-type="private_import_across_module" \ + --included-file-extensions="py" \ + --excluded-file-paths=pandas/tests,asv_bench/,pandas/_vendored,doc/ + pandas/ diff --git a/ci/code_checks.sh b/ci/code_checks.sh index 875f1dbb83ce3..54aa830379c07 100755 --- a/ci/code_checks.sh +++ b/ci/code_checks.sh @@ -116,11 +116,19 @@ if [[ -z "$CHECK" || "$CHECK" == "lint" ]]; then fi RET=$(($RET + $?)) ; echo $MSG "DONE" - MSG='Check for use of private module attribute access' ; echo $MSG + MSG='Check for import of private attributes across modules' ; echo $MSG if [[ "$GITHUB_ACTIONS" == "true" ]]; then - $BASE_DIR/scripts/validate_unwanted_patterns.py --validation-type="private_function_across_module" --included-file-extensions="py" --excluded-file-paths=pandas/tests,asv_bench/,pandas/_vendored --format="##[error]{source_path}:{line_number}:{msg}" pandas/ + $BASE_DIR/scripts/validate_unwanted_patterns.py --validation-type="private_import_across_module" --included-file-extensions="py" --excluded-file-paths=pandas/tests,asv_bench/,pandas/_vendored --format="##[error]{source_path}:{line_number}:{msg}" pandas/ else - $BASE_DIR/scripts/validate_unwanted_patterns.py --validation-type="private_function_across_module" --included-file-extensions="py" --excluded-file-paths=pandas/tests,asv_bench/,pandas/_vendored pandas/ + $BASE_DIR/scripts/validate_unwanted_patterns.py --validation-type="private_import_across_module" --included-file-extensions="py" --excluded-file-paths=pandas/tests,asv_bench/,pandas/_vendored pandas/ + fi + RET=$(($RET + $?)) ; echo $MSG "DONE" + + MSG='Check for use of private functions across modules' ; echo $MSG + if [[ "$GITHUB_ACTIONS" == "true" ]]; then + $BASE_DIR/scripts/validate_unwanted_patterns.py --validation-type="private_function_across_module" --included-file-extensions="py" --excluded-file-paths=pandas/tests,asv_bench/,pandas/_vendored,doc/ --format="##[error]{source_path}:{line_number}:{msg}" pandas/ + else + $BASE_DIR/scripts/validate_unwanted_patterns.py --validation-type="private_function_across_module" --included-file-extensions="py" --excluded-file-paths=pandas/tests,asv_bench/,pandas/_vendored,doc/ pandas/ fi RET=$(($RET + $?)) ; echo $MSG "DONE" diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index 6302b48cb1978..b013246e724de 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -54,7 +54,7 @@ from pandas.core import missing, nanops, ops from pandas.core.algorithms import checked_add_with_arr, unique1d, value_counts -from 
pandas.core.arrays._mixins import _T, NDArrayBackedExtensionArray +from pandas.core.arrays._mixins import NDArrayBackedExtensionArray from pandas.core.arrays.base import ExtensionOpsMixin import pandas.core.common as com from pandas.core.construction import array, extract_array @@ -472,11 +472,11 @@ class DatetimeLikeArrayMixin( def _ndarray(self) -> np.ndarray: return self._data - def _from_backing_data(self: _T, arr: np.ndarray) -> _T: + def _from_backing_data( + self: DatetimeLikeArrayT, arr: np.ndarray + ) -> DatetimeLikeArrayT: # Note: we do not retain `freq` - return type(self)._simple_new( # type: ignore[attr-defined] - arr, dtype=self.dtype - ) + return type(self)._simple_new(arr, dtype=self.dtype) # ------------------------------------------------------------------ diff --git a/pandas/core/arrays/integer.py b/pandas/core/arrays/integer.py index d83ff91a1315f..dc08e018397bc 100644 --- a/pandas/core/arrays/integer.py +++ b/pandas/core/arrays/integer.py @@ -106,7 +106,7 @@ def _get_common_dtype(self, dtypes: List[DtypeObj]) -> Optional[DtypeObj]: [t.numpy_dtype if isinstance(t, BaseMaskedDtype) else t for t in dtypes], [] ) if np.issubdtype(np_dtype, np.integer): - return _dtypes[str(np_dtype)] + return STR_TO_DTYPE[str(np_dtype)] return None def __from_arrow__( @@ -214,7 +214,7 @@ def coerce_to_array( if not issubclass(type(dtype), _IntegerDtype): try: - dtype = _dtypes[str(np.dtype(dtype))] + dtype = STR_TO_DTYPE[str(np.dtype(dtype))] except KeyError as err: raise ValueError(f"invalid dtype specified {dtype}") from err @@ -354,7 +354,7 @@ class IntegerArray(BaseMaskedArray): @cache_readonly def dtype(self) -> _IntegerDtype: - return _dtypes[str(self._data.dtype)] + return STR_TO_DTYPE[str(self._data.dtype)] def __init__(self, values: np.ndarray, mask: np.ndarray, copy: bool = False): if not (isinstance(values, np.ndarray) and values.dtype.kind in ["i", "u"]): @@ -735,7 +735,7 @@ class UInt64Dtype(_IntegerDtype): __doc__ = _dtype_docstring.format(dtype="uint64") -_dtypes: Dict[str, _IntegerDtype] = { +STR_TO_DTYPE: Dict[str, _IntegerDtype] = { "int8": Int8Dtype(), "int16": Int16Dtype(), "int32": Int32Dtype(), diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index ba1b0b075936d..64ccc0be0a25d 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -1151,9 +1151,11 @@ def convert_dtypes( target_int_dtype = "Int64" if is_integer_dtype(input_array.dtype): - from pandas.core.arrays.integer import _dtypes + from pandas.core.arrays.integer import STR_TO_DTYPE - inferred_dtype = _dtypes.get(input_array.dtype.name, target_int_dtype) + inferred_dtype = STR_TO_DTYPE.get( + input_array.dtype.name, target_int_dtype + ) if not is_integer_dtype(input_array.dtype) and is_numeric_dtype( input_array.dtype ): diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index 1e3e56f4ff09f..8a55d438cf8d4 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -459,7 +459,7 @@ def f(self): @contextmanager -def group_selection_context(groupby: "_GroupBy"): +def group_selection_context(groupby: "BaseGroupBy"): """ Set / reset the group_selection_context. 
""" @@ -479,7 +479,7 @@ def group_selection_context(groupby: "_GroupBy"): ] -class _GroupBy(PandasObject, SelectionMixin, Generic[FrameOrSeries]): +class BaseGroupBy(PandasObject, SelectionMixin, Generic[FrameOrSeries]): _group_selection = None _apply_allowlist: FrozenSet[str] = frozenset() @@ -1212,7 +1212,7 @@ def _apply_filter(self, indices, dropna): OutputFrameOrSeries = TypeVar("OutputFrameOrSeries", bound=NDFrame) -class GroupBy(_GroupBy[FrameOrSeries]): +class GroupBy(BaseGroupBy[FrameOrSeries]): """ Class for grouping and aggregating relational data. diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index f0b80c2852bd5..f269495f6011a 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -312,9 +312,9 @@ def _is_dates_only(self) -> bool: ------- bool """ - from pandas.io.formats.format import _is_dates_only + from pandas.io.formats.format import is_dates_only - return self.tz is None and _is_dates_only(self._values) + return self.tz is None and is_dates_only(self._values) def __reduce__(self): diff --git a/pandas/core/resample.py b/pandas/core/resample.py index a2bf631959dd1..4ba253e76128e 100644 --- a/pandas/core/resample.py +++ b/pandas/core/resample.py @@ -26,7 +26,12 @@ from pandas.core.generic import NDFrame, _shared_docs from pandas.core.groupby.base import GroupByMixin from pandas.core.groupby.generic import SeriesGroupBy -from pandas.core.groupby.groupby import GroupBy, _GroupBy, _pipe_template, get_groupby +from pandas.core.groupby.groupby import ( + BaseGroupBy, + GroupBy, + _pipe_template, + get_groupby, +) from pandas.core.groupby.grouper import Grouper from pandas.core.groupby.ops import BinGrouper from pandas.core.indexes.api import Index @@ -40,7 +45,7 @@ _shared_docs_kwargs: Dict[str, str] = dict() -class Resampler(_GroupBy, ShallowMixin): +class Resampler(BaseGroupBy, ShallowMixin): """ Class for resampling datetimelike data, a groupby-like operation. See aggregate, transform, and apply functions on this object. diff --git a/pandas/core/window/ewm.py b/pandas/core/window/ewm.py index 2bd36d8bff155..4282cb41c4e91 100644 --- a/pandas/core/window/ewm.py +++ b/pandas/core/window/ewm.py @@ -15,7 +15,7 @@ import pandas.core.common as common from pandas.core.window.common import _doc_template, _shared_docs, zsqrt -from pandas.core.window.rolling import _Rolling, flex_binary_moment +from pandas.core.window.rolling import RollingMixin, flex_binary_moment _bias_template = """ Parameters @@ -60,7 +60,7 @@ def get_center_of_mass( return float(comass) -class ExponentialMovingWindow(_Rolling): +class ExponentialMovingWindow(RollingMixin): r""" Provide exponential weighted (EW) functions. diff --git a/pandas/core/window/expanding.py b/pandas/core/window/expanding.py index ce4ab2f98c23d..46e002324ec75 100644 --- a/pandas/core/window/expanding.py +++ b/pandas/core/window/expanding.py @@ -5,10 +5,10 @@ from pandas.util._decorators import Appender, Substitution, doc from pandas.core.window.common import WindowGroupByMixin, _doc_template, _shared_docs -from pandas.core.window.rolling import _Rolling_and_Expanding +from pandas.core.window.rolling import RollingAndExpandingMixin -class Expanding(_Rolling_and_Expanding): +class Expanding(RollingAndExpandingMixin): """ Provide expanding transformations. 
diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py index 5a7482076903c..648ab4d25be83 100644 --- a/pandas/core/window/rolling.py +++ b/pandas/core/window/rolling.py @@ -1214,13 +1214,13 @@ def std(self, ddof=1, *args, **kwargs): return zsqrt(self.var(ddof=ddof, name="std", **kwargs)) -class _Rolling(_Window): +class RollingMixin(_Window): @property def _constructor(self): return Rolling -class _Rolling_and_Expanding(_Rolling): +class RollingAndExpandingMixin(RollingMixin): _shared_docs["count"] = dedent( r""" @@ -1917,7 +1917,7 @@ def _get_corr(a, b): ) -class Rolling(_Rolling_and_Expanding): +class Rolling(RollingAndExpandingMixin): @cache_readonly def is_datetimelike(self) -> bool: return isinstance( diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py index 6781d98ded41d..77f2a53fc7fab 100644 --- a/pandas/io/formats/format.py +++ b/pandas/io/formats/format.py @@ -1586,7 +1586,7 @@ def format_percentiles( return [i + "%" for i in out] -def _is_dates_only( +def is_dates_only( values: Union[np.ndarray, DatetimeArray, Index, DatetimeIndex] ) -> bool: # return a boolean if we are only dates (and don't have a timezone) @@ -1658,8 +1658,8 @@ def get_format_datetime64_from_values( # only accepts 1D values values = values.ravel() - is_dates_only = _is_dates_only(values) - if is_dates_only: + ido = is_dates_only(values) + if ido: return date_format or "%Y-%m-%d" return date_format @@ -1668,9 +1668,9 @@ class Datetime64TZFormatter(Datetime64Formatter): def _format_strings(self) -> List[str]: """ we by definition have a TZ """ values = self.values.astype(object) - is_dates_only = _is_dates_only(values) + ido = is_dates_only(values) formatter = self.formatter or get_format_datetime64( - is_dates_only, date_format=self.date_format + ido, date_format=self.date_format ) fmt_values = [formatter(x) for x in values] diff --git a/scripts/validate_unwanted_patterns.py b/scripts/validate_unwanted_patterns.py index 2add2b8c62a4e..4a0e859535215 100755 --- a/scripts/validate_unwanted_patterns.py +++ b/scripts/validate_unwanted_patterns.py @@ -18,6 +18,39 @@ import tokenize from typing import IO, Callable, FrozenSet, Iterable, List, Set, Tuple +PRIVATE_IMPORTS_TO_IGNORE: Set[str] = { + "_extension_array_shared_docs", + "_index_shared_docs", + "_interval_shared_docs", + "_merge_doc", + "_shared_docs", + "_apply_docs", + "_new_Index", + "_new_PeriodIndex", + "_doc_template", + "_agg_template", + "_pipe_template", + "_get_version", + "__main__", + "_transform_template", + "_arith_doc_FRAME", + "_flex_comp_doc_FRAME", + "_make_flex_doc", + "_op_descriptions", + "_IntegerDtype", + "_use_inf_as_na", + "_get_plot_backend", + "_matplotlib", + "_arrow_utils", + "_registry", + "_get_offset", # TODO: remove after get_offset deprecation enforced + "_test_parse_iso8601", + "_json_normalize", # TODO: remove after deprecation is enforced + "_testing", + "_test_decorators", + "__version__", # check np.__version__ in compat.numpy.function +} + def _get_literal_string_prefix_len(token_string: str) -> int: """ @@ -164,6 +197,36 @@ def private_function_across_module(file_obj: IO[str]) -> Iterable[Tuple[int, str yield (node.lineno, f"Private function '{module_name}.{function_name}'") +def private_import_across_module(file_obj: IO[str]) -> Iterable[Tuple[int, str]]: + """ + Checking that a private function is not imported across modules. + Parameters + ---------- + file_obj : IO + File-like object containing the Python code to validate. 
+    Yields
+    ------
+    line_number : int
+        Line number of the import statement that imports the private function.
+    msg : str
+        Explanation of the error.
+    """
+    contents = file_obj.read()
+    tree = ast.parse(contents)
+
+    for node in ast.walk(tree):
+        if not (isinstance(node, ast.Import) or isinstance(node, ast.ImportFrom)):
+            continue
+
+        for module in node.names:
+            module_name = module.name.split(".")[-1]
+            if module_name in PRIVATE_IMPORTS_TO_IGNORE:
+                continue
+
+            if module_name.startswith("_"):
+                yield (node.lineno, f"Import of internal function {repr(module_name)}")
+
+
 def strings_to_concatenate(file_obj: IO[str]) -> Iterable[Tuple[int, str]]:
     """
     This test case is necessary after 'Black' (https://github.com/psf/black),
@@ -419,6 +482,7 @@ def main(
     available_validation_types: List[str] = [
         "bare_pytest_raises",
         "private_function_across_module",
+        "private_import_across_module",
         "strings_to_concatenate",
         "strings_with_wrong_placed_whitespace",
     ]
@@ -449,7 +513,7 @@ def main(
     parser.add_argument(
         "--excluded-file-paths",
         default="asv_bench/env",
-        help="Comma separated file extensions to check.",
+        help="Comma separated file paths to exclude.",
     )
 
     args = parser.parse_args()

From bed96566728b642572bb0880aee35c03734667e9 Mon Sep 17 00:00:00 2001
From: Sam Ezebunandu
Date: Sat, 12 Sep 2020 15:31:58 -0600
Subject: [PATCH 520/632] DOC: Fix DataFrame.query contradiction on use of Python keywords as identifiers (#36311)

---
 pandas/core/frame.py | 11 ++++++-----
 1 file changed, 6 insertions(+), 5 deletions(-)

diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index b03593ad8afe1..b8b49156a0967 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -3238,11 +3238,12 @@ def query(self, expr, inplace=False, **kwargs):
         in the environment by prefixing them with an '@' character like
         ``@a + b``.
 
-        You can refer to column names that contain spaces or operators by
-        surrounding them in backticks. This way you can also escape
-        names that start with a digit, or those that are a Python keyword.
-        Basically when it is not valid Python identifier. See notes down
-        for more details.
+        You can refer to column names that are not valid Python variable names
+        by surrounding them in backticks. Thus, column names containing spaces
+        or punctuation (besides underscores) or starting with digits must be
+        surrounded by backticks. (For example, a column named "Area (cm^2)" would
+        be referenced as `Area (cm^2)`). Column names which are Python keywords
+        (like "list", "for", "import", etc.) cannot be used.
 
         For example, if one of your columns is called ``a a`` and you
         want to sum it with ``b``, your query should be ```a a` + b``.
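A minimal sketch of the behaviour the reworded docstring describes, reusing the
"Area (cm^2)" column name from the example above; the data values here are made
up for illustration:

    import pandas as pd

    df = pd.DataFrame({"Area (cm^2)": [4.0, 9.0, 16.0], "b": [1, 2, 3]})

    # Backticks let query() reference a column whose name is not a valid
    # Python identifier; plain identifiers like "b" need no quoting.
    result = df.query("`Area (cm^2)` > 5 and b < 3")  # selects the 9.0 row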
From ab5b38d560e266ac0d813ffe83d025420fbf98af Mon Sep 17 00:00:00 2001 From: Richard Shadrach <45562402+rhshadrach@users.noreply.github.com> Date: Sat, 12 Sep 2020 17:36:51 -0400 Subject: [PATCH 521/632] BUG/CLN: Decouple Series/DataFrame.transform (#35964) --- doc/source/whatsnew/v1.2.0.rst | 1 + pandas/core/aggregation.py | 98 +++++++- pandas/core/base.py | 4 +- pandas/core/frame.py | 16 +- pandas/core/generic.py | 74 ------ pandas/core/series.py | 14 +- pandas/core/shared_docs.py | 69 ++++++ .../tests/frame/apply/test_frame_transform.py | 227 ++++++++++++++---- .../tests/series/apply/test_series_apply.py | 5 +- .../series/apply/test_series_transform.py | 168 ++++++++++--- 10 files changed, 507 insertions(+), 169 deletions(-) diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index 00cbb248e4690..af2960b5038b2 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -344,6 +344,7 @@ Other ^^^^^ - Bug in :meth:`DataFrame.replace` and :meth:`Series.replace` incorrectly raising ``AssertionError`` instead of ``ValueError`` when invalid parameter combinations are passed (:issue:`36045`) - Bug in :meth:`DataFrame.replace` and :meth:`Series.replace` with numeric values and string ``to_replace`` (:issue:`34789`) +- Bug in :meth:`Series.transform` would give incorrect results or raise when the argument ``func`` was dictionary (:issue:`35811`) - .. --------------------------------------------------------------------------- diff --git a/pandas/core/aggregation.py b/pandas/core/aggregation.py index 7ca68d8289bd5..8b74fe01d0dc0 100644 --- a/pandas/core/aggregation.py +++ b/pandas/core/aggregation.py @@ -18,9 +18,10 @@ Union, ) -from pandas._typing import AggFuncType, FrameOrSeries, Label +from pandas._typing import AggFuncType, Axis, FrameOrSeries, Label from pandas.core.dtypes.common import is_dict_like, is_list_like +from pandas.core.dtypes.generic import ABCDataFrame, ABCSeries from pandas.core.base import SpecificationError import pandas.core.common as com @@ -384,3 +385,98 @@ def validate_func_kwargs( if not columns: raise TypeError(no_arg_message) return columns, func + + +def transform( + obj: FrameOrSeries, func: AggFuncType, axis: Axis, *args, **kwargs, +) -> FrameOrSeries: + """ + Transform a DataFrame or Series + + Parameters + ---------- + obj : DataFrame or Series + Object to compute the transform on. + func : string, function, list, or dictionary + Function(s) to compute the transform with. + axis : {0 or 'index', 1 or 'columns'} + Axis along which the function is applied: + + * 0 or 'index': apply function to each column. + * 1 or 'columns': apply function to each row. + + Returns + ------- + DataFrame or Series + Result of applying ``func`` along the given axis of the + Series or DataFrame. + + Raises + ------ + ValueError + If the transform function fails or does not transform. 
+ """ + from pandas.core.reshape.concat import concat + + is_series = obj.ndim == 1 + + if obj._get_axis_number(axis) == 1: + assert not is_series + return transform(obj.T, func, 0, *args, **kwargs).T + + if isinstance(func, list): + if is_series: + func = {com.get_callable_name(v) or v: v for v in func} + else: + func = {col: func for col in obj} + + if isinstance(func, dict): + if not is_series: + cols = sorted(set(func.keys()) - set(obj.columns)) + if len(cols) > 0: + raise SpecificationError(f"Column(s) {cols} do not exist") + + if any(isinstance(v, dict) for v in func.values()): + # GH 15931 - deprecation of renaming keys + raise SpecificationError("nested renamer is not supported") + + results = {} + for name, how in func.items(): + colg = obj._gotitem(name, ndim=1) + try: + results[name] = transform(colg, how, 0, *args, **kwargs) + except Exception as e: + if str(e) == "Function did not transform": + raise e + + # combine results + if len(results) == 0: + raise ValueError("Transform function failed") + return concat(results, axis=1) + + # func is either str or callable + try: + if isinstance(func, str): + result = obj._try_aggregate_string_function(func, *args, **kwargs) + else: + f = obj._get_cython_func(func) + if f and not args and not kwargs: + result = getattr(obj, f)() + else: + try: + result = obj.apply(func, args=args, **kwargs) + except Exception: + result = func(obj, *args, **kwargs) + except Exception: + raise ValueError("Transform function failed") + + # Functions that transform may return empty Series/DataFrame + # when the dtype is not appropriate + if isinstance(result, (ABCSeries, ABCDataFrame)) and result.empty: + raise ValueError("Transform function failed") + if not isinstance(result, (ABCSeries, ABCDataFrame)) or not result.index.equals( + obj.index + ): + raise ValueError("Function did not transform") + + return result diff --git a/pandas/core/base.py b/pandas/core/base.py index 1926803d8f04b..a688302b99724 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -4,7 +4,7 @@ import builtins import textwrap -from typing import Any, Dict, FrozenSet, List, Optional, Union +from typing import Any, Callable, Dict, FrozenSet, List, Optional, Union import numpy as np @@ -560,7 +560,7 @@ def _aggregate_multiple_funcs(self, arg, _axis): ) from err return result - def _get_cython_func(self, arg: str) -> Optional[str]: + def _get_cython_func(self, arg: Callable) -> Optional[str]: """ if we define an internal function for this argument, return it """ diff --git a/pandas/core/frame.py b/pandas/core/frame.py index b8b49156a0967..e9b4fd237c2c2 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -45,6 +45,7 @@ from pandas._libs import algos as libalgos, lib, properties from pandas._libs.lib import no_default from pandas._typing import ( + AggFuncType, ArrayLike, Axes, Axis, @@ -116,7 +117,7 @@ from pandas.core import algorithms, common as com, nanops, ops from pandas.core.accessor import CachedAccessor -from pandas.core.aggregation import reconstruct_func, relabel_result +from pandas.core.aggregation import reconstruct_func, relabel_result, transform from pandas.core.arrays import Categorical, ExtensionArray from pandas.core.arrays.datetimelike import DatetimeLikeArrayMixin as DatetimeLikeArray from pandas.core.arrays.sparse import SparseFrameAccessor @@ -7462,15 +7463,16 @@ def _aggregate(self, arg, axis=0, *args, **kwargs): agg = aggregate @doc( - NDFrame.transform, + _shared_docs["transform"], klass=_shared_doc_kwargs["klass"], 
axis=_shared_doc_kwargs["axis"], ) - def transform(self, func, axis=0, *args, **kwargs) -> DataFrame: - axis = self._get_axis_number(axis) - if axis == 1: - return self.T.transform(func, *args, **kwargs).T - return super().transform(func, *args, **kwargs) + def transform( + self, func: AggFuncType, axis: Axis = 0, *args, **kwargs + ) -> DataFrame: + result = transform(self, func, axis, *args, **kwargs) + assert isinstance(result, DataFrame) + return result def apply(self, func, axis=0, raw=False, result_type=None, args=(), **kwds): """ diff --git a/pandas/core/generic.py b/pandas/core/generic.py index fffd2e068ebcf..9ed9db801d0a8 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -10648,80 +10648,6 @@ def ewm( times=times, ) - @doc(klass=_shared_doc_kwargs["klass"], axis="") - def transform(self, func, *args, **kwargs): - """ - Call ``func`` on self producing a {klass} with transformed values. - - Produced {klass} will have same axis length as self. - - Parameters - ---------- - func : function, str, list or dict - Function to use for transforming the data. If a function, must either - work when passed a {klass} or when passed to {klass}.apply. - - Accepted combinations are: - - - function - - string function name - - list of functions and/or function names, e.g. ``[np.exp, 'sqrt']`` - - dict of axis labels -> functions, function names or list of such. - {axis} - *args - Positional arguments to pass to `func`. - **kwargs - Keyword arguments to pass to `func`. - - Returns - ------- - {klass} - A {klass} that must have the same length as self. - - Raises - ------ - ValueError : If the returned {klass} has a different length than self. - - See Also - -------- - {klass}.agg : Only perform aggregating type operations. - {klass}.apply : Invoke function on a {klass}. 
- - Examples - -------- - >>> df = pd.DataFrame({{'A': range(3), 'B': range(1, 4)}}) - >>> df - A B - 0 0 1 - 1 1 2 - 2 2 3 - >>> df.transform(lambda x: x + 1) - A B - 0 1 2 - 1 2 3 - 2 3 4 - - Even though the resulting {klass} must have the same length as the - input {klass}, it is possible to provide several input functions: - - >>> s = pd.Series(range(3)) - >>> s - 0 0 - 1 1 - 2 2 - dtype: int64 - >>> s.transform([np.sqrt, np.exp]) - sqrt exp - 0 0.000000 1.000000 - 1 1.000000 2.718282 - 2 1.414214 7.389056 - """ - result = self.agg(func, *args, **kwargs) - if is_scalar(result) or len(result) != len(self): - raise ValueError("transforms cannot produce aggregated results") - - return result - # ---------------------------------------------------------------------- # Misc methods diff --git a/pandas/core/series.py b/pandas/core/series.py index 6cbd93135a2ca..632b93cdcf24b 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -25,6 +25,7 @@ from pandas._libs import lib, properties, reshape, tslibs from pandas._libs.lib import no_default from pandas._typing import ( + AggFuncType, ArrayLike, Axis, DtypeObj, @@ -89,6 +90,7 @@ from pandas.core.indexes.timedeltas import TimedeltaIndex from pandas.core.indexing import check_bool_indexer from pandas.core.internals import SingleBlockManager +from pandas.core.shared_docs import _shared_docs from pandas.core.sorting import ensure_key_mapped from pandas.core.strings import StringMethods from pandas.core.tools.datetimes import to_datetime @@ -4081,14 +4083,16 @@ def aggregate(self, func=None, axis=0, *args, **kwargs): agg = aggregate @doc( - NDFrame.transform, + _shared_docs["transform"], klass=_shared_doc_kwargs["klass"], axis=_shared_doc_kwargs["axis"], ) - def transform(self, func, axis=0, *args, **kwargs): - # Validate the axis parameter - self._get_axis_number(axis) - return super().transform(func, *args, **kwargs) + def transform( + self, func: AggFuncType, axis: Axis = 0, *args, **kwargs + ) -> FrameOrSeriesUnion: + from pandas.core.aggregation import transform + + return transform(self, func, axis, *args, **kwargs) def apply(self, func, convert_dtype=True, args=(), **kwds): """ diff --git a/pandas/core/shared_docs.py b/pandas/core/shared_docs.py index 0aaccb47efc44..244ee3aa298db 100644 --- a/pandas/core/shared_docs.py +++ b/pandas/core/shared_docs.py @@ -257,3 +257,72 @@ 1 b B E 3 2 c B E 5 """ + +_shared_docs[ + "transform" +] = """\ +Call ``func`` on self producing a {klass} with transformed values. + +Produced {klass} will have same axis length as self. + +Parameters +---------- +func : function, str, list or dict + Function to use for transforming the data. If a function, must either + work when passed a {klass} or when passed to {klass}.apply. + + Accepted combinations are: + + - function + - string function name + - list of functions and/or function names, e.g. ``[np.exp, 'sqrt']`` + - dict of axis labels -> functions, function names or list of such. +{axis} +*args + Positional arguments to pass to `func`. +**kwargs + Keyword arguments to pass to `func`. + +Returns +------- +{klass} + A {klass} that must have the same length as self. + +Raises +------ +ValueError : If the returned {klass} has a different length than self. + +See Also +-------- +{klass}.agg : Only perform aggregating type operations. +{klass}.apply : Invoke function on a {klass}. 
+ +Examples +-------- +>>> df = pd.DataFrame({{'A': range(3), 'B': range(1, 4)}}) +>>> df + A B +0 0 1 +1 1 2 +2 2 3 +>>> df.transform(lambda x: x + 1) + A B +0 1 2 +1 2 3 +2 3 4 + +Even though the resulting {klass} must have the same length as the +input {klass}, it is possible to provide several input functions: + +>>> s = pd.Series(range(3)) +>>> s +0 0 +1 1 +2 2 +dtype: int64 +>>> s.transform([np.sqrt, np.exp]) + sqrt exp +0 0.000000 1.000000 +1 1.000000 2.718282 +2 1.414214 7.389056 +""" diff --git a/pandas/tests/frame/apply/test_frame_transform.py b/pandas/tests/frame/apply/test_frame_transform.py index 3a345215482ed..346e60954fc13 100644 --- a/pandas/tests/frame/apply/test_frame_transform.py +++ b/pandas/tests/frame/apply/test_frame_transform.py @@ -1,72 +1,203 @@ import operator +import re import numpy as np import pytest -import pandas as pd +from pandas import DataFrame, MultiIndex import pandas._testing as tm +from pandas.core.base import SpecificationError +from pandas.core.groupby.base import transformation_kernels from pandas.tests.frame.common import zip_frames -def test_agg_transform(axis, float_frame): - other_axis = 1 if axis in {0, "index"} else 0 +def test_transform_ufunc(axis, float_frame): + # GH 35964 + with np.errstate(all="ignore"): + f_sqrt = np.sqrt(float_frame) + result = float_frame.transform(np.sqrt, axis=axis) + expected = f_sqrt + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("op", transformation_kernels) +def test_transform_groupby_kernel(axis, float_frame, op): + # GH 35964 + if op == "cumcount": + pytest.xfail("DataFrame.cumcount does not exist") + if op == "tshift": + pytest.xfail("Only works on time index and is deprecated") + if axis == 1 or axis == "columns": + pytest.xfail("GH 36308: groupby.transform with axis=1 is broken") + + args = [0.0] if op == "fillna" else [] + if axis == 0 or axis == "index": + ones = np.ones(float_frame.shape[0]) + else: + ones = np.ones(float_frame.shape[1]) + expected = float_frame.groupby(ones, axis=axis).transform(op, *args) + result = float_frame.transform(op, axis, *args) + tm.assert_frame_equal(result, expected) + +@pytest.mark.parametrize( + "ops, names", [([np.sqrt], ["sqrt"]), ([np.abs, np.sqrt], ["absolute", "sqrt"])] +) +def test_transform_list(axis, float_frame, ops, names): + # GH 35964 + other_axis = 1 if axis in {0, "index"} else 0 with np.errstate(all="ignore"): + expected = zip_frames([op(float_frame) for op in ops], axis=other_axis) + if axis in {0, "index"}: + expected.columns = MultiIndex.from_product([float_frame.columns, names]) + else: + expected.index = MultiIndex.from_product([float_frame.index, names]) + result = float_frame.transform(ops, axis=axis) + tm.assert_frame_equal(result, expected) - f_abs = np.abs(float_frame) - f_sqrt = np.sqrt(float_frame) - # ufunc - result = float_frame.transform(np.sqrt, axis=axis) - expected = f_sqrt.copy() - tm.assert_frame_equal(result, expected) - - result = float_frame.transform(np.sqrt, axis=axis) - tm.assert_frame_equal(result, expected) - - # list-like - expected = f_sqrt.copy() - if axis in {0, "index"}: - expected.columns = pd.MultiIndex.from_product( - [float_frame.columns, ["sqrt"]] - ) - else: - expected.index = pd.MultiIndex.from_product([float_frame.index, ["sqrt"]]) - result = float_frame.transform([np.sqrt], axis=axis) - tm.assert_frame_equal(result, expected) - - # multiple items in list - # these are in the order as if we are applying both - # functions per series and then concatting - expected = zip_frames([f_abs, 
f_sqrt], axis=other_axis) - if axis in {0, "index"}: - expected.columns = pd.MultiIndex.from_product( - [float_frame.columns, ["absolute", "sqrt"]] - ) - else: - expected.index = pd.MultiIndex.from_product( - [float_frame.index, ["absolute", "sqrt"]] - ) - result = float_frame.transform([np.abs, "sqrt"], axis=axis) - tm.assert_frame_equal(result, expected) +def test_transform_dict(axis, float_frame): + # GH 35964 + if axis == 0 or axis == "index": + e = float_frame.columns[0] + expected = float_frame[[e]].transform(np.abs) + else: + e = float_frame.index[0] + expected = float_frame.iloc[[0]].transform(np.abs) + result = float_frame.transform({e: np.abs}, axis=axis) + tm.assert_frame_equal(result, expected) -def test_transform_and_agg_err(axis, float_frame): - # cannot both transform and agg - msg = "transforms cannot produce aggregated results" - with pytest.raises(ValueError, match=msg): - float_frame.transform(["max", "min"], axis=axis) +@pytest.mark.parametrize("use_apply", [True, False]) +def test_transform_udf(axis, float_frame, use_apply): + # GH 35964 + # transform uses UDF either via apply or passing the entire DataFrame + def func(x): + # transform is using apply iff x is not a DataFrame + if use_apply == isinstance(x, DataFrame): + # Force transform to fallback + raise ValueError + return x + 1 - msg = "cannot combine transform and aggregation operations" - with pytest.raises(ValueError, match=msg): - with np.errstate(all="ignore"): - float_frame.transform(["max", "sqrt"], axis=axis) + result = float_frame.transform(func, axis=axis) + expected = float_frame + 1 + tm.assert_frame_equal(result, expected) @pytest.mark.parametrize("method", ["abs", "shift", "pct_change", "cumsum", "rank"]) def test_transform_method_name(method): # GH 19760 - df = pd.DataFrame({"A": [-1, 2]}) + df = DataFrame({"A": [-1, 2]}) result = df.transform(method) expected = operator.methodcaller(method)(df) tm.assert_frame_equal(result, expected) + + +def test_transform_and_agg_err(axis, float_frame): + # GH 35964 + # cannot both transform and agg + msg = "Function did not transform" + with pytest.raises(ValueError, match=msg): + float_frame.transform(["max", "min"], axis=axis) + + msg = "Function did not transform" + with pytest.raises(ValueError, match=msg): + float_frame.transform(["max", "sqrt"], axis=axis) + + +def test_agg_dict_nested_renaming_depr(): + df = DataFrame({"A": range(5), "B": 5}) + + # nested renaming + msg = r"nested renamer is not supported" + with pytest.raises(SpecificationError, match=msg): + # mypy identifies the argument as an invalid type + df.transform({"A": {"foo": "min"}, "B": {"bar": "max"}}) + + +def test_transform_reducer_raises(all_reductions): + # GH 35964 + op = all_reductions + df = DataFrame({"A": [1, 2, 3]}) + msg = "Function did not transform" + with pytest.raises(ValueError, match=msg): + df.transform(op) + with pytest.raises(ValueError, match=msg): + df.transform([op]) + with pytest.raises(ValueError, match=msg): + df.transform({"A": op}) + with pytest.raises(ValueError, match=msg): + df.transform({"A": [op]}) + + +# mypy doesn't allow adding lists of different types +# https://github.com/python/mypy/issues/5492 +@pytest.mark.parametrize("op", [*transformation_kernels, lambda x: x + 1]) +def test_transform_bad_dtype(op): + # GH 35964 + df = DataFrame({"A": 3 * [object]}) # DataFrame that will fail on most transforms + if op in ("backfill", "shift", "pad", "bfill", "ffill"): + pytest.xfail("Transform function works on any datatype") + msg = "Transform function 
failed" + with pytest.raises(ValueError, match=msg): + df.transform(op) + with pytest.raises(ValueError, match=msg): + df.transform([op]) + with pytest.raises(ValueError, match=msg): + df.transform({"A": op}) + with pytest.raises(ValueError, match=msg): + df.transform({"A": [op]}) + + +@pytest.mark.parametrize("op", transformation_kernels) +def test_transform_partial_failure(op): + # GH 35964 + wont_fail = ["ffill", "bfill", "fillna", "pad", "backfill", "shift"] + if op in wont_fail: + pytest.xfail("Transform kernel is successful on all dtypes") + if op == "cumcount": + pytest.xfail("transform('cumcount') not implemented") + if op == "tshift": + pytest.xfail("Only works on time index; deprecated") + + # Using object makes most transform kernels fail + df = DataFrame({"A": 3 * [object], "B": [1, 2, 3]}) + + expected = df[["B"]].transform([op]) + result = df.transform([op]) + tm.assert_equal(result, expected) + + expected = df[["B"]].transform({"B": op}) + result = df.transform({"B": op}) + tm.assert_equal(result, expected) + + expected = df[["B"]].transform({"B": [op]}) + result = df.transform({"B": [op]}) + tm.assert_equal(result, expected) + + +@pytest.mark.parametrize("use_apply", [True, False]) +def test_transform_passes_args(use_apply): + # GH 35964 + # transform uses UDF either via apply or passing the entire DataFrame + expected_args = [1, 2] + expected_kwargs = {"c": 3} + + def f(x, a, b, c): + # transform is using apply iff x is not a DataFrame + if use_apply == isinstance(x, DataFrame): + # Force transform to fallback + raise ValueError + assert [a, b] == expected_args + assert c == expected_kwargs["c"] + return x + + DataFrame([1]).transform(f, 0, *expected_args, **expected_kwargs) + + +def test_transform_missing_columns(axis): + # GH 35964 + df = DataFrame({"A": [1, 2], "B": [3, 4]}) + match = re.escape("Column(s) ['C'] do not exist") + with pytest.raises(SpecificationError, match=match): + df.transform({"C": "cumsum"}) diff --git a/pandas/tests/series/apply/test_series_apply.py b/pandas/tests/series/apply/test_series_apply.py index b948317f32062..827f466e23106 100644 --- a/pandas/tests/series/apply/test_series_apply.py +++ b/pandas/tests/series/apply/test_series_apply.py @@ -209,8 +209,8 @@ def test_transform(self, string_series): f_abs = np.abs(string_series) # ufunc - expected = f_sqrt.copy() result = string_series.apply(np.sqrt) + expected = f_sqrt.copy() tm.assert_series_equal(result, expected) # list-like @@ -219,6 +219,9 @@ def test_transform(self, string_series): expected.columns = ["sqrt"] tm.assert_frame_equal(result, expected) + result = string_series.apply(["sqrt"]) + tm.assert_frame_equal(result, expected) + # multiple items in list # these are in the order as if we are applying both functions per # series and then concatting diff --git a/pandas/tests/series/apply/test_series_transform.py b/pandas/tests/series/apply/test_series_transform.py index 8bc3d2dc4d0db..0842674da2a7d 100644 --- a/pandas/tests/series/apply/test_series_transform.py +++ b/pandas/tests/series/apply/test_series_transform.py @@ -1,50 +1,90 @@ import numpy as np import pytest -import pandas as pd +from pandas import DataFrame, Series, concat import pandas._testing as tm +from pandas.core.base import SpecificationError +from pandas.core.groupby.base import transformation_kernels -def test_transform(string_series): - # transforming functions - +def test_transform_ufunc(string_series): + # GH 35964 with np.errstate(all="ignore"): f_sqrt = np.sqrt(string_series) - f_abs = np.abs(string_series) - # 
ufunc - result = string_series.transform(np.sqrt) - expected = f_sqrt.copy() - tm.assert_series_equal(result, expected) + # ufunc + result = string_series.transform(np.sqrt) + expected = f_sqrt.copy() + tm.assert_series_equal(result, expected) - # list-like - result = string_series.transform([np.sqrt]) - expected = f_sqrt.to_frame().copy() - expected.columns = ["sqrt"] - tm.assert_frame_equal(result, expected) - result = string_series.transform([np.sqrt]) - tm.assert_frame_equal(result, expected) +@pytest.mark.parametrize("op", transformation_kernels) +def test_transform_groupby_kernel(string_series, op): + # GH 35964 + if op == "cumcount": + pytest.xfail("Series.cumcount does not exist") + if op == "tshift": + pytest.xfail("Only works on time index and is deprecated") + + args = [0.0] if op == "fillna" else [] + ones = np.ones(string_series.shape[0]) + expected = string_series.groupby(ones).transform(op, *args) + result = string_series.transform(op, 0, *args) + tm.assert_series_equal(result, expected) - result = string_series.transform(["sqrt"]) - tm.assert_frame_equal(result, expected) - # multiple items in list - # these are in the order as if we are applying both functions per - # series and then concatting - expected = pd.concat([f_sqrt, f_abs], axis=1) - result = string_series.transform(["sqrt", "abs"]) - expected.columns = ["sqrt", "abs"] +@pytest.mark.parametrize( + "ops, names", [([np.sqrt], ["sqrt"]), ([np.abs, np.sqrt], ["absolute", "sqrt"])] +) +def test_transform_list(string_series, ops, names): + # GH 35964 + with np.errstate(all="ignore"): + expected = concat([op(string_series) for op in ops], axis=1) + expected.columns = names + result = string_series.transform(ops) tm.assert_frame_equal(result, expected) -def test_transform_and_agg_error(string_series): +def test_transform_dict(string_series): + # GH 35964 + with np.errstate(all="ignore"): + expected = concat([np.sqrt(string_series), np.abs(string_series)], axis=1) + expected.columns = ["foo", "bar"] + result = string_series.transform({"foo": np.sqrt, "bar": np.abs}) + tm.assert_frame_equal(result, expected) + + +def test_transform_udf(axis, string_series): + # GH 35964 + # via apply + def func(x): + if isinstance(x, Series): + raise ValueError + return x + 1 + + result = string_series.transform(func) + expected = string_series + 1 + tm.assert_series_equal(result, expected) + + # via map Series -> Series + def func(x): + if not isinstance(x, Series): + raise ValueError + return x + 1 + + result = string_series.transform(func) + expected = string_series + 1 + tm.assert_series_equal(result, expected) + + +def test_transform_wont_agg(string_series): + # GH 35964 # we are trying to transform with an aggregator - msg = "transforms cannot produce aggregated results" + msg = "Function did not transform" with pytest.raises(ValueError, match=msg): string_series.transform(["min", "max"]) - msg = "cannot combine transform and aggregation operations" + msg = "Function did not transform" with pytest.raises(ValueError, match=msg): with np.errstate(all="ignore"): string_series.transform(["sqrt", "max"]) @@ -52,8 +92,74 @@ def test_transform_and_agg_error(string_series): def test_transform_none_to_type(): # GH34377 - df = pd.DataFrame({"a": [None]}) - - msg = "DataFrame constructor called with incompatible data and dtype" - with pytest.raises(TypeError, match=msg): + df = DataFrame({"a": [None]}) + msg = "Transform function failed" + with pytest.raises(ValueError, match=msg): df.transform({"a": int}) + + +def 
test_transform_reducer_raises(all_reductions): + # GH 35964 + op = all_reductions + s = Series([1, 2, 3]) + msg = "Function did not transform" + with pytest.raises(ValueError, match=msg): + s.transform(op) + with pytest.raises(ValueError, match=msg): + s.transform([op]) + with pytest.raises(ValueError, match=msg): + s.transform({"A": op}) + with pytest.raises(ValueError, match=msg): + s.transform({"A": [op]}) + + +# mypy doesn't allow adding lists of different types +# https://github.com/python/mypy/issues/5492 +@pytest.mark.parametrize("op", [*transformation_kernels, lambda x: x + 1]) +def test_transform_bad_dtype(op): + # GH 35964 + s = Series(3 * [object]) # Series that will fail on most transforms + if op in ("backfill", "shift", "pad", "bfill", "ffill"): + pytest.xfail("Transform function works on any datatype") + msg = "Transform function failed" + with pytest.raises(ValueError, match=msg): + s.transform(op) + with pytest.raises(ValueError, match=msg): + s.transform([op]) + with pytest.raises(ValueError, match=msg): + s.transform({"A": op}) + with pytest.raises(ValueError, match=msg): + s.transform({"A": [op]}) + + +@pytest.mark.parametrize("use_apply", [True, False]) +def test_transform_passes_args(use_apply): + # GH 35964 + # transform uses UDF either via apply or passing the entire Series + expected_args = [1, 2] + expected_kwargs = {"c": 3} + + def f(x, a, b, c): + # transform is using apply iff x is not a Series + if use_apply == isinstance(x, Series): + # Force transform to fallback + raise ValueError + assert [a, b] == expected_args + assert c == expected_kwargs["c"] + return x + + Series([1]).transform(f, 0, *expected_args, **expected_kwargs) + + +def test_transform_axis_1_raises(): + # GH 35964 + msg = "No axis named 1 for object type Series" + with pytest.raises(ValueError, match=msg): + Series([1]).transform("sum", axis=1) + + +def test_transform_nested_renamer(): + # GH 35964 + match = "nested renamer is not supported" + with pytest.raises(SpecificationError, match=match): + Series([1]).transform({"A": {"B": ["sum"]}}) From b8f22ad3b980cd7a3687f55586a25e10ad951329 Mon Sep 17 00:00:00 2001 From: Avinash Pancham <44933366+avinashpancham@users.noreply.github.com> Date: Sat, 12 Sep 2020 23:37:57 +0200 Subject: [PATCH 522/632] DEPR: Deprecate pandas/io/date_converters.py (#35741) --- doc/source/user_guide/io.rst | 15 +-- doc/source/whatsnew/v1.2.0.rst | 2 +- pandas/io/date_converters.py | 62 ++++++++++ pandas/tests/io/parser/test_parse_dates.py | 130 ++++++++++++++------- pandas/tests/io/test_date_converters.py | 15 ++- 5 files changed, 158 insertions(+), 66 deletions(-) diff --git a/doc/source/user_guide/io.rst b/doc/source/user_guide/io.rst index a1ce2f847d4b8..4dfabaa99fff6 100644 --- a/doc/source/user_guide/io.rst +++ b/doc/source/user_guide/io.rst @@ -930,7 +930,7 @@ take full advantage of the flexibility of the date parsing API: .. ipython:: python df = pd.read_csv('tmp.csv', header=None, parse_dates=date_spec, - date_parser=pd.io.date_converters.parse_date_time) + date_parser=pd.to_datetime) df Pandas will try to call the ``date_parser`` function in three different ways. If @@ -942,11 +942,6 @@ an exception is raised, the next one is tried: 2. If #1 fails, ``date_parser`` is called with all the columns concatenated row-wise into a single array (e.g., ``date_parser(['2013 1', '2013 2'])``). -3. 
If #2 fails, ``date_parser`` is called once for every row with one or more
-   string arguments from the columns indicated with `parse_dates`
-   (e.g., ``date_parser('2013', '1')`` for the first row, ``date_parser('2013', '2')``
-   for the second, etc.).
-
 Note that performance-wise, you should try these methods of parsing dates in order:

 1. Try to infer the format using ``infer_datetime_format=True`` (see section below).
@@ -958,14 +953,6 @@ Note that performance-wise, you should try these methods of parsing dates in ord
    For optimal performance, this should be vectorized, i.e., it should accept
    arrays as arguments.

-You can explore the date parsing functionality in
-`date_converters.py `__
-and add your own. We would love to turn this module into a community supported
-set of date/time parsers. To get you started, ``date_converters.py`` contains
-functions to parse dual date and time columns, year/month/day columns,
-and year/month/day/hour/minute/second columns. It also contains a
-``generic_parser`` function so you can curry it with a function that deals with
-a single date rather than the entire array.

 .. ipython:: python
    :suppress:
diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index af2960b5038b2..2b230d2fde645 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -195,7 +195,7 @@ Deprecations
 ~~~~~~~~~~~~
 - Deprecated parameter ``inplace`` in :meth:`MultiIndex.set_codes` and :meth:`MultiIndex.set_levels` (:issue:`35626`)
 - Deprecated parameter ``dtype`` in :meth:`Index.copy` on all index classes. Use the :meth:`Index.astype` method instead for changing the dtype (:issue:`35853`)
--
+- Date parser functions :func:`~pandas.io.date_converters.parse_date_time`, :func:`~pandas.io.date_converters.parse_date_fields`, :func:`~pandas.io.date_converters.parse_all_fields` and :func:`~pandas.io.date_converters.generic_parser` from ``pandas.io.date_converters`` are deprecated and will be removed in a future version; use :func:`to_datetime` instead (:issue:`35741`)

 .. ---------------------------------------------------------------------------
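Editor's note (illustrative sketch, not part of the patch): the deprecation
messages added below all point at ``pd.to_datetime``. Assuming ``date_col``,
``time_col``, ``year_col`` and friends are array-likes of strings/integers as
in ``date_converters.py``, the suggested replacement idioms look roughly like::

    import numpy as np
    import pandas as pd

    # parse_date_time(date_col, time_col): combine the strings, then parse
    ser = pd.to_datetime(date_col + " " + time_col)

    # parse_date_fields(year_col, month_col, day_col): assemble from a mapping
    ser = pd.to_datetime({"year": year_col, "month": month_col, "day": day_col})

    # and, when a NumPy array of datetime.datetime objects is needed:
    arr = np.array([ts.to_pydatetime() for ts in ser])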
+""", # noqa: E501 + FutureWarning, + stacklevel=2, + ) + year_col = _maybe_cast(year_col) month_col = _maybe_cast(month_col) day_col = _maybe_cast(day_col) @@ -18,6 +48,24 @@ def parse_date_fields(year_col, month_col, day_col): def parse_all_fields(year_col, month_col, day_col, hour_col, minute_col, second_col): + """ + Parse columns with datetime information into a single datetime column. + + .. deprecated:: 1.2 + """ + + warnings.warn( + """ + Use pd.to_datetime({"year": year_col, "month": month_col, "day": day_col, + "hour": hour_col, "minute": minute_col, second": second_col}) instead to get a Pandas Series. + Use ser = pd.to_datetime({"year": year_col, "month": month_col, "day": day_col, + "hour": hour_col, "minute": minute_col, second": second_col}) and + np.array([s.to_pydatetime() for s in ser]) instead to get a Numpy array. +""", # noqa: E501 + FutureWarning, + stacklevel=2, + ) + year_col = _maybe_cast(year_col) month_col = _maybe_cast(month_col) day_col = _maybe_cast(day_col) @@ -30,6 +78,20 @@ def parse_all_fields(year_col, month_col, day_col, hour_col, minute_col, second_ def generic_parser(parse_func, *cols): + """ + Use dateparser to parse columns with data information into a single datetime column. + + .. deprecated:: 1.2 + """ + + warnings.warn( + """ + Use pd.to_datetime instead. +""", + FutureWarning, + stacklevel=2, + ) + N = _check_columns(cols) results = np.empty(N, dtype=object) diff --git a/pandas/tests/io/parser/test_parse_dates.py b/pandas/tests/io/parser/test_parse_dates.py index 833186b69c63b..662659982c0b3 100644 --- a/pandas/tests/io/parser/test_parse_dates.py +++ b/pandas/tests/io/parser/test_parse_dates.py @@ -370,7 +370,11 @@ def test_date_col_as_index_col(all_parsers): tm.assert_frame_equal(result, expected) -def test_multiple_date_cols_int_cast(all_parsers): +@pytest.mark.parametrize( + "date_parser, warning", + ([conv.parse_date_time, FutureWarning], [pd.to_datetime, None]), +) +def test_multiple_date_cols_int_cast(all_parsers, date_parser, warning): data = ( "KORD,19990127, 19:00:00, 18:56:00, 0.8100\n" "KORD,19990127, 20:00:00, 19:56:00, 0.0100\n" @@ -382,13 +386,15 @@ def test_multiple_date_cols_int_cast(all_parsers): parse_dates = {"actual": [1, 2], "nominal": [1, 3]} parser = all_parsers - result = parser.read_csv( - StringIO(data), - header=None, - date_parser=conv.parse_date_time, - parse_dates=parse_dates, - prefix="X", - ) + with tm.assert_produces_warning(warning, check_stacklevel=False): + result = parser.read_csv( + StringIO(data), + header=None, + date_parser=date_parser, + parse_dates=parse_dates, + prefix="X", + ) + expected = DataFrame( [ [datetime(1999, 1, 27, 19, 0), datetime(1999, 1, 27, 18, 56), "KORD", 0.81], @@ -808,7 +814,9 @@ def test_parse_dates_custom_euro_format(all_parsers, kwargs): tm.assert_frame_equal(df, expected) else: msg = "got an unexpected keyword argument 'day_first'" - with pytest.raises(TypeError, match=msg): + with pytest.raises(TypeError, match=msg), tm.assert_produces_warning( + FutureWarning + ): parser.read_csv( StringIO(data), names=["time", "Q", "NTU"], @@ -1166,7 +1174,11 @@ def test_parse_dates_no_convert_thousands(all_parsers, data, kwargs, expected): tm.assert_frame_equal(result, expected) -def test_parse_date_time_multi_level_column_name(all_parsers): +@pytest.mark.parametrize( + "date_parser, warning", + ([conv.parse_date_time, FutureWarning], [pd.to_datetime, None]), +) +def test_parse_date_time_multi_level_column_name(all_parsers, date_parser, warning): data = """\ D,T,A,B date, time,a,b @@ -1174,12 
+1186,13 @@ def test_parse_date_time_multi_level_column_name(all_parsers): 2001-01-06, 00:00:00, 1.0, 11. """ parser = all_parsers - result = parser.read_csv( - StringIO(data), - header=[0, 1], - parse_dates={"date_time": [0, 1]}, - date_parser=conv.parse_date_time, - ) + with tm.assert_produces_warning(warning, check_stacklevel=False): + result = parser.read_csv( + StringIO(data), + header=[0, 1], + parse_dates={"date_time": [0, 1]}, + date_parser=date_parser, + ) expected_data = [ [datetime(2001, 1, 5, 9, 0, 0), 0.0, 10.0], @@ -1189,6 +1202,10 @@ def test_parse_date_time_multi_level_column_name(all_parsers): tm.assert_frame_equal(result, expected) +@pytest.mark.parametrize( + "date_parser, warning", + ([conv.parse_date_time, FutureWarning], [pd.to_datetime, None]), +) @pytest.mark.parametrize( "data,kwargs,expected", [ @@ -1261,9 +1278,10 @@ def test_parse_date_time_multi_level_column_name(all_parsers): ), ], ) -def test_parse_date_time(all_parsers, data, kwargs, expected): +def test_parse_date_time(all_parsers, data, kwargs, expected, date_parser, warning): parser = all_parsers - result = parser.read_csv(StringIO(data), date_parser=conv.parse_date_time, **kwargs) + with tm.assert_produces_warning(warning, check_stacklevel=False): + result = parser.read_csv(StringIO(data), date_parser=date_parser, **kwargs) # Python can sometimes be flaky about how # the aggregated columns are entered, so @@ -1272,15 +1290,20 @@ def test_parse_date_time(all_parsers, data, kwargs, expected): tm.assert_frame_equal(result, expected) -def test_parse_date_fields(all_parsers): +@pytest.mark.parametrize( + "date_parser, warning", + ([conv.parse_date_fields, FutureWarning], [pd.to_datetime, None]), +) +def test_parse_date_fields(all_parsers, date_parser, warning): parser = all_parsers data = "year,month,day,a\n2001,01,10,10.\n2001,02,1,11." - result = parser.read_csv( - StringIO(data), - header=0, - parse_dates={"ymd": [0, 1, 2]}, - date_parser=conv.parse_date_fields, - ) + with tm.assert_produces_warning(warning, check_stacklevel=False): + result = parser.read_csv( + StringIO(data), + header=0, + parse_dates={"ymd": [0, 1, 2]}, + date_parser=date_parser, + ) expected = DataFrame( [[datetime(2001, 1, 10), 10.0], [datetime(2001, 2, 1), 11.0]], @@ -1289,19 +1312,27 @@ def test_parse_date_fields(all_parsers): tm.assert_frame_equal(result, expected) -def test_parse_date_all_fields(all_parsers): +@pytest.mark.parametrize( + "date_parser, warning", + ( + [conv.parse_all_fields, FutureWarning], + [lambda x: pd.to_datetime(x, format="%Y %m %d %H %M %S"), None], + ), +) +def test_parse_date_all_fields(all_parsers, date_parser, warning): parser = all_parsers data = """\ year,month,day,hour,minute,second,a,b 2001,01,05,10,00,0,0.0,10. 2001,01,5,10,0,00,1.,11. 
""" - result = parser.read_csv( - StringIO(data), - header=0, - date_parser=conv.parse_all_fields, - parse_dates={"ymdHMS": [0, 1, 2, 3, 4, 5]}, - ) + with tm.assert_produces_warning(warning, check_stacklevel=False): + result = parser.read_csv( + StringIO(data), + header=0, + date_parser=date_parser, + parse_dates={"ymdHMS": [0, 1, 2, 3, 4, 5]}, + ) expected = DataFrame( [ [datetime(2001, 1, 5, 10, 0, 0), 0.0, 10.0], @@ -1312,19 +1343,27 @@ def test_parse_date_all_fields(all_parsers): tm.assert_frame_equal(result, expected) -def test_datetime_fractional_seconds(all_parsers): +@pytest.mark.parametrize( + "date_parser, warning", + ( + [conv.parse_all_fields, FutureWarning], + [lambda x: pd.to_datetime(x, format="%Y %m %d %H %M %S.%f"), None], + ), +) +def test_datetime_fractional_seconds(all_parsers, date_parser, warning): parser = all_parsers data = """\ year,month,day,hour,minute,second,a,b 2001,01,05,10,00,0.123456,0.0,10. 2001,01,5,10,0,0.500000,1.,11. """ - result = parser.read_csv( - StringIO(data), - header=0, - date_parser=conv.parse_all_fields, - parse_dates={"ymdHMS": [0, 1, 2, 3, 4, 5]}, - ) + with tm.assert_produces_warning(warning, check_stacklevel=False): + result = parser.read_csv( + StringIO(data), + header=0, + date_parser=date_parser, + parse_dates={"ymdHMS": [0, 1, 2, 3, 4, 5]}, + ) expected = DataFrame( [ [datetime(2001, 1, 5, 10, 0, 0, microsecond=123456), 0.0, 10.0], @@ -1339,12 +1378,13 @@ def test_generic(all_parsers): parser = all_parsers data = "year,month,day,a\n2001,01,10,10.\n2001,02,1,11." - result = parser.read_csv( - StringIO(data), - header=0, - parse_dates={"ym": [0, 1]}, - date_parser=lambda y, m: date(year=int(y), month=int(m), day=1), - ) + with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): + result = parser.read_csv( + StringIO(data), + header=0, + parse_dates={"ym": [0, 1]}, + date_parser=lambda y, m: date(year=int(y), month=int(m), day=1), + ) expected = DataFrame( [[date(2001, 1, 1), 10, 10.0], [date(2001, 2, 1), 1, 11.0]], columns=["ym", "day", "a"], diff --git a/pandas/tests/io/test_date_converters.py b/pandas/tests/io/test_date_converters.py index cdb8eca02a3e5..a9fa27e091714 100644 --- a/pandas/tests/io/test_date_converters.py +++ b/pandas/tests/io/test_date_converters.py @@ -8,11 +8,12 @@ def test_parse_date_time(): + dates = np.array(["2007/1/3", "2008/2/4"], dtype=object) times = np.array(["05:07:09", "06:08:00"], dtype=object) expected = np.array([datetime(2007, 1, 3, 5, 7, 9), datetime(2008, 2, 4, 6, 8, 0)]) - - result = conv.parse_date_time(dates, times) + with tm.assert_produces_warning(FutureWarning): + result = conv.parse_date_time(dates, times) tm.assert_numpy_array_equal(result, expected) @@ -20,9 +21,10 @@ def test_parse_date_fields(): days = np.array([3, 4]) months = np.array([1, 2]) years = np.array([2007, 2008]) - result = conv.parse_date_fields(years, months, days) - expected = np.array([datetime(2007, 1, 3), datetime(2008, 2, 4)]) + + with tm.assert_produces_warning(FutureWarning): + result = conv.parse_date_fields(years, months, days) tm.assert_numpy_array_equal(result, expected) @@ -34,7 +36,8 @@ def test_parse_all_fields(): days = np.array([3, 4]) years = np.array([2007, 2008]) months = np.array([1, 2]) - - result = conv.parse_all_fields(years, months, days, hours, minutes, seconds) expected = np.array([datetime(2007, 1, 3, 5, 7, 9), datetime(2008, 2, 4, 6, 8, 0)]) + + with tm.assert_produces_warning(FutureWarning): + result = conv.parse_all_fields(years, months, days, hours, minutes, seconds) 
tm.assert_numpy_array_equal(result, expected) From 822dc6f901fafd646257de2fc5ea918bbec82f93 Mon Sep 17 00:00:00 2001 From: Richard Shadrach <45562402+rhshadrach@users.noreply.github.com> Date: Sat, 12 Sep 2020 17:39:39 -0400 Subject: [PATCH 523/632] REGR: Series access with Index of tuples/frozenset (#36147) --- doc/source/whatsnew/v1.1.3.rst | 2 ++ pandas/core/series.py | 22 +++++++++---------- pandas/tests/series/indexing/test_indexing.py | 21 +++++++++++++++++- 3 files changed, 33 insertions(+), 12 deletions(-) diff --git a/doc/source/whatsnew/v1.1.3.rst b/doc/source/whatsnew/v1.1.3.rst index c06990e3f2051..25d223418fc92 100644 --- a/doc/source/whatsnew/v1.1.3.rst +++ b/doc/source/whatsnew/v1.1.3.rst @@ -14,6 +14,8 @@ including other versions of pandas. Fixed regressions ~~~~~~~~~~~~~~~~~ +- Fixed regression in :meth:`Series.__getitem__` incorrectly raising when the input was a tuple (:issue:`35534`) +- Fixed regression in :meth:`Series.__getitem__` incorrectly raising when the input was a frozenset (:issue:`35747`) - .. --------------------------------------------------------------------------- diff --git a/pandas/core/series.py b/pandas/core/series.py index 632b93cdcf24b..ef9ade5c7bb15 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -889,21 +889,19 @@ def __getitem__(self, key): elif key_is_scalar: return self._get_value(key) - if ( - isinstance(key, tuple) - and is_hashable(key) - and isinstance(self.index, MultiIndex) - ): + if is_hashable(key): # Otherwise index.get_value will raise InvalidIndexError try: + # For labels that don't resolve as scalars like tuples and frozensets result = self._get_value(key) return result except KeyError: - # We still have the corner case where this tuple is a key - # in the first level of our MultiIndex - return self._get_values_tuple(key) + if isinstance(key, tuple) and isinstance(self.index, MultiIndex): + # We still have the corner case where a tuple is a key + # in the first level of our MultiIndex + return self._get_values_tuple(key) if is_iterator(key): key = list(key) @@ -963,7 +961,7 @@ def _get_values_tuple(self, key): return result if not isinstance(self.index, MultiIndex): - raise ValueError("Can only tuple-index with a MultiIndex") + raise ValueError("key of type tuple not found and not a MultiIndex") # If key is contained, would have returned by now indexer, new_index = self.index.get_loc_level(key) @@ -1017,9 +1015,11 @@ def __setitem__(self, key, value): # GH#12862 adding an new key to the Series self.loc[key] = value - except TypeError as e: + except TypeError as err: if isinstance(key, tuple) and not isinstance(self.index, MultiIndex): - raise ValueError("Can only tuple-index with a MultiIndex") from e + raise ValueError( + "key of type tuple not found and not a MultiIndex" + ) from err if com.is_bool_indexer(key): key = check_bool_indexer(self.index, key) diff --git a/pandas/tests/series/indexing/test_indexing.py b/pandas/tests/series/indexing/test_indexing.py index 3ed25b8bca566..1fafdf00393e1 100644 --- a/pandas/tests/series/indexing/test_indexing.py +++ b/pandas/tests/series/indexing/test_indexing.py @@ -383,7 +383,7 @@ def test_2d_to_1d_assignment_raises(): @pytest.mark.filterwarnings("ignore:Using a non-tuple:FutureWarning") def test_basic_getitem_setitem_corner(datetime_series): # invalid tuples, e.g. td.ts[:, None] vs. 
td.ts[:, 2] - msg = "Can only tuple-index with a MultiIndex" + msg = "key of type tuple not found and not a MultiIndex" with pytest.raises(ValueError, match=msg): datetime_series[:, 2] with pytest.raises(ValueError, match=msg): @@ -942,3 +942,22 @@ def assert_slices_equivalent(l_slc, i_slc): for key2 in [keystr2, box(keystr2)]: assert_slices_equivalent(SLC[key2:key:-1], SLC[13:8:-1]) assert_slices_equivalent(SLC[key:key2:-1], SLC[0:0:-1]) + + +def test_tuple_index(): + # GH 35534 - Selecting values when a Series has an Index of tuples + s = pd.Series([1, 2], index=[("a",), ("b",)]) + assert s[("a",)] == 1 + assert s[("b",)] == 2 + s[("b",)] = 3 + assert s[("b",)] == 3 + + +def test_frozenset_index(): + # GH35747 - Selecting values when a Series has an Index of frozenset + idx0, idx1 = frozenset("a"), frozenset("b") + s = pd.Series([1, 2], index=[idx0, idx1]) + assert s[idx0] == 1 + assert s[idx1] == 2 + s[idx1] = 3 + assert s[idx1] == 3 From 229722e6a24b9658a27fc06b617701ad02aaa435 Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Sat, 12 Sep 2020 16:24:33 -0700 Subject: [PATCH 524/632] ENH: consistently cast strings for DTA/TDA/PA.__setitem__ (#36261) * ENH: consistently cast strings for DTA/TDA/PA.__setitem__ * whatsnew --- doc/source/whatsnew/v1.2.0.rst | 2 ++ pandas/core/arrays/datetimelike.py | 3 +-- pandas/tests/arrays/test_datetimelike.py | 31 ++++++++++++++++++++---- pandas/tests/arrays/test_datetimes.py | 23 ++++++++++++++++++ 4 files changed, 52 insertions(+), 7 deletions(-) diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index 2b230d2fde645..e577a8f26bd12 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -230,6 +230,8 @@ Datetimelike - Bug in :meth:`DatetimeIndex.get_slice_bound` where ``datetime.date`` objects were not accepted or naive :class:`Timestamp` with a tz-aware :class:`DatetimeIndex` (:issue:`35690`) - Bug in :meth:`DatetimeIndex.slice_locs` where ``datetime.date`` objects were not accepted (:issue:`34077`) - Bug in :meth:`DatetimeIndex.searchsorted`, :meth:`TimedeltaIndex.searchsorted`, :meth:`PeriodIndex.searchsorted`, and :meth:`Series.searchsorted` with ``datetime64``, ``timedelta64`` or ``Period`` dtype placement of ``NaT`` values being inconsistent with ``NumPy`` (:issue:`36176`,:issue:`36254`) +- Inconsistency in :class:`DatetimeArray`, :class:`TimedeltaArray`, and :class:`PeriodArray` setitem casting arrays of strings to datetimelike scalars but not scalar strings (:issue:`36261`) +- Timedelta ^^^^^^^^^ diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index b013246e724de..6f0e2a6a598fc 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -875,8 +875,7 @@ def _validate_setitem_value(self, value): if is_list_like(value): value = self._validate_listlike(value, "setitem", cast_str=True) else: - # TODO: cast_str for consistency? 
- value = self._validate_scalar(value, msg, cast_str=False) + value = self._validate_scalar(value, msg, cast_str=True) return self._unbox(value, setitem=True) diff --git a/pandas/tests/arrays/test_datetimelike.py b/pandas/tests/arrays/test_datetimelike.py index 624335fd78b0f..0ae6b5bde5297 100644 --- a/pandas/tests/arrays/test_datetimelike.py +++ b/pandas/tests/arrays/test_datetimelike.py @@ -2,6 +2,7 @@ import numpy as np import pytest +import pytz from pandas._libs import OutOfBoundsDatetime from pandas.compat.numpy import np_version_under1p18 @@ -282,15 +283,35 @@ def test_setitem(self): expected[:2] = expected[-2:] tm.assert_numpy_array_equal(arr.asi8, expected) - def test_setitem_str_array(self, arr1d): - if isinstance(arr1d, DatetimeArray) and arr1d.tz is not None: - pytest.xfail(reason="timezone comparisons inconsistent") + def test_setitem_strs(self, arr1d): + # Check that we parse strs in both scalar and listlike + if isinstance(arr1d, DatetimeArray): + tz = arr1d.tz + if ( + tz is not None + and tz is not pytz.UTC + and not isinstance(tz, pytz._FixedOffset) + ): + # If we have e.g. tzutc(), when we cast to string and parse + # back we get pytz.UTC, and then consider them different timezones + # so incorrectly raise. + pytest.xfail(reason="timezone comparisons inconsistent") + + # Setting list-like of strs expected = arr1d.copy() expected[[0, 1]] = arr1d[-2:] - arr1d[:2] = [str(x) for x in arr1d[-2:]] + result = arr1d.copy() + result[:2] = [str(x) for x in arr1d[-2:]] + tm.assert_equal(result, expected) - tm.assert_equal(arr1d, expected) + # Same thing but now for just a scalar str + expected = arr1d.copy() + expected[0] = arr1d[-1] + + result = arr1d.copy() + result[0] = str(arr1d[-1]) + tm.assert_equal(result, expected) @pytest.mark.parametrize("as_index", [True, False]) def test_setitem_categorical(self, arr1d, as_index): diff --git a/pandas/tests/arrays/test_datetimes.py b/pandas/tests/arrays/test_datetimes.py index 804654451a6d9..53f26de09f94e 100644 --- a/pandas/tests/arrays/test_datetimes.py +++ b/pandas/tests/arrays/test_datetimes.py @@ -197,6 +197,29 @@ def test_tz_setter_raises(self): with pytest.raises(AttributeError, match="tz_localize"): arr.tz = "UTC" + def test_setitem_str_impute_tz(self, tz_naive_fixture): + # Like for getitem, if we are passed a naive-like string, we impute + # our own timezone. 
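+        # Editor's note (illustrative): e.g. with tz="US/Central", assigning
+        # the naive string "2020-09-08 16:50" should store the same instant as
+        # pd.Timestamp("2020-09-08 16:50", tz="US/Central"); the `ts`/`setter`
+        # pair below checks exactly this.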
+ tz = tz_naive_fixture + + data = np.array([1, 2, 3], dtype="M8[ns]") + dtype = data.dtype if tz is None else DatetimeTZDtype(tz=tz) + arr = DatetimeArray(data, dtype=dtype) + expected = arr.copy() + + ts = pd.Timestamp("2020-09-08 16:50").tz_localize(tz) + setter = str(ts.tz_localize(None)) + + # Setting a scalar tznaive string + expected[0] = ts + arr[0] = setter + tm.assert_equal(arr, expected) + + # Setting a listlike of tznaive strings + expected[1] = ts + arr[:2] = [setter, setter] + tm.assert_equal(arr, expected) + def test_setitem_different_tz_raises(self): data = np.array([1, 2, 3], dtype="M8[ns]") arr = DatetimeArray(data, copy=False, dtype=DatetimeTZDtype(tz="US/Central")) From e47d5ebbe0f714d199aa08eda68cb4fbe42503a2 Mon Sep 17 00:00:00 2001 From: Fangchen Li Date: Sun, 13 Sep 2020 06:17:04 -0500 Subject: [PATCH 525/632] CI: install numpy from pip #36296 (#36323) --- ci/build39.sh | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/ci/build39.sh b/ci/build39.sh index f85e1c7def206..b9c76635df99b 100755 --- a/ci/build39.sh +++ b/ci/build39.sh @@ -3,16 +3,9 @@ sudo apt-get install build-essential gcc xvfb pip install --no-deps -U pip wheel setuptools -pip install python-dateutil pytz pytest pytest-xdist hypothesis +pip install numpy python-dateutil pytz pytest pytest-xdist hypothesis pip install cython --pre # https://github.com/cython/cython/issues/3395 -git clone https://github.com/numpy/numpy -cd numpy -python setup.py build_ext --inplace -python setup.py install -cd .. -rm -rf numpy - python setup.py build_ext -inplace python -m pip install --no-build-isolation -e . From 5647251706075dc20507cdd9b408e5a74311e4fb Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Sun, 13 Sep 2020 05:26:21 -0700 Subject: [PATCH 526/632] REF: _convert_for_op -> _validate_fill_value (#36318) --- pandas/core/indexes/base.py | 8 +++++--- pandas/core/indexes/datetimes.py | 6 ++---- pandas/core/indexes/numeric.py | 2 +- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 0aed08d46657e..b0f9f8ac8b2fd 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -4047,9 +4047,10 @@ def _to_safe_for_reshape(self): """ return self - def _convert_for_op(self, value): + def _validate_fill_value(self, value): """ - Convert value to be insertable to ndarray. + Check if the value can be inserted into our array, and convert + it to an appropriate native type if necessary. """ return value @@ -4228,7 +4229,8 @@ def putmask(self, mask, value): """ values = self.values.copy() try: - np.putmask(values, mask, self._convert_for_op(value)) + converted = self._validate_fill_value(value) + np.putmask(values, mask, converted) if is_period_dtype(self.dtype): # .values cast to object, so we need to cast back values = type(self)(values)._data diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index f269495f6011a..2d166773dda2c 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -325,13 +325,11 @@ def __reduce__(self): d.update(self._get_attributes_dict()) return _new_DatetimeIndex, (type(self), d), None - def _convert_for_op(self, value): + def _validate_fill_value(self, value): """ Convert value to be insertable to ndarray. 
""" - if self._has_same_tz(value): - return Timestamp(value).asm8 - raise ValueError("Passed item and index have different timezone") + return self._data._validate_setitem_value(value) def _is_comparable_dtype(self, dtype: DtypeObj) -> bool: """ diff --git a/pandas/core/indexes/numeric.py b/pandas/core/indexes/numeric.py index e8b7efeee8852..f6859cbc4c0a2 100644 --- a/pandas/core/indexes/numeric.py +++ b/pandas/core/indexes/numeric.py @@ -116,7 +116,7 @@ def _shallow_copy(self, values=None, name: Label = lib.no_default): return Float64Index._simple_new(values, name=name) return super()._shallow_copy(values=values, name=name) - def _convert_for_op(self, value): + def _validate_fill_value(self, value): """ Convert value to be insertable to ndarray. """ From 28aab6535e67279e5038490d97ee88ac422874d6 Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Sun, 13 Sep 2020 05:28:15 -0700 Subject: [PATCH 527/632] REF: separate out helpers from iLoc._setitem_with_indexer (#36315) --- pandas/core/indexing.py | 141 ++++++++++++++++++++++------------------ 1 file changed, 77 insertions(+), 64 deletions(-) diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index 51031d9ab1153..64da27a6574a6 100644 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -34,7 +34,7 @@ from pandas.core.indexes.api import Index if TYPE_CHECKING: - from pandas import DataFrame # noqa:F401 + from pandas import DataFrame, Series # noqa:F401 # "null slice" _NS = slice(None, None) @@ -1543,13 +1543,10 @@ def _setitem_with_indexer(self, indexer, value): since it goes from positional indexers back to labels when calling BlockManager methods, see GH#12991, GH#22046, GH#15686. """ - - # also has the side effect of consolidating in-place - from pandas import Series - info_axis = self.obj._info_axis_number # maybe partial set + # _is_mixed_type has the side effect of consolidating in-place take_split_path = self.obj._is_mixed_type # if there is only one block/type, still have to take split path @@ -1642,6 +1639,8 @@ def _setitem_with_indexer(self, indexer, value): # align and set the values if take_split_path: + # We have to operate column-wise + # Above we only set take_split_path to True for 2D cases assert self.ndim == 2 assert info_axis == 1 @@ -1682,29 +1681,6 @@ def _setitem_with_indexer(self, indexer, value): pi = plane_indexer[0] if lplane_indexer == 1 else plane_indexer - def isetter(loc, v): - # positional setting on column loc - ser = self.obj._ixs(loc, axis=1) - - # perform the equivalent of a setitem on the info axis - # as we have a null slice or a slice with full bounds - # which means essentially reassign to the columns of a - # multi-dim object - # GH6149 (null slice), GH10408 (full bounds) - if isinstance(pi, tuple) and all( - com.is_null_slice(idx) or com.is_full_slice(idx, len(self.obj)) - for idx in pi - ): - ser = v - else: - # set the item, possibly having a dtype change - ser = ser.copy() - ser._mgr = ser._mgr.setitem(indexer=pi, value=v) - ser._maybe_update_cacher(clear=True) - - # reset the sliced object if unique - self.obj._iset_item(loc, ser) - # we need an iterable, with a ndim of at least 1 # eg. don't pass through np.array(0) if is_list_like_indexer(value) and getattr(value, "ndim", 1) > 0: @@ -1725,7 +1701,7 @@ def isetter(loc, v): else: v = np.nan - isetter(loc, v) + self._setitem_single_column(loc, v, pi) # we have an equal len ndarray/convertible to our labels # hasattr first, to avoid coercing to ndarray without reason. 
@@ -1744,7 +1720,7 @@ def isetter(loc, v): for i, loc in enumerate(ilocs): # setting with a list, re-coerces - isetter(loc, value[:, i].tolist()) + self._setitem_single_column(loc, value[:, i].tolist(), pi) elif ( len(labels) == 1 @@ -1753,7 +1729,7 @@ def isetter(loc, v): ): # we have an equal len list/ndarray # We only get here with len(labels) == len(ilocs) == 1 - isetter(ilocs[0], value) + self._setitem_single_column(ilocs[0], value, pi) elif lplane_indexer == 0 and len(value) == len(self.obj.index): # We get here in one case via .loc with a all-False mask @@ -1768,50 +1744,87 @@ def isetter(loc, v): ) for loc, v in zip(ilocs, value): - isetter(loc, v) + self._setitem_single_column(loc, v, pi) else: # scalar value for loc in ilocs: - isetter(loc, value) + self._setitem_single_column(loc, value, pi) else: - if isinstance(indexer, tuple): + self._setitem_single_block_inplace(indexer, value) + + def _setitem_single_column(self, loc: int, value, plane_indexer): + # positional setting on column loc + pi = plane_indexer + + ser = self.obj._ixs(loc, axis=1) + + # perform the equivalent of a setitem on the info axis + # as we have a null slice or a slice with full bounds + # which means essentially reassign to the columns of a + # multi-dim object + # GH#6149 (null slice), GH#10408 (full bounds) + if isinstance(pi, tuple) and all( + com.is_null_slice(idx) or com.is_full_slice(idx, len(self.obj)) + for idx in pi + ): + ser = value + else: + # set the item, possibly having a dtype change + ser = ser.copy() + ser._mgr = ser._mgr.setitem(indexer=pi, value=value) + ser._maybe_update_cacher(clear=True) - # if we are setting on the info axis ONLY - # set using those methods to avoid block-splitting - # logic here - if ( - len(indexer) > info_axis - and is_integer(indexer[info_axis]) - and all( - com.is_null_slice(idx) - for i, idx in enumerate(indexer) - if i != info_axis - ) - and item_labels.is_unique - ): - self.obj[item_labels[indexer[info_axis]]] = value - return + # reset the sliced object if unique + self.obj._iset_item(loc, ser) - indexer = maybe_convert_ix(*indexer) + def _setitem_single_block_inplace(self, indexer, value): + """ + _setitem_with_indexer for the case when we have a single Block + and the value can be set into it without casting. + """ + from pandas import Series - if isinstance(value, (ABCSeries, dict)): - # TODO(EA): ExtensionBlock.setitem this causes issues with - # setting for extensionarrays that store dicts. Need to decide - # if it's worth supporting that. 
- value = self._align_series(indexer, Series(value)) + info_axis = self.obj._info_axis_number + item_labels = self.obj._get_axis(info_axis) - elif isinstance(value, ABCDataFrame): - value = self._align_frame(indexer, value) + if isinstance(indexer, tuple): - # check for chained assignment - self.obj._check_is_chained_assignment_possible() + # if we are setting on the info axis ONLY + # set using those methods to avoid block-splitting + # logic here + if ( + len(indexer) > info_axis + and is_integer(indexer[info_axis]) + and all( + com.is_null_slice(idx) + for i, idx in enumerate(indexer) + if i != info_axis + ) + and item_labels.is_unique + ): + self.obj[item_labels[indexer[info_axis]]] = value + return - # actually do the set - self.obj._consolidate_inplace() - self.obj._mgr = self.obj._mgr.setitem(indexer=indexer, value=value) - self.obj._maybe_update_cacher(clear=True) + indexer = maybe_convert_ix(*indexer) + + if isinstance(value, (ABCSeries, dict)): + # TODO(EA): ExtensionBlock.setitem this causes issues with + # setting for extensionarrays that store dicts. Need to decide + # if it's worth supporting that. + value = self._align_series(indexer, Series(value)) + + elif isinstance(value, ABCDataFrame): + value = self._align_frame(indexer, value) + + # check for chained assignment + self.obj._check_is_chained_assignment_possible() + + # actually do the set + self.obj._consolidate_inplace() + self.obj._mgr = self.obj._mgr.setitem(indexer=indexer, value=value) + self.obj._maybe_update_cacher(clear=True) def _setitem_with_indexer_missing(self, indexer, value): """ @@ -1873,7 +1886,7 @@ def _setitem_with_indexer_missing(self, indexer, value): self.obj._mgr = self.obj.append(value)._mgr self.obj._maybe_update_cacher(clear=True) - def _align_series(self, indexer, ser: ABCSeries, multiindex_indexer: bool = False): + def _align_series(self, indexer, ser: "Series", multiindex_indexer: bool = False): """ Parameters ---------- From 14782911e813ded770dfa3db9d91853b9769ab69 Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Sun, 13 Sep 2020 05:29:31 -0700 Subject: [PATCH 528/632] PERF: CategoricalDtype.__eq__ (#36280) --- pandas/core/dtypes/dtypes.py | 26 ++++++++++++++++++++++---- 1 file changed, 22 insertions(+), 4 deletions(-) diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py index e321fdd9b3a9b..2e5dc15131e70 100644 --- a/pandas/core/dtypes/dtypes.py +++ b/pandas/core/dtypes/dtypes.py @@ -375,12 +375,30 @@ def __eq__(self, other: Any) -> bool: # but same order is not necessary. There is no distinction between # ordered=False and ordered=None: CDT(., False) and CDT(., None) # will be equal if they have the same categories. - if ( - self.categories.dtype == other.categories.dtype - and self.categories.equals(other.categories) - ): + left = self.categories + right = other.categories + + # GH#36280 the ordering of checks here is for performance + if not left.dtype == right.dtype: + return False + + if len(left) != len(right): + return False + + if self.categories.equals(other.categories): # Check and see if they happen to be identical categories return True + + if left.dtype != object: + # Faster than calculating hash + indexer = left.get_indexer(right) + # Because left and right have the same length and are unique, + # `indexer` not having any -1s implies that there is a + # bijection between `left` and `right`. + return (indexer != -1).all() + + # With object-dtype we need a comparison that identifies + # e.g. 
int(2) as distinct from float(2) return hash(self) == hash(other) def __repr__(self) -> str_type: From 23e28dfcdffdc7abe64bcc98d95de4977354c13c Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Sun, 13 Sep 2020 05:31:17 -0700 Subject: [PATCH 529/632] REF: de-duplicate _wrap_joined_index in MultiIndex (#36313) --- pandas/core/indexes/base.py | 5 ++++- pandas/core/indexes/multi.py | 25 +++++++++++++++++++++---- 2 files changed, 25 insertions(+), 5 deletions(-) diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index b0f9f8ac8b2fd..04a63beb2ef45 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -3876,7 +3876,10 @@ def _join_monotonic(self, other, how="left", return_indexers=False): return join_index def _wrap_joined_index(self, joined, other): - name = get_op_result_name(self, other) + if isinstance(self, ABCMultiIndex): + name = self.names if self.names == other.names else None + else: + name = get_op_result_name(self, other) return self._constructor(joined, name=name) # -------------------------------------------------------------------- diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index 7aceb898f5ccf..197c5f42ed62f 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -1,3 +1,4 @@ +from functools import wraps from sys import getsizeof from typing import ( TYPE_CHECKING, @@ -152,6 +153,25 @@ def _codes_to_ints(self, codes): return np.bitwise_or.reduce(codes, axis=1) +def names_compat(meth): + """ + A decorator to allow either `name` or `names` keyword but not both. + + This makes it easier to share code with base class. + """ + + @wraps(meth) + def new_meth(self_or_cls, *args, **kwargs): + if "name" in kwargs and "names" in kwargs: + raise TypeError("Can only provide one of `names` and `name`") + elif "name" in kwargs: + kwargs["names"] = kwargs.pop("name") + + return meth(self_or_cls, *args, **kwargs) + + return new_meth + + class MultiIndex(Index): """ A multi-level, or hierarchical, index object for pandas objects. @@ -449,6 +469,7 @@ def from_arrays(cls, arrays, sortorder=None, names=lib.no_default) -> "MultiInde ) @classmethod + @names_compat def from_tuples( cls, tuples, @@ -3635,10 +3656,6 @@ def delete(self, loc): verify_integrity=False, ) - def _wrap_joined_index(self, joined, other): - names = self.names if self.names == other.names else None - return self._constructor(joined, names=names) - @doc(Index.isin) def isin(self, values, level=None): if level is None: From 22374c3b50f3a1691e5867ab9a950f44826a1bb7 Mon Sep 17 00:00:00 2001 From: Daniel Saxton <2658661+dsaxton@users.noreply.github.com> Date: Sun, 13 Sep 2020 07:34:51 -0500 Subject: [PATCH 530/632] BUG: Don't overflow with large int scalar (#36316) --- doc/source/whatsnew/v1.1.3.rst | 1 + pandas/core/dtypes/cast.py | 5 +++++ pandas/tests/series/test_constructors.py | 7 +++++++ 3 files changed, 13 insertions(+) diff --git a/doc/source/whatsnew/v1.1.3.rst b/doc/source/whatsnew/v1.1.3.rst index 25d223418fc92..5cbd160f29d66 100644 --- a/doc/source/whatsnew/v1.1.3.rst +++ b/doc/source/whatsnew/v1.1.3.rst @@ -25,6 +25,7 @@ Fixed regressions Bug fixes ~~~~~~~~~ - Bug in :meth:`Series.str.startswith` and :meth:`Series.str.endswith` with ``category`` dtype not propagating ``na`` parameter (:issue:`36241`) +- Bug in :class:`Series` constructor where integer overflow would occur for sufficiently large scalar inputs when an index was provided (:issue:`36291`) .. 
--------------------------------------------------------------------------- diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index 64ccc0be0a25d..05759ffb43dde 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -697,6 +697,11 @@ def infer_dtype_from_scalar(val, pandas_dtype: bool = False) -> Tuple[DtypeObj, else: dtype = np.dtype(np.int64) + try: + np.array(val, dtype=dtype) + except OverflowError: + dtype = np.array(val).dtype + elif is_float(val): if isinstance(val, np.floating): dtype = np.dtype(type(val)) diff --git a/pandas/tests/series/test_constructors.py b/pandas/tests/series/test_constructors.py index 0fb8c5955a2e7..8ac0a55e63cd1 100644 --- a/pandas/tests/series/test_constructors.py +++ b/pandas/tests/series/test_constructors.py @@ -1501,3 +1501,10 @@ def test_construction_from_ordered_collection(self): result = Series({"a": 1, "b": 2}.values()) expected = Series([1, 2]) tm.assert_series_equal(result, expected) + + def test_construction_from_large_int_scalar_no_overflow(self): + # https://github.com/pandas-dev/pandas/issues/36291 + n = 1_000_000_000_000_000_000_000 + result = Series(n, index=[0]) + expected = Series(n) + tm.assert_series_equal(result, expected) From 2d95908d378d4b23889194a21ff2411c29554d2b Mon Sep 17 00:00:00 2001 From: Terji Petersen Date: Sun, 13 Sep 2020 13:39:58 +0100 Subject: [PATCH 531/632] PERF: constructing string Series (#36317) --- doc/source/whatsnew/v1.2.0.rst | 2 +- pandas/core/construction.py | 4 +++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index e577a8f26bd12..a3260f4089e7d 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -205,7 +205,7 @@ Deprecations Performance improvements ~~~~~~~~~~~~~~~~~~~~~~~~ -- Performance improvements when creating Series with dtype `str` or :class:`StringDtype` from array with many string elements (:issue:`36304`) +- Performance improvements when creating Series with dtype `str` or :class:`StringDtype` from array with many string elements (:issue:`36304`, :issue:`36317`) - Performance improvement in :meth:`GroupBy.agg` with the ``numba`` engine (:issue:`35759`) - diff --git a/pandas/core/construction.py b/pandas/core/construction.py index 0993328aef8de..3ec5bc90d521d 100644 --- a/pandas/core/construction.py +++ b/pandas/core/construction.py @@ -37,6 +37,7 @@ is_list_like, is_object_dtype, is_sparse, + is_string_dtype, is_timedelta64_ns_dtype, ) from pandas.core.dtypes.generic import ( @@ -510,7 +511,8 @@ def sanitize_array( data = np.array(data, dtype=dtype, copy=False) subarr = np.array(data, dtype=object, copy=copy) - if is_object_dtype(subarr.dtype) and not is_object_dtype(dtype): + is_object_or_str_dtype = is_object_dtype(dtype) or is_string_dtype(dtype) + if is_object_dtype(subarr.dtype) and not is_object_or_str_dtype: inferred = lib.infer_dtype(subarr, skipna=False) if inferred in {"interval", "period"}: subarr = array(subarr) From 7a7b0535a245c1d4c669ca1d934b3f469c1db7d3 Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Sun, 13 Sep 2020 05:40:54 -0700 Subject: [PATCH 532/632] REF: de-duplicate get_indexer_non_unique (#36322) --- pandas/core/indexes/multi.py | 4 ---- pandas/core/indexes/period.py | 19 +++---------------- 2 files changed, 3 insertions(+), 20 deletions(-) diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index 197c5f42ed62f..a21a54e4a9be3 100644 --- a/pandas/core/indexes/multi.py +++ 
b/pandas/core/indexes/multi.py @@ -2533,10 +2533,6 @@ def get_indexer(self, target, method=None, limit=None, tolerance=None): return ensure_platform_int(indexer) - @Appender(_index_shared_docs["get_indexer_non_unique"] % _index_doc_kwargs) - def get_indexer_non_unique(self, target): - return super().get_indexer_non_unique(target) - def get_slice_bound( self, label: Union[Hashable, Sequence[Hashable]], side: str, kind: str ) -> int: diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py index 5282b6f0154b4..42dce1bd53f22 100644 --- a/pandas/core/indexes/period.py +++ b/pandas/core/indexes/period.py @@ -12,7 +12,6 @@ from pandas.util._decorators import Appender, cache_readonly, doc from pandas.core.dtypes.common import ( - ensure_platform_int, is_bool_dtype, is_datetime64_any_dtype, is_dtype_equal, @@ -473,12 +472,13 @@ def get_indexer(self, target, method=None, limit=None, tolerance=None): target = ensure_index(target) if isinstance(target, PeriodIndex): - if target.freq != self.freq: + if not self._is_comparable_dtype(target.dtype): + # i.e. target.freq != self.freq # No matches no_matches = -1 * np.ones(self.shape, dtype=np.intp) return no_matches - target = target.asi8 + target = target._get_engine_target() # i.e. target.asi8 self_index = self._int64index else: self_index = self @@ -491,19 +491,6 @@ def get_indexer(self, target, method=None, limit=None, tolerance=None): return Index.get_indexer(self_index, target, method, limit, tolerance) - @Appender(_index_shared_docs["get_indexer_non_unique"] % _index_doc_kwargs) - def get_indexer_non_unique(self, target): - target = ensure_index(target) - - if not self._is_comparable_dtype(target.dtype): - no_matches = -1 * np.ones(self.shape, dtype=np.intp) - return no_matches, no_matches - - target = target.asi8 - - indexer, missing = self._int64index.get_indexer_non_unique(target) - return ensure_platform_int(indexer), missing - def get_loc(self, key, method=None, tolerance=None): """ Get integer location for requested label. From 73cdfc460d704431266388d519cc9981fdd22cb5 Mon Sep 17 00:00:00 2001 From: Daniel Saxton <2658661+dsaxton@users.noreply.github.com> Date: Sun, 13 Sep 2020 07:44:15 -0500 Subject: [PATCH 533/632] REGR: Fix IntegerArray unary ops regression (#36303) --- doc/source/whatsnew/v1.1.3.rst | 1 + pandas/conftest.py | 13 ++++++ pandas/core/arrays/integer.py | 9 ++++ pandas/core/generic.py | 5 ++- .../tests/arrays/integer/test_arithmetic.py | 38 +++++++++++++++++ pandas/tests/frame/test_operators.py | 2 +- pandas/tests/series/test_operators.py | 41 +++++++++++++++++++ 7 files changed, 107 insertions(+), 2 deletions(-) diff --git a/doc/source/whatsnew/v1.1.3.rst b/doc/source/whatsnew/v1.1.3.rst index 5cbd160f29d66..2457d00eb2173 100644 --- a/doc/source/whatsnew/v1.1.3.rst +++ b/doc/source/whatsnew/v1.1.3.rst @@ -14,6 +14,7 @@ including other versions of pandas. 
Fixed regressions ~~~~~~~~~~~~~~~~~ +- Fixed regression in :class:`IntegerArray` unary plus and minus operations raising a ``TypeError`` (:issue:`36063`) - Fixed regression in :meth:`Series.__getitem__` incorrectly raising when the input was a tuple (:issue:`35534`) - Fixed regression in :meth:`Series.__getitem__` incorrectly raising when the input was a frozenset (:issue:`35747`) - diff --git a/pandas/conftest.py b/pandas/conftest.py index 5474005a63b8e..e79370e53ead6 100644 --- a/pandas/conftest.py +++ b/pandas/conftest.py @@ -1055,6 +1055,19 @@ def any_nullable_int_dtype(request): return request.param +@pytest.fixture(params=tm.SIGNED_EA_INT_DTYPES) +def any_signed_nullable_int_dtype(request): + """ + Parameterized fixture for any signed nullable integer dtype. + + * 'Int8' + * 'Int16' + * 'Int32' + * 'Int64' + """ + return request.param + + @pytest.fixture(params=tm.ALL_REAL_DTYPES) def any_real_dtype(request): """ diff --git a/pandas/core/arrays/integer.py b/pandas/core/arrays/integer.py index dc08e018397bc..94af013d6df2c 100644 --- a/pandas/core/arrays/integer.py +++ b/pandas/core/arrays/integer.py @@ -364,6 +364,15 @@ def __init__(self, values: np.ndarray, mask: np.ndarray, copy: bool = False): ) super().__init__(values, mask, copy=copy) + def __neg__(self): + return type(self)(-self._data, self._mask) + + def __pos__(self): + return self + + def __abs__(self): + return type(self)(np.abs(self._data), self._mask) + @classmethod def _from_sequence(cls, scalars, dtype=None, copy: bool = False) -> "IntegerArray": return integer_array(scalars, dtype=dtype, copy=copy) diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 9ed9db801d0a8..d7b82923e7488 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -1417,7 +1417,10 @@ def __pos__(self): ): arr = operator.pos(values) else: - raise TypeError(f"Unary plus expects numeric dtype, not {values.dtype}") + raise TypeError( + "Unary plus expects bool, numeric, timedelta, " + f"or object dtype, not {values.dtype}" + ) return self.__array_wrap__(arr) def __invert__(self): diff --git a/pandas/tests/arrays/integer/test_arithmetic.py b/pandas/tests/arrays/integer/test_arithmetic.py index d309f6423e0c1..f549a7caeab1d 100644 --- a/pandas/tests/arrays/integer/test_arithmetic.py +++ b/pandas/tests/arrays/integer/test_arithmetic.py @@ -261,3 +261,41 @@ def test_reduce_to_float(op): index=pd.Index(["a", "b"], name="A"), ) tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "source, target", + [ + ([1, 2, 3], [-1, -2, -3]), + ([1, 2, None], [-1, -2, None]), + ([-1, 0, 1], [1, 0, -1]), + ], +) +def test_unary_minus_nullable_int(any_signed_nullable_int_dtype, source, target): + dtype = any_signed_nullable_int_dtype + arr = pd.array(source, dtype=dtype) + result = -arr + expected = pd.array(target, dtype=dtype) + tm.assert_extension_array_equal(result, expected) + + +@pytest.mark.parametrize( + "source", [[1, 2, 3], [1, 2, None], [-1, 0, 1]], +) +def test_unary_plus_nullable_int(any_signed_nullable_int_dtype, source): + dtype = any_signed_nullable_int_dtype + expected = pd.array(source, dtype=dtype) + result = +expected + tm.assert_extension_array_equal(result, expected) + + +@pytest.mark.parametrize( + "source, target", + [([1, 2, 3], [1, 2, 3]), ([1, -2, None], [1, 2, None]), ([-1, 0, 1], [1, 0, 1])], +) +def test_abs_nullable_int(any_signed_nullable_int_dtype, source, target): + dtype = any_signed_nullable_int_dtype + s = pd.array(source, dtype=dtype) + result = abs(s) + expected = pd.array(target, 
dtype=dtype) + tm.assert_extension_array_equal(result, expected) diff --git a/pandas/tests/frame/test_operators.py b/pandas/tests/frame/test_operators.py index fede1ca23a8ce..8cf66e2737249 100644 --- a/pandas/tests/frame/test_operators.py +++ b/pandas/tests/frame/test_operators.py @@ -119,7 +119,7 @@ def test_pos_object(self, df): "df", [pd.DataFrame({"a": pd.to_datetime(["2017-01-22", "1970-01-01"])})] ) def test_pos_raises(self, df): - msg = re.escape("Unary plus expects numeric dtype, not datetime64[ns]") + msg = "Unary plus expects .* dtype, not datetime64\\[ns\\]" with pytest.raises(TypeError, match=msg): (+df) with pytest.raises(TypeError, match=msg): diff --git a/pandas/tests/series/test_operators.py b/pandas/tests/series/test_operators.py index e1c9682329271..aee947e738525 100644 --- a/pandas/tests/series/test_operators.py +++ b/pandas/tests/series/test_operators.py @@ -536,3 +536,44 @@ def test_invert(self): ser = tm.makeStringSeries() ser.name = "series" tm.assert_series_equal(-(ser < 0), ~(ser < 0)) + + @pytest.mark.parametrize( + "source, target", + [ + ([1, 2, 3], [-1, -2, -3]), + ([1, 2, None], [-1, -2, None]), + ([-1, 0, 1], [1, 0, -1]), + ], + ) + def test_unary_minus_nullable_int( + self, any_signed_nullable_int_dtype, source, target + ): + dtype = any_signed_nullable_int_dtype + s = pd.Series(source, dtype=dtype) + result = -s + expected = pd.Series(target, dtype=dtype) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize( + "source", [[1, 2, 3], [1, 2, None], [-1, 0, 1]], + ) + def test_unary_plus_nullable_int(self, any_signed_nullable_int_dtype, source): + dtype = any_signed_nullable_int_dtype + expected = pd.Series(source, dtype=dtype) + result = +expected + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize( + "source, target", + [ + ([1, 2, 3], [1, 2, 3]), + ([1, -2, None], [1, 2, None]), + ([-1, 0, 1], [1, 0, 1]), + ], + ) + def test_abs_nullable_int(self, any_signed_nullable_int_dtype, source, target): + dtype = any_signed_nullable_int_dtype + s = pd.Series(source, dtype=dtype) + result = abs(s) + expected = pd.Series(target, dtype=dtype) + tm.assert_series_equal(result, expected) From 70d0dd0a1a03a7c8c6738d0f8ffd77d131cecde4 Mon Sep 17 00:00:00 2001 From: attack68 <24256554+attack68@users.noreply.github.com> Date: Sun, 13 Sep 2020 14:52:42 +0200 Subject: [PATCH 534/632] ENH: add set_td_classes method for CSS class addition to data cells (#36159) --- doc/source/whatsnew/v1.2.0.rst | 2 +- pandas/io/formats/style.py | 69 ++++++++++++++++++++++++++- pandas/tests/io/formats/test_style.py | 21 ++++++++ 3 files changed, 90 insertions(+), 2 deletions(-) diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index a3260f4089e7d..f7c94b6b1846f 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -104,7 +104,7 @@ Other enhancements - :meth:`DataFrame.applymap` now supports ``na_action`` (:issue:`23803`) - :class:`Index` with object dtype supports division and multiplication (:issue:`34160`) - :meth:`DataFrame.explode` and :meth:`Series.explode` now support exploding of sets (:issue:`35614`) -- +- `Styler` now allows direct CSS class name addition to individual data cells (:issue:`36159`) .. 
_whatsnew_120.api_breaking.python: diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py index b27a4e036e137..5c3a309b0e310 100644 --- a/pandas/io/formats/style.py +++ b/pandas/io/formats/style.py @@ -171,6 +171,8 @@ def __init__( self.cell_ids = cell_ids self.na_rep = na_rep + self.cell_context: Dict[str, Any] = {} + # display_funcs maps (row, col) -> formatting function def default_display_func(x): @@ -262,7 +264,7 @@ def format_attr(pair): idx_lengths = _get_level_lengths(self.index) col_lengths = _get_level_lengths(self.columns, hidden_columns) - cell_context = dict() + cell_context = self.cell_context n_rlvls = self.data.index.nlevels n_clvls = self.data.columns.nlevels @@ -499,6 +501,70 @@ def format(self, formatter, subset=None, na_rep: Optional[str] = None) -> "Style self._display_funcs[(i, j)] = formatter return self + def set_td_classes(self, classes: DataFrame) -> "Styler": + """ + Add string based CSS class names to data cells that will appear within the + `Styler` HTML result. These classes are added within specified `' in s + assert '' in s + assert '' in s + assert '' in s + def test_colspan_w3(self): # GH 36223 df = pd.DataFrame(data=[[1, 2]], columns=[["l0", "l0"], ["l1a", "l1b"]]) From 65074db55405a2269a4e2432a9384948eb74691d Mon Sep 17 00:00:00 2001 From: Matthew Roeschke Date: Sun, 13 Sep 2020 06:13:53 -0700 Subject: [PATCH 535/632] PERF: Allow groupby transform with numba engine to be fully parallelizable (#36240) --- asv_bench/benchmarks/groupby.py | 46 ++++++++---- doc/source/whatsnew/v1.2.0.rst | 2 +- pandas/core/groupby/generic.py | 76 +++++++++----------- pandas/core/groupby/groupby.py | 40 ++++++++++- pandas/core/groupby/numba_.py | 69 +++++++++++++++++- pandas/tests/groupby/transform/test_numba.py | 16 +++++ 6 files changed, 187 insertions(+), 62 deletions(-) diff --git a/asv_bench/benchmarks/groupby.py b/asv_bench/benchmarks/groupby.py index 5ffda03fad80f..bda3ab71d1a00 100644 --- a/asv_bench/benchmarks/groupby.py +++ b/asv_bench/benchmarks/groupby.py @@ -627,33 +627,42 @@ def time_first(self): class TransformEngine: - def setup(self): + + param_names = ["parallel"] + params = [[True, False]] + + def setup(self, parallel): N = 10 ** 3 data = DataFrame( {0: [str(i) for i in range(100)] * N, 1: list(range(100)) * N}, columns=[0, 1], ) + self.parallel = parallel self.grouper = data.groupby(0) - def time_series_numba(self): + def time_series_numba(self, parallel): def function(values, index): return values * 5 - self.grouper[1].transform(function, engine="numba") + self.grouper[1].transform( + function, engine="numba", engine_kwargs={"parallel": self.parallel} + ) - def time_series_cython(self): + def time_series_cython(self, parallel): def function(values): return values * 5 self.grouper[1].transform(function, engine="cython") - def time_dataframe_numba(self): + def time_dataframe_numba(self, parallel): def function(values, index): return values * 5 - self.grouper.transform(function, engine="numba") + self.grouper.transform( + function, engine="numba", engine_kwargs={"parallel": self.parallel} + ) - def time_dataframe_cython(self): + def time_dataframe_cython(self, parallel): def function(values): return values * 5 @@ -661,15 +670,20 @@ def function(values): class AggEngine: - def setup(self): + + param_names = ["parallel"] + params = [[True, False]] + + def setup(self, parallel): N = 10 ** 3 data = DataFrame( {0: [str(i) for i in range(100)] * N, 1: list(range(100)) * N}, columns=[0, 1], ) + self.parallel = parallel self.grouper = data.groupby(0) - 
def time_series_numba(self): + def time_series_numba(self, parallel): def function(values, index): total = 0 for i, value in enumerate(values): @@ -679,9 +693,11 @@ def function(values, index): total += value * 2 return total - self.grouper[1].agg(function, engine="numba") + self.grouper[1].agg( + function, engine="numba", engine_kwargs={"parallel": self.parallel} + ) - def time_series_cython(self): + def time_series_cython(self, parallel): def function(values): total = 0 for i, value in enumerate(values): @@ -693,7 +709,7 @@ def function(values): self.grouper[1].agg(function, engine="cython") - def time_dataframe_numba(self): + def time_dataframe_numba(self, parallel): def function(values, index): total = 0 for i, value in enumerate(values): @@ -703,9 +719,11 @@ def function(values, index): total += value * 2 return total - self.grouper.agg(function, engine="numba") + self.grouper.agg( + function, engine="numba", engine_kwargs={"parallel": self.parallel} + ) - def time_dataframe_cython(self): + def time_dataframe_cython(self, parallel): def function(values): total = 0 for i, value in enumerate(values): diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index f7c94b6b1846f..ad90d42633355 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -207,7 +207,7 @@ Performance improvements - Performance improvements when creating Series with dtype `str` or :class:`StringDtype` from array with many string elements (:issue:`36304`, :issue:`36317`) - Performance improvement in :meth:`GroupBy.agg` with the ``numba`` engine (:issue:`35759`) -- +- Performance improvement in :meth:`GroupBy.transform` with the ``numba`` engine (:issue:`36240`) .. --------------------------------------------------------------------------- diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index 1552256468ad2..ffd756bed43b6 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -226,10 +226,6 @@ def apply(self, func, *args, **kwargs): def aggregate(self, func=None, *args, engine=None, engine_kwargs=None, **kwargs): if maybe_use_numba(engine): - if not callable(func): - raise NotImplementedError( - "Numba engine can only be used with a single function." - ) with group_selection_context(self): data = self._selected_obj result, index = self._aggregate_with_numba( @@ -489,12 +485,21 @@ def _aggregate_named(self, func, *args, **kwargs): @Substitution(klass="Series") @Appender(_transform_template) def transform(self, func, *args, engine=None, engine_kwargs=None, **kwargs): + + if maybe_use_numba(engine): + with group_selection_context(self): + data = self._selected_obj + result = self._transform_with_numba( + data.to_frame(), func, *args, engine_kwargs=engine_kwargs, **kwargs + ) + return self.obj._constructor( + result.ravel(), index=data.index, name=data.name + ) + func = self._get_cython_func(func) or func if not isinstance(func, str): - return self._transform_general( - func, *args, engine=engine, engine_kwargs=engine_kwargs, **kwargs - ) + return self._transform_general(func, *args, **kwargs) elif func not in base.transform_kernel_allowlist: msg = f"'{func}' is not a valid function name for transform(name)" @@ -938,10 +943,6 @@ class DataFrameGroupBy(GroupBy[DataFrame]): def aggregate(self, func=None, *args, engine=None, engine_kwargs=None, **kwargs): if maybe_use_numba(engine): - if not callable(func): - raise NotImplementedError( - "Numba engine can only be used with a single function." 
- ) with group_selection_context(self): data = self._selected_obj result, index = self._aggregate_with_numba( @@ -1290,42 +1291,25 @@ def _wrap_applied_output(self, keys, values, not_indexed_same=False): return self._reindex_output(result) - def _transform_general( - self, func, *args, engine="cython", engine_kwargs=None, **kwargs - ): + def _transform_general(self, func, *args, **kwargs): from pandas.core.reshape.concat import concat applied = [] obj = self._obj_with_exclusions gen = self.grouper.get_iterator(obj, axis=self.axis) - if maybe_use_numba(engine): - numba_func, cache_key = generate_numba_func( - func, engine_kwargs, kwargs, "groupby_transform" - ) - else: - fast_path, slow_path = self._define_paths(func, *args, **kwargs) + fast_path, slow_path = self._define_paths(func, *args, **kwargs) for name, group in gen: object.__setattr__(group, "name", name) - if maybe_use_numba(engine): - values, index = split_for_numba(group) - res = numba_func(values, index, *args) - if cache_key not in NUMBA_FUNC_CACHE: - NUMBA_FUNC_CACHE[cache_key] = numba_func - # Return the result as a DataFrame for concatenation later - res = self.obj._constructor( - res, index=group.index, columns=group.columns - ) - else: - # Try slow path and fast path. - try: - path, res = self._choose_path(fast_path, slow_path, group) - except TypeError: - return self._transform_item_by_item(obj, fast_path) - except ValueError as err: - msg = "transform must return a scalar value for each group" - raise ValueError(msg) from err + # Try slow path and fast path. + try: + path, res = self._choose_path(fast_path, slow_path, group) + except TypeError: + return self._transform_item_by_item(obj, fast_path) + except ValueError as err: + msg = "transform must return a scalar value for each group" + raise ValueError(msg) from err if isinstance(res, Series): @@ -1361,13 +1345,19 @@ def _transform_general( @Appender(_transform_template) def transform(self, func, *args, engine=None, engine_kwargs=None, **kwargs): + if maybe_use_numba(engine): + with group_selection_context(self): + data = self._selected_obj + result = self._transform_with_numba( + data, func, *args, engine_kwargs=engine_kwargs, **kwargs + ) + return self.obj._constructor(result, index=data.index, columns=data.columns) + # optimized transforms func = self._get_cython_func(func) or func if not isinstance(func, str): - return self._transform_general( - func, *args, engine=engine, engine_kwargs=engine_kwargs, **kwargs - ) + return self._transform_general(func, *args, **kwargs) elif func not in base.transform_kernel_allowlist: msg = f"'{func}' is not a valid function name for transform(name)" @@ -1393,9 +1383,7 @@ def transform(self, func, *args, engine=None, engine_kwargs=None, **kwargs): ): return self._transform_fast(result) - return self._transform_general( - func, engine=engine, engine_kwargs=engine_kwargs, *args, **kwargs - ) + return self._transform_general(func, *args, **kwargs) def _transform_fast(self, result: DataFrame) -> DataFrame: """ diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index 8a55d438cf8d4..30bd53a3ddff1 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -1056,6 +1056,41 @@ def _cython_agg_general( return self._wrap_aggregated_output(output, index=self.grouper.result_index) + def _transform_with_numba(self, data, func, *args, engine_kwargs=None, **kwargs): + """ + Perform groupby transform routine with the numba engine. 
+ + This routine mimics the data splitting routine of the DataSplitter class + to generate the indices of each group in the sorted data and then passes the + data and indices into a Numba jitted function. + """ + if not callable(func): + raise NotImplementedError( + "Numba engine can only be used with a single function." + ) + group_keys = self.grouper._get_group_keys() + labels, _, n_groups = self.grouper.group_info + sorted_index = get_group_index_sorter(labels, n_groups) + sorted_labels = algorithms.take_nd(labels, sorted_index, allow_fill=False) + sorted_data = data.take(sorted_index, axis=self.axis).to_numpy() + starts, ends = lib.generate_slices(sorted_labels, n_groups) + cache_key = (func, "groupby_transform") + if cache_key in NUMBA_FUNC_CACHE: + numba_transform_func = NUMBA_FUNC_CACHE[cache_key] + else: + numba_transform_func = numba_.generate_numba_transform_func( + tuple(args), kwargs, func, engine_kwargs + ) + result = numba_transform_func( + sorted_data, sorted_index, starts, ends, len(group_keys), len(data.columns) + ) + if cache_key not in NUMBA_FUNC_CACHE: + NUMBA_FUNC_CACHE[cache_key] = numba_transform_func + + # result values need to be re-sorted to their original positions since we + # evaluated the data sorted by group + return result.take(np.argsort(sorted_index), axis=0) + def _aggregate_with_numba(self, data, func, *args, engine_kwargs=None, **kwargs): """ Perform groupby aggregation routine with the numba engine. @@ -1064,6 +1099,10 @@ def _aggregate_with_numba(self, data, func, *args, engine_kwargs=None, **kwargs) to generate the indices of each group in the sorted data and then passes the data and indices into a Numba jitted function. """ + if not callable(func): + raise NotImplementedError( + "Numba engine can only be used with a single function." + ) group_keys = self.grouper._get_group_keys() labels, _, n_groups = self.grouper.group_info sorted_index = get_group_index_sorter(labels, n_groups) @@ -1072,7 +1111,6 @@ def _aggregate_with_numba(self, data, func, *args, engine_kwargs=None, **kwargs) starts, ends = lib.generate_slices(sorted_labels, n_groups) cache_key = (func, "groupby_agg") if cache_key in NUMBA_FUNC_CACHE: - # Return an already compiled version of roll_apply if available numba_agg_func = NUMBA_FUNC_CACHE[cache_key] else: numba_agg_func = numba_.generate_numba_agg_func( diff --git a/pandas/core/groupby/numba_.py b/pandas/core/groupby/numba_.py index aebe60f797fcd..a2dfcd7bddd53 100644 --- a/pandas/core/groupby/numba_.py +++ b/pandas/core/groupby/numba_.py @@ -153,7 +153,7 @@ def generate_numba_agg_func( loop_range = range @numba.jit(nopython=nopython, nogil=nogil, parallel=parallel) - def group_apply( + def group_agg( values: np.ndarray, index: np.ndarray, begin: np.ndarray, @@ -169,4 +169,69 @@ def group_apply( result[i, j] = numba_func(group, group_index, *args) return result - return group_apply + return group_agg + + +def generate_numba_transform_func( + args: Tuple, + kwargs: Dict[str, Any], + func: Callable[..., Scalar], + engine_kwargs: Optional[Dict[str, bool]], +) -> Callable[[np.ndarray, np.ndarray, np.ndarray, np.ndarray, int, int], np.ndarray]: + """ + Generate a numba jitted transform function specified by values from engine_kwargs. + + 1. jit the user's function + 2. Return a groupby transform function with the jitted function inline + + Configurations specified in engine_kwargs apply to both the user's + function _AND_ the groupby transform function.
+ + Parameters + ---------- + args : tuple + *args to be passed into the function + kwargs : dict + **kwargs to be passed into the function + func : function + function to be applied to each group and will be JITed + engine_kwargs : dict + dictionary of arguments to be passed into numba.jit + + Returns + ------- + Numba function + """ + nopython, nogil, parallel = get_jit_arguments(engine_kwargs) + + check_kwargs_and_nopython(kwargs, nopython) + + validate_udf(func) + + numba_func = jit_user_function(func, nopython, nogil, parallel) + + numba = import_optional_dependency("numba") + + if parallel: + loop_range = numba.prange + else: + loop_range = range + + @numba.jit(nopython=nopython, nogil=nogil, parallel=parallel) + def group_transform( + values: np.ndarray, + index: np.ndarray, + begin: np.ndarray, + end: np.ndarray, + num_groups: int, + num_columns: int, + ) -> np.ndarray: + result = np.empty((len(values), num_columns)) + for i in loop_range(num_groups): + group_index = index[begin[i] : end[i]] + for j in loop_range(num_columns): + group = values[begin[i] : end[i], j] + result[begin[i] : end[i], j] = numba_func(group, group_index, *args) + return result + + return group_transform diff --git a/pandas/tests/groupby/transform/test_numba.py b/pandas/tests/groupby/transform/test_numba.py index 87723cd7c8f50..fcaa5ab13599a 100644 --- a/pandas/tests/groupby/transform/test_numba.py +++ b/pandas/tests/groupby/transform/test_numba.py @@ -127,3 +127,19 @@ def func_1(values, index): with option_context("compute.use_numba", True): result = grouped.transform(func_1, engine=None) tm.assert_frame_equal(expected, result) + + +@td.skip_if_no("numba", "0.46.0") +@pytest.mark.parametrize( + "agg_func", [["min", "max"], "min", {"B": ["min", "max"], "C": "sum"}], +) +def test_multifunc_notimplemented(agg_func): + data = DataFrame( + {0: ["a", "a", "b", "b", "a"], 1: [1.0, 2.0, 3.0, 4.0, 5.0]}, columns=[0, 1] + ) + grouped = data.groupby(0) + with pytest.raises(NotImplementedError, match="Numba engine can"): + grouped.transform(agg_func, engine="numba") + + with pytest.raises(NotImplementedError, match="Numba engine can"): + grouped[1].transform(agg_func, engine="numba") From 068d1b5be234231e5a829399d58136f5028834eb Mon Sep 17 00:00:00 2001 From: Leonardus Chen Date: Sun, 13 Sep 2020 21:15:07 +0800 Subject: [PATCH 536/632] BUG: GH36212 DataFrame agg() raises error when DataFrame column name is `name` (#36224) --- doc/source/whatsnew/v1.2.0.rst | 1 + pandas/core/base.py | 7 +++++-- pandas/tests/frame/apply/test_frame_apply.py | 15 +++++++++++++++ 3 files changed, 21 insertions(+), 2 deletions(-) diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index ad90d42633355..89d94dc0cabd6 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -327,6 +327,7 @@ Reshaping - Bug in :meth:`DataFrame.pivot_table` with ``aggfunc='count'`` or ``aggfunc='sum'`` returning ``NaN`` for missing categories when pivoted on a ``Categorical``. Now returning ``0`` (:issue:`31422`) - Bug in :func:`union_indexes` where input index names are not preserved in some cases.
Affects :func:`concat` and :class:`DataFrame` constructor (:issue:`13475`) - Bug in :meth:`crosstab` when using multiple columns with ``margins=True`` and ``normalize=True`` (:issue:`35144`) +- Bug in :meth:`DataFrame.agg` with ``func={'name':<FUNC>}`` incorrectly raising ``TypeError`` when ``DataFrame.columns==['Name']`` (:issue:`36212`) - Sparse diff --git a/pandas/core/base.py b/pandas/core/base.py index a688302b99724..53378c0e0e252 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -470,9 +470,12 @@ def is_any_frame() -> bool: try: result = DataFrame(result) except ValueError: - # we have a dict of scalars - result = Series(result, name=getattr(self, "name", None)) + + # GH 36212 use name only if self is a series + name = self.name if (self.ndim == 1) else None + + result = Series(result, name=name) return result, True elif is_list_like(arg): diff --git a/pandas/tests/frame/apply/test_frame_apply.py b/pandas/tests/frame/apply/test_frame_apply.py index 1662f9e2fff56..f75d7c13665f9 100644 --- a/pandas/tests/frame/apply/test_frame_apply.py +++ b/pandas/tests/frame/apply/test_frame_apply.py @@ -1147,6 +1147,21 @@ def test_demo(self): ) tm.assert_frame_equal(result.reindex_like(expected), expected) + def test_agg_with_name_as_column_name(self): + # GH 36212 - Column name is "name" + data = {"name": ["foo", "bar"]} + df = pd.DataFrame(data) + + # result's name should be None + result = df.agg({"name": "count"}) + expected = pd.Series({"name": 2}) + tm.assert_series_equal(result, expected) + + # Check if name is still preserved when aggregating series instead + result = df["name"].agg({"name": "count"}) + expected = pd.Series({"name": 2}, name="name") + tm.assert_series_equal(result, expected) + def test_agg_multiple_mixed_no_warning(self): # GH 20909 mdf = pd.DataFrame( From 752cd42ee5b5a3909c99de1ac35077d1446fce5b Mon Sep 17 00:00:00 2001 From: Kaiqi Dong Date: Sun, 13 Sep 2020 17:53:37 +0200 Subject: [PATCH 537/632] BUG: Fix unintentionally added suffix in DataFrame.apply/agg and Series.apply/agg (#36231) --- doc/source/whatsnew/v1.1.3.rst | 1 + pandas/core/aggregation.py | 3 +-- pandas/core/groupby/generic.py | 1 + pandas/tests/frame/apply/test_frame_apply.py | 11 +++++++++++ pandas/tests/groupby/aggregate/test_aggregate.py | 15 +++++++++++++++ pandas/tests/series/apply/test_series_apply.py | 8 ++++++++ 6 files changed, 37 insertions(+), 2 deletions(-) diff --git a/doc/source/whatsnew/v1.1.3.rst b/doc/source/whatsnew/v1.1.3.rst index 2457d00eb2173..d789518f93f6d 100644 --- a/doc/source/whatsnew/v1.1.3.rst +++ b/doc/source/whatsnew/v1.1.3.rst @@ -14,6 +14,7 @@ including other versions of pandas.
Fixed regressions ~~~~~~~~~~~~~~~~~ +- Fixed regression in :meth:`DataFrame.agg`, :meth:`DataFrame.apply`, :meth:`Series.agg`, and :meth:`Series.apply` where internal suffix is exposed to the users when no relabelling is applied (:issue:`36189`) - Fixed regression in :class:`IntegerArray` unary plus and minus operations raising a ``TypeError`` (:issue:`36063`) - Fixed regression in :meth:`Series.__getitem__` incorrectly raising when the input was a tuple (:issue:`35534`) - Fixed regression in :meth:`Series.__getitem__` incorrectly raising when the input was a frozenset (:issue:`35747`) diff --git a/pandas/core/aggregation.py b/pandas/core/aggregation.py index 8b74fe01d0dc0..c123156495924 100644 --- a/pandas/core/aggregation.py +++ b/pandas/core/aggregation.py @@ -63,7 +63,7 @@ def reconstruct_func( Examples -------- >>> reconstruct_func(None, **{"foo": ("col", "min")}) - (True, defaultdict(None, {'col': ['min']}), ('foo',), array([0])) + (True, defaultdict(<class 'list'>, {'col': ['min']}), ('foo',), array([0])) >>> reconstruct_func("min") (False, 'min', None, None) @@ -87,7 +87,6 @@ def reconstruct_func( if relabeling: func, columns, order = normalize_keyword_aggregation(kwargs) - func = maybe_mangle_lambdas(func) return relabeling, func, columns, order diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index ffd756bed43b6..d4e673d2e538c 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -951,6 +951,7 @@ def aggregate(self, func=None, *args, engine=None, engine_kwargs=None, **kwargs) return self.obj._constructor(result, index=index, columns=data.columns) relabeling, func, columns, order = reconstruct_func(func, **kwargs) + func = maybe_mangle_lambdas(func) result, how = self._aggregate(func, *args, **kwargs) if how is None: diff --git a/pandas/tests/frame/apply/test_frame_apply.py b/pandas/tests/frame/apply/test_frame_apply.py index f75d7c13665f9..e25b681c8c7c3 100644 --- a/pandas/tests/frame/apply/test_frame_apply.py +++ b/pandas/tests/frame/apply/test_frame_apply.py @@ -1534,3 +1534,14 @@ def test_apply_empty_list_reduce(): result = df.apply(lambda x: [], result_type="reduce") expected = pd.Series({"a": [], "b": []}, dtype=object) tm.assert_series_equal(result, expected) + + +def test_apply_no_suffix_index(): + # GH36189 + pdf = pd.DataFrame([[4, 9]] * 3, columns=["A", "B"]) + result = pdf.apply(["sum", lambda x: x.sum(), lambda x: x.sum()]) + expected = pd.DataFrame( + {"A": [12, 12, 12], "B": [27, 27, 27]}, index=["sum", "<lambda>", "<lambda>"] + ) + + tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/groupby/aggregate/test_aggregate.py b/pandas/tests/groupby/aggregate/test_aggregate.py index 8fe450fe6abfc..c96333bc48dd4 100644 --- a/pandas/tests/groupby/aggregate/test_aggregate.py +++ b/pandas/tests/groupby/aggregate/test_aggregate.py @@ -1153,3 +1153,18 @@ def test_nonagg_agg(): expected = g.agg("cumsum") tm.assert_frame_equal(result, expected) + + +def test_agg_no_suffix_index(): + # GH36189 + df = pd.DataFrame([[4, 9]] * 3, columns=["A", "B"]) + result = df.agg(["sum", lambda x: x.sum(), lambda x: x.sum()]) + expected = pd.DataFrame( + {"A": [12, 12, 12], "B": [27, 27, 27]}, index=["sum", "<lambda>", "<lambda>"] + ) + tm.assert_frame_equal(result, expected) + + # test Series case + result = df["A"].agg(["sum", lambda x: x.sum(), lambda x: x.sum()]) + expected = pd.Series([12, 12, 12], index=["sum", "<lambda>", "<lambda>"], name="A") + tm.assert_series_equal(result, expected) diff --git a/pandas/tests/series/apply/test_series_apply.py
b/pandas/tests/series/apply/test_series_apply.py index 827f466e23106..ce8759c4ba76d 100644 --- a/pandas/tests/series/apply/test_series_apply.py +++ b/pandas/tests/series/apply/test_series_apply.py @@ -445,6 +445,14 @@ def test_agg_cython_table_raises(self, series, func, expected): # e.g. Series('a b'.split()).cumprod() will raise series.agg(func) + def test_series_apply_no_suffix_index(self): + # GH36189 + s = pd.Series([4] * 3) + result = s.apply(["sum", lambda x: x.sum(), lambda x: x.sum()]) + expected = pd.Series([12, 12, 12], index=["sum", "<lambda>", "<lambda>"]) + + tm.assert_series_equal(result, expected) + class TestSeriesMap: def test_map(self, datetime_series): From 3007baf2f43df40e94f21479f31b341e8e9a6647 Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Sun, 13 Sep 2020 13:19:41 -0700 Subject: [PATCH 538/632] CLN: remove CategoricalIndex._create_from_codes (#36342) --- pandas/core/arrays/categorical.py | 13 +++++------ pandas/core/indexes/category.py | 39 +++++++------------------------ 2 files changed, 15 insertions(+), 37 deletions(-) diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index 4fa6b73932aa4..27e0a198b62a2 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -1376,7 +1376,7 @@ def value_counts(self, dropna=True): count = np.bincount(np.where(mask, code, ncat)) ix = np.append(ix, -1) - ix = self._constructor(ix, dtype=self.dtype, fastpath=True) + ix = self._from_backing_data(ix) return Series(count, index=CategoricalIndex(ix), dtype="int64") @@ -1546,9 +1546,8 @@ def sort_values( if inplace: self._codes = self._codes[sorted_idx] else: - return self._constructor( - values=self._codes[sorted_idx], dtype=self.dtype, fastpath=True - ) + codes = self._codes[sorted_idx] + return self._from_backing_data(codes) def _values_for_rank(self): """ @@ -1583,7 +1582,7 @@ def _values_for_rank(self): def view(self, dtype=None): if dtype is not None: raise NotImplementedError(dtype) - return self._constructor(values=self._codes, dtype=self.dtype, fastpath=True) + return self._from_backing_data(self._ndarray) def to_dense(self): """ @@ -1691,7 +1690,7 @@ def fillna(self, value=None, method=None, limit=None): f"or Series, but you passed a {type(value).__name__}" ) - return self._constructor(codes, dtype=self.dtype, fastpath=True) + return self._from_backing_data(codes) # ------------------------------------------------------------------ # NDArrayBackedExtensionArray compat @@ -2098,7 +2097,7 @@ def mode(self, dropna=True): good = self._codes != -1 codes = self._codes[good] codes = sorted(htable.mode_int64(ensure_int64(codes), dropna)) - return self._constructor(values=codes, dtype=self.dtype, fastpath=True) + return self._from_backing_data(codes) # ------------------------------------------------------------------ # ExtensionArray Interface diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py index 19a0910a7a282..85ef3e58576e3 100644 --- a/pandas/core/indexes/category.py +++ b/pandas/core/indexes/category.py @@ -211,29 +211,6 @@ def __new__( return cls._simple_new(data, name=name) - def _create_from_codes(self, codes, dtype=None, name=None): - """ - *this is an internal non-public method* - - create the correct categorical from codes - - Parameters - ---------- - codes : new codes - dtype: CategoricalDtype, defaults to existing - name : optional name attribute, defaults to existing
- cat = Categorical.from_codes(codes, dtype=dtype) - return CategoricalIndex(cat, name=name) - @classmethod def _simple_new(cls, values: Categorical, name: Label = None): assert isinstance(values, Categorical), type(values) @@ -495,7 +472,8 @@ def reindex(self, target, method=None, level=None, limit=None, tolerance=None): codes = new_target.codes.copy() codes[indexer == -1] = cats[missing] - new_target = self._create_from_codes(codes) + cat = self._data._from_backing_data(codes) + new_target = type(self)._simple_new(cat, name=self.name) # we always want to return an Index type here # to be consistent with .reindex for other index types (e.g. they don't @@ -695,7 +673,9 @@ def delete(self, loc): ------- new_index : Index """ - return self._create_from_codes(np.delete(self.codes, loc)) + codes = np.delete(self.codes, loc) + cat = self._data._from_backing_data(codes) + return type(self)._simple_new(cat, name=self.name) def insert(self, loc: int, item): """ @@ -720,15 +700,14 @@ def insert(self, loc: int, item): codes = self.codes codes = np.concatenate((codes[:loc], [code], codes[loc:])) - return self._create_from_codes(codes) + cat = self._data._from_backing_data(codes) + return type(self)._simple_new(cat, name=self.name) def _concat(self, to_concat, name): # if calling index is category, don't check dtype of others codes = np.concatenate([self._is_dtype_compat(c).codes for c in to_concat]) - result = self._create_from_codes(codes, name=name) - # if name is None, _create_from_codes sets self.name - result.name = name - return result + cat = self._data._from_backing_data(codes) + return type(self)._simple_new(cat, name=name) def _delegate_method(self, name: str, *args, **kwargs): """ method delegation to the ._values """ From fef183064f538c24b7805599de75c71b8d37a0bc Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Sun, 13 Sep 2020 13:20:56 -0700 Subject: [PATCH 539/632] REF: move ShallowMixin to groupby.base (#36341) --- pandas/core/base.py | 17 +---------------- pandas/core/groupby/base.py | 17 ++++++++++++++++- pandas/core/resample.py | 6 +++--- pandas/core/window/common.py | 4 ++-- pandas/core/window/rolling.py | 5 +++-- 5 files changed, 25 insertions(+), 24 deletions(-) diff --git a/pandas/core/base.py b/pandas/core/base.py index 53378c0e0e252..4d5cddc086b2a 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -4,7 +4,7 @@ import builtins import textwrap -from typing import Any, Callable, Dict, FrozenSet, List, Optional, Union +from typing import Any, Callable, Dict, FrozenSet, Optional, Union import numpy as np @@ -577,21 +577,6 @@ def _is_builtin_func(self, arg): return self._builtin_table.get(arg, arg) -class ShallowMixin: - _attributes: List[str] = [] - - def _shallow_copy(self, obj, **kwargs): - """ - return a new object with the replacement attributes - """ - if isinstance(obj, self._constructor): - obj = obj.obj - for attr in self._attributes: - if attr not in kwargs: - kwargs[attr] = getattr(self, attr) - return self._constructor(obj, **kwargs) - - class IndexOpsMixin: """ Common ops mixin to support a unified interface / docs for Series / Index diff --git a/pandas/core/groupby/base.py b/pandas/core/groupby/base.py index 999873e7b81e4..9cfd13f95ca0e 100644 --- a/pandas/core/groupby/base.py +++ b/pandas/core/groupby/base.py @@ -13,7 +13,22 @@ OutputKey = collections.namedtuple("OutputKey", ["label", "position"]) -class GroupByMixin(PandasObject): +class ShallowMixin(PandasObject): + _attributes: List[str] = [] + + def _shallow_copy(self, obj, **kwargs): + """ + return a 
new object with the replacement attributes + """ + if isinstance(obj, self._constructor): + obj = obj.obj + for attr in self._attributes: + if attr not in kwargs: + kwargs[attr] = getattr(self, attr) + return self._constructor(obj, **kwargs) + + +class GotItemMixin(PandasObject): """ Provide the groupby facilities to the mixed object. """ diff --git a/pandas/core/resample.py b/pandas/core/resample.py index 4ba253e76128e..29b7bd7a63faa 100644 --- a/pandas/core/resample.py +++ b/pandas/core/resample.py @@ -22,9 +22,9 @@ from pandas.core.dtypes.generic import ABCDataFrame, ABCSeries import pandas.core.algorithms as algos -from pandas.core.base import DataError, ShallowMixin +from pandas.core.base import DataError from pandas.core.generic import NDFrame, _shared_docs -from pandas.core.groupby.base import GroupByMixin +from pandas.core.groupby.base import GotItemMixin, ShallowMixin from pandas.core.groupby.generic import SeriesGroupBy from pandas.core.groupby.groupby import ( BaseGroupBy, @@ -954,7 +954,7 @@ def h(self, _method=method): setattr(Resampler, method, h) -class _GroupByMixin(GroupByMixin): +class _GroupByMixin(GotItemMixin): """ Provide the groupby facilities. """ diff --git a/pandas/core/window/common.py b/pandas/core/window/common.py index df60d2dcf5e84..6452eb8c6b3a9 100644 --- a/pandas/core/window/common.py +++ b/pandas/core/window/common.py @@ -7,7 +7,7 @@ from pandas.core.dtypes.generic import ABCDataFrame, ABCSeries -from pandas.core.groupby.base import GroupByMixin +from pandas.core.groupby.base import GotItemMixin from pandas.core.indexes.api import MultiIndex from pandas.core.shared_docs import _shared_docs @@ -43,7 +43,7 @@ def f(x): return outer -class WindowGroupByMixin(GroupByMixin): +class WindowGroupByMixin(GotItemMixin): """ Provide the groupby facilities. 
""" diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py index 648ab4d25be83..d094cc7d70a21 100644 --- a/pandas/core/window/rolling.py +++ b/pandas/core/window/rolling.py @@ -46,9 +46,10 @@ ) from pandas.core.dtypes.missing import notna -from pandas.core.base import DataError, PandasObject, SelectionMixin, ShallowMixin +from pandas.core.base import DataError, SelectionMixin import pandas.core.common as com from pandas.core.construction import extract_array +from pandas.core.groupby.base import ShallowMixin from pandas.core.indexes.api import Index, MultiIndex from pandas.core.util.numba_ import NUMBA_FUNC_CACHE, maybe_use_numba from pandas.core.window.common import ( @@ -146,7 +147,7 @@ def func(arg, window, min_periods=None): return func -class _Window(PandasObject, ShallowMixin, SelectionMixin): +class _Window(ShallowMixin, SelectionMixin): _attributes: List[str] = [ "window", "min_periods", From 4859be9cd145e3da0a7f596c3e27636a58920c1c Mon Sep 17 00:00:00 2001 From: Simon Hawkins Date: Sun, 13 Sep 2020 21:21:17 +0100 Subject: [PATCH 540/632] DOC/CLN: remove versionadded/changed:: 0.23 (#36338) --- doc/source/development/extending.rst | 2 -- doc/source/getting_started/install.rst | 2 -- doc/source/user_guide/advanced.rst | 2 -- doc/source/user_guide/basics.rst | 2 -- doc/source/user_guide/categorical.rst | 2 -- doc/source/user_guide/dsintro.rst | 2 -- doc/source/user_guide/io.rst | 2 -- doc/source/user_guide/merging.rst | 4 --- doc/source/user_guide/missing_data.rst | 8 ++--- doc/source/user_guide/reshaping.rst | 2 -- doc/source/user_guide/text.rst | 6 ---- doc/source/user_guide/timedeltas.rst | 8 +---- doc/source/user_guide/timeseries.rst | 4 --- pandas/_libs/tslibs/nattype.pyx | 8 ++--- pandas/_libs/tslibs/timestamps.pyx | 9 ++--- pandas/core/algorithms.py | 2 -- pandas/core/arrays/base.py | 2 -- pandas/core/arrays/categorical.py | 2 -- pandas/core/arrays/datetimes.py | 4 --- pandas/core/arrays/interval.py | 10 ------ pandas/core/dtypes/base.py | 2 -- pandas/core/frame.py | 47 +++----------------------- pandas/core/generic.py | 19 ----------- pandas/core/groupby/groupby.py | 3 -- pandas/core/indexes/base.py | 4 --- pandas/core/resample.py | 2 -- pandas/core/reshape/concat.py | 1 - pandas/core/reshape/melt.py | 15 +++----- pandas/core/reshape/reshape.py | 2 -- pandas/core/reshape/tile.py | 2 -- pandas/core/series.py | 10 ++---- pandas/core/shared_docs.py | 6 ++-- pandas/core/strings.py | 3 -- pandas/core/tools/datetimes.py | 2 -- pandas/core/window/ewm.py | 1 - pandas/core/window/expanding.py | 1 - pandas/core/window/rolling.py | 4 --- pandas/io/excel/_base.py | 3 -- pandas/io/formats/style.py | 4 --- pandas/io/html.py | 4 --- pandas/io/json/_json.py | 3 -- pandas/io/stata.py | 7 ---- pandas/plotting/_core.py | 4 --- 43 files changed, 21 insertions(+), 211 deletions(-) diff --git a/doc/source/development/extending.rst b/doc/source/development/extending.rst index 1e6b2c646fdfd..46c2cbbe39b34 100644 --- a/doc/source/development/extending.rst +++ b/doc/source/development/extending.rst @@ -73,8 +73,6 @@ applies only to certain dtypes. Extension types --------------- -.. versionadded:: 0.23.0 - .. 
warning:: The :class:`pandas.api.extensions.ExtensionDtype` and :class:`pandas.api.extensions.ExtensionArray` APIs are new and diff --git a/doc/source/getting_started/install.rst b/doc/source/getting_started/install.rst index c9ac1b0d284a3..fde9f567cc3ec 100644 --- a/doc/source/getting_started/install.rst +++ b/doc/source/getting_started/install.rst @@ -301,8 +301,6 @@ Optional dependencies for parsing HTML One of the following combinations of libraries is needed to use the top-level :func:`~pandas.read_html` function: -.. versionchanged:: 0.23.0 - * `BeautifulSoup4`_ and `html5lib`_ * `BeautifulSoup4`_ and `lxml`_ * `BeautifulSoup4`_ and `html5lib`_ and `lxml`_ diff --git a/doc/source/user_guide/advanced.rst b/doc/source/user_guide/advanced.rst index a0331dd632583..8cd35e94ae743 100644 --- a/doc/source/user_guide/advanced.rst +++ b/doc/source/user_guide/advanced.rst @@ -1065,8 +1065,6 @@ are closed on. Intervals are closed on the right side by default. pd.interval_range(start=0, end=4, closed='neither') -.. versionadded:: 0.23.0 - Specifying ``start``, ``end``, and ``periods`` will generate a range of evenly spaced intervals from ``start`` to ``end`` inclusively, with ``periods`` number of elements in the resulting ``IntervalIndex``: diff --git a/doc/source/user_guide/basics.rst b/doc/source/user_guide/basics.rst index 87359042928eb..6b13319061ea4 100644 --- a/doc/source/user_guide/basics.rst +++ b/doc/source/user_guide/basics.rst @@ -1877,8 +1877,6 @@ different columns. By indexes and values ~~~~~~~~~~~~~~~~~~~~~ -.. versionadded:: 0.23.0 - Strings passed as the ``by`` parameter to :meth:`DataFrame.sort_values` may refer to either columns or index level names. diff --git a/doc/source/user_guide/categorical.rst b/doc/source/user_guide/categorical.rst index 7def45ddc13e2..b7475ae7bb132 100644 --- a/doc/source/user_guide/categorical.rst +++ b/doc/source/user_guide/categorical.rst @@ -112,8 +112,6 @@ only labels present in a given column are categories: df['B'] -.. versionadded:: 0.23.0 - Analogously, all columns in an existing ``DataFrame`` can be batch converted using :meth:`DataFrame.astype`: .. ipython:: python diff --git a/doc/source/user_guide/dsintro.rst b/doc/source/user_guide/dsintro.rst index 23bd44c1969a5..0e6767e88edc2 100644 --- a/doc/source/user_guide/dsintro.rst +++ b/doc/source/user_guide/dsintro.rst @@ -597,8 +597,6 @@ to be inserted (for example, a ``Series`` or NumPy array), or a function of one argument to be called on the ``DataFrame``. A *copy* of the original DataFrame is returned, with the new values inserted. -.. versionchanged:: 0.23.0 - Starting with Python 3.6 the order of ``**kwargs`` is preserved. This allows for *dependent* assignment, where an expression later in ``**kwargs`` can refer to a column created earlier in the same :meth:`~DataFrame.assign`. diff --git a/doc/source/user_guide/io.rst b/doc/source/user_guide/io.rst index 4dfabaa99fff6..a0b16e5fe5d1c 100644 --- a/doc/source/user_guide/io.rst +++ b/doc/source/user_guide/io.rst @@ -2360,8 +2360,6 @@ A few notes on the generated table schema: then ``level_`` is used. -.. versionadded:: 0.23.0 - ``read_json`` also accepts ``orient='table'`` as an argument. This allows for the preservation of metadata such as dtypes and index names in a round-trippable manner. diff --git a/doc/source/user_guide/merging.rst b/doc/source/user_guide/merging.rst index 0639e4a7bb5e4..bc8fc5a7e4f4e 100644 --- a/doc/source/user_guide/merging.rst +++ b/doc/source/user_guide/merging.rst @@ -175,8 +175,6 @@ behavior: .. warning:: - .. 
versionchanged:: 0.23.0 - The default behavior with ``join='outer'`` is to sort the other axis (columns in this case). In a future version of pandas, the default will be to not sort. We specified ``sort=False`` to opt in to the new @@ -1198,8 +1196,6 @@ done using the following code. Merging on a combination of columns and index levels ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -.. versionadded:: 0.23 - Strings passed as the ``on``, ``left_on``, and ``right_on`` parameters may refer to either column names or index level names. This enables merging ``DataFrame`` instances on a combination of index levels and columns without diff --git a/doc/source/user_guide/missing_data.rst b/doc/source/user_guide/missing_data.rst index 28206192dd161..06a7c6e33768e 100644 --- a/doc/source/user_guide/missing_data.rst +++ b/doc/source/user_guide/missing_data.rst @@ -336,10 +336,6 @@ examined :ref:`in the API `. Interpolation ~~~~~~~~~~~~~ -.. versionadded:: 0.23.0 - - The ``limit_area`` keyword argument was added. - Both Series and DataFrame objects have :meth:`~DataFrame.interpolate` that, by default, performs linear interpolation at missing data points. @@ -507,8 +503,8 @@ By default, ``NaN`` values are filled in a ``forward`` direction. Use ser.interpolate(limit_direction='both') By default, ``NaN`` values are filled whether they are inside (surrounded by) -existing valid values, or outside existing valid values. Introduced in v0.23 -the ``limit_area`` parameter restricts filling to either inside or outside values. +existing valid values, or outside existing valid values. The ``limit_area`` +parameter restricts filling to either inside or outside values. .. ipython:: python diff --git a/doc/source/user_guide/reshaping.rst b/doc/source/user_guide/reshaping.rst index aa6bf44547040..1b90aeb00cf9c 100644 --- a/doc/source/user_guide/reshaping.rst +++ b/doc/source/user_guide/reshaping.rst @@ -679,8 +679,6 @@ To choose another dtype, use the ``dtype`` argument: pd.get_dummies(df, dtype=bool).dtypes -.. versionadded:: 0.23.0 - .. _reshaping.factorize: diff --git a/doc/source/user_guide/text.rst b/doc/source/user_guide/text.rst index 3408b98b3179d..e03ba74f95c90 100644 --- a/doc/source/user_guide/text.rst +++ b/doc/source/user_guide/text.rst @@ -282,8 +282,6 @@ following code will cause trouble because of the regular expression meaning of # We need to escape the special character (for >1 len patterns) dollars.str.replace(r'-\$', '-') -.. versionadded:: 0.23.0 - If you do want literal replacement of a string (equivalent to :meth:`str.replace`), you can set the optional ``regex`` parameter to ``False``, rather than escaping each character. In this case both ``pat`` @@ -390,8 +388,6 @@ Missing values on either side will result in missing values in the result as wel Concatenating a Series and something array-like into a Series ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -.. versionadded:: 0.23.0 - The parameter ``others`` can also be two-dimensional. In this case, the number of rows must match the lengths of the calling ``Series`` (or ``Index``). .. ipython:: python @@ -404,8 +400,6 @@ The parameter ``others`` can also be two-dimensional. In this case, the number o Concatenating a Series and an indexed object into a Series, with alignment ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -.. versionadded:: 0.23.0 - For concatenation with a ``Series`` or ``DataFrame``, it is possible to align the indexes before concatenation by setting the ``join``-keyword.
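A quick illustration of the alignment behaviour described above (an editor's sketch using the public ``Series.str.cat`` API, not text from the patch):

    import pandas as pd

    s = pd.Series(["a", "b", "c"])
    t = pd.Series(["x", "y"], index=[2, 0])
    # t is aligned on s.index before concatenating; na_rep fills the gaps
    print(s.str.cat(t, join="left", na_rep="-"))
    # 0    ay
    # 1    b-
    # 2    cx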
diff --git a/doc/source/user_guide/timedeltas.rst b/doc/source/user_guide/timedeltas.rst index 3439a0a4c13c7..3979ad1f3e949 100644 --- a/doc/source/user_guide/timedeltas.rst +++ b/doc/source/user_guide/timedeltas.rst @@ -18,7 +18,7 @@ parsing, and attributes. Parsing ------- -You can construct a ``Timedelta`` scalar through various arguments: +You can construct a ``Timedelta`` scalar through various arguments, including `ISO 8601 Duration`_ strings. .. ipython:: python @@ -53,10 +53,6 @@ You can construct a ``Timedelta`` scalar through various arguments: pd.Timedelta('P0DT0H1M0S') pd.Timedelta('P0DT0H0M0.000000123S') -.. versionadded:: 0.23.0 - - Added constructor for `ISO 8601 Duration`_ strings - :ref:`DateOffsets` (``Day, Hour, Minute, Second, Milli, Micro, Nano``) can also be used in construction. .. ipython:: python @@ -387,8 +383,6 @@ The ``freq`` parameter can be passed a variety of :ref:`frequency aliases` [the diffs for the remaining user-guide files and several pandas/core modules were lost in extraction; the hunks below are from pandas/core/frame.py] Column labels to use when ``orient='index'``. Raises a ValueError if used with ``orient='columns'``. - .. versionadded:: 0.23.0 - Returns ------- DataFrame @@ -2115,7 +2104,6 @@ def to_stata( support Unicode characters, and version 119 supports more than 32,767 variables. - .. versionadded:: 0.23.0 .. versionchanged:: 1.0.0 Added support for formats 118 and 119. @@ -2125,9 +2113,6 @@ def to_stata( format. Only available if version is 117. Storing strings in the StrL format can produce smaller dta files if strings have more than 8 characters and values are repeated. - - .. versionadded:: 0.23.0 - compression : str or dict, default 'infer' For on-the-fly compression of the output dta. If string, specifies compression mode. If dict, value at key 'method' specifies @@ -2466,9 +2451,6 @@ def to_stata( table_id : str, optional A css id is included in the opening `<table>` tag if specified.
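# --- editor's sketch (not from the patch): the table_id parameter described
# in the hunk above puts the given css id into the opening <table> tag.
import pandas as pd

df = pd.DataFrame({"A": [1, 2]})
html = df.to_html(table_id="my-table")
assert 'id="my-table"' in html
# --- end editor's sketch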
[the tail of the preceding hunk and the style-test assertions that followed were lost in extraction] @td.skip_if_no_mpl class TestStylerMatplotlibDep: From 92bf41a4707a8386fe3055bcbc959954da240071 Mon Sep 17 00:00:00 2001 From: Douglas Hanley Date: Fri, 7 Aug 2020 18:22:55 -0400 Subject: [PATCH 237/632] BUG: assign consensus name to index union in array case GH13475 (#35338) --- doc/source/whatsnew/v1.2.0.rst | 1 + pandas/core/indexes/api.py | 5 ++-- pandas/tests/frame/test_constructors.py | 36 ++++++++++++++++++++++++ pandas/tests/reshape/test_concat.py | 37 +++++++++++++++++++++++++ 4 files changed, 76 insertions(+), 3 deletions(-) diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index 74ef5178eb004..33e70daa55e66 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -162,6 +162,7 @@ Reshaping ^^^^^^^^^ - Bug in :meth:`DataFrame.pivot_table` with ``aggfunc='count'`` or ``aggfunc='sum'`` returning ``NaN`` for missing categories when pivoted on a ``Categorical``. Now returning ``0`` (:issue:`31422`) +- Bug in :func:`union_indexes` where input index names are not preserved in some cases. Affects :func:`concat` and :class:`DataFrame` constructor (:issue:`13475`) - Sparse diff --git a/pandas/core/indexes/api.py b/pandas/core/indexes/api.py index 4c5a70f4088ee..30cc8cf480dcf 100644 --- a/pandas/core/indexes/api.py +++ b/pandas/core/indexes/api.py @@ -218,9 +218,8 @@ def conv(i): return result elif kind == "array": index = indexes[0] - for other in indexes[1:]: - if not index.equals(other): - return _unique_indices(indexes) + if not all(index.equals(other) for other in indexes[1:]): + index = _unique_indices(indexes) name = get_consensus_names(indexes)[0] if name != index.name: diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py index b78bb1c492ef4..d0f774344a33d 100644 --- a/pandas/tests/frame/test_constructors.py +++ b/pandas/tests/frame/test_constructors.py @@ -1618,6 +1618,42 @@ def test_constructor_Series_differently_indexed(self): tm.assert_index_equal(df2.index, other_index) tm.assert_frame_equal(df2, exp2) + @pytest.mark.parametrize( + "name_in1,name_in2,name_in3,name_out", + [ + ("idx", "idx", "idx", "idx"), + ("idx", "idx", None, "idx"), + ("idx", None, None, "idx"), + ("idx1", "idx2", None, None), + ("idx1", "idx1", "idx2", None), + ("idx1", "idx2", "idx3", None), + (None, None, None, None), + ], + ) + def test_constructor_index_names(self, name_in1, name_in2, name_in3, name_out): + # GH13475 + indices = [ + pd.Index(["a", "b", "c"], name=name_in1), + pd.Index(["b", "c", "d"], name=name_in2), + pd.Index(["c", "d", "e"], name=name_in3), + ] + series = { + c: pd.Series([0, 1, 2], index=i) for i, c in zip(indices, ["x", "y", "z"]) + } + result = pd.DataFrame(series) + + exp_ind = pd.Index(["a", "b", "c", "d", "e"], name=name_out) + expected = pd.DataFrame( + { + "x": [0, 1, 2, np.nan, np.nan], + "y": [np.nan, 0, 1, 2, np.nan], + "z": [np.nan, np.nan, 0, 1, 2], + }, + index=exp_ind, + ) + + tm.assert_frame_equal(result, expected) + def test_constructor_manager_resize(self, float_frame): index = list(float_frame.index[:5]) columns = list(float_frame.columns[:3]) diff --git a/pandas/tests/reshape/test_concat.py b/pandas/tests/reshape/test_concat.py index 0159fabd04d59..38cf2cc2402a1 100644 --- a/pandas/tests/reshape/test_concat.py +++ b/pandas/tests/reshape/test_concat.py @@ -1279,6 +1279,43 @@ def test_concat_ignore_index(self, sort): tm.assert_frame_equal(v1, expected) + @pytest.mark.parametrize( + "name_in1,name_in2,name_in3,name_out", + [ + ("idx", "idx", "idx", "idx"),
+ ("idx", "idx", None, "idx"), + ("idx", None, None, "idx"), + ("idx1", "idx2", None, None), + ("idx1", "idx1", "idx2", None), + ("idx1", "idx2", "idx3", None), + (None, None, None, None), + ], + ) + def test_concat_same_index_names(self, name_in1, name_in2, name_in3, name_out): + # GH13475 + indices = [ + pd.Index(["a", "b", "c"], name=name_in1), + pd.Index(["b", "c", "d"], name=name_in2), + pd.Index(["c", "d", "e"], name=name_in3), + ] + frames = [ + pd.DataFrame({c: [0, 1, 2]}, index=i) + for i, c in zip(indices, ["x", "y", "z"]) + ] + result = pd.concat(frames, axis=1) + + exp_ind = pd.Index(["a", "b", "c", "d", "e"], name=name_out) + expected = pd.DataFrame( + { + "x": [0, 1, 2, np.nan, np.nan], + "y": [np.nan, 0, 1, 2, np.nan], + "z": [np.nan, np.nan, 0, 1, 2], + }, + index=exp_ind, + ) + + tm.assert_frame_equal(result, expected) + def test_concat_multiindex_with_keys(self): index = MultiIndex( levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]], From 47c17cbac9255c2e9b784eae1a8a7d7dfee19b59 Mon Sep 17 00:00:00 2001 From: Daniel Saxton <2658661+dsaxton@users.noreply.github.com> Date: Fri, 7 Aug 2020 18:17:14 -0500 Subject: [PATCH 238/632] REGR: Fix conversion of mixed dtype DataFrame to numpy str (#35473) * Handle str better * Doc and test * Make an elif * Add back import --- doc/source/whatsnew/v1.1.1.rst | 1 + pandas/core/frame.py | 2 ++ pandas/core/internals/managers.py | 3 +++ pandas/tests/frame/test_api.py | 7 +++++++ 4 files changed, 13 insertions(+) diff --git a/doc/source/whatsnew/v1.1.1.rst b/doc/source/whatsnew/v1.1.1.rst index ade88a6127014..f0ad9d1ca3b0f 100644 --- a/doc/source/whatsnew/v1.1.1.rst +++ b/doc/source/whatsnew/v1.1.1.rst @@ -15,6 +15,7 @@ including other versions of pandas. Fixed regressions ~~~~~~~~~~~~~~~~~ +- Fixed regression where :meth:`DataFrame.to_numpy` would raise a ``RuntimeError`` for mixed dtypes when converting to ``str`` (:issue:`35455`) - Fixed regression where :func:`read_csv` would raise a ``ValueError`` when ``pandas.options.mode.use_inf_as_na`` was set to ``True`` (:issue:`35493`). 
- Fixed regression in :class:`pandas.core.groupby.RollingGroupby` where column selection was ignored (:issue:`35486`) - Fixed regression in :meth:`DataFrame.shift` with ``axis=1`` and heterogeneous dtypes (:issue:`35488`) diff --git a/pandas/core/frame.py b/pandas/core/frame.py index aabdac16e9a1a..b66b6b92336f2 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -1371,6 +1371,8 @@ def to_numpy( result = self._mgr.as_array( transpose=self._AXIS_REVERSED, dtype=dtype, copy=copy, na_value=na_value ) + if result.dtype is not dtype: + result = np.array(result, dtype=dtype, copy=False) return result diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py index 4b85f92391dce..aa74d173d69b3 100644 --- a/pandas/core/internals/managers.py +++ b/pandas/core/internals/managers.py @@ -19,6 +19,7 @@ from pandas.core.dtypes.common import ( DT64NS_DTYPE, is_datetimelike_v_numeric, + is_dtype_equal, is_extension_array_dtype, is_list_like, is_numeric_v_string_like, @@ -865,6 +866,8 @@ def _interleave(self, dtype=None, na_value=lib.no_default) -> np.ndarray: dtype = dtype.subtype elif is_extension_array_dtype(dtype): dtype = "object" + elif is_dtype_equal(dtype, str): + dtype = "object" result = np.empty(self.shape, dtype=dtype) diff --git a/pandas/tests/frame/test_api.py b/pandas/tests/frame/test_api.py index 2b79fc8cd3406..cc57a3970d18b 100644 --- a/pandas/tests/frame/test_api.py +++ b/pandas/tests/frame/test_api.py @@ -367,6 +367,13 @@ def test_to_numpy_copy(self): assert df.to_numpy(copy=False).base is arr assert df.to_numpy(copy=True).base is not arr + def test_to_numpy_mixed_dtype_to_str(self): + # https://github.com/pandas-dev/pandas/issues/35455 + df = pd.DataFrame([[pd.Timestamp("2020-01-01 00:00:00"), 100.0]]) + result = df.to_numpy(dtype=str) + expected = np.array([["2020-01-01 00:00:00", "100.0"]], dtype=str) + tm.assert_numpy_array_equal(result, expected) + def test_swapaxes(self): df = DataFrame(np.random.randn(10, 5)) tm.assert_frame_equal(df.T, df.swapaxes(0, 1)) From fa92ece122e18d65754dcd26324ed80326f249c4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Torsten=20W=C3=B6rtwein?= Date: Fri, 7 Aug 2020 21:27:39 -0400 Subject: [PATCH 239/632] DOC: corrected statement about compression support for file objects in to_csv (#35615) --- pandas/core/generic.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 87f25f578c3c6..834cd992f5650 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -3088,7 +3088,7 @@ def to_csv( .. versionchanged:: 1.2.0 - Compression is supported for non-binary file objects. + Compression is supported for binary file objects. quoting : optional constant from csv module Defaults to csv.QUOTE_MINIMAL. 
If you have set a `float_format` then floats are converted to strings and thus csv.QUOTE_NONNUMERIC will treat them as non-numeric. From a9cb64ad105f8f7051f9d77b66e1bfe7c09d386f Mon Sep 17 00:00:00 2001 From: Simon Hawkins Date: Sat, 8 Aug 2020 10:47:43 +0100 Subject: [PATCH 240/632] CI: Linux py36_locale failures with pytest DeprecationWarning (#35621) --- ci/deps/azure-36-locale.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ci/deps/azure-36-locale.yaml b/ci/deps/azure-36-locale.yaml index a9b9a5a47ccf5..3034ed3dc43af 100644 --- a/ci/deps/azure-36-locale.yaml +++ b/ci/deps/azure-36-locale.yaml @@ -7,7 +7,7 @@ dependencies: # tools - cython>=0.29.16 - - pytest>=5.0.1 + - pytest>=5.0.1,<6.0.0 # https://github.com/pandas-dev/pandas/issues/35620 - pytest-xdist>=1.21 - pytest-asyncio - hypothesis>=3.58.0 From 188ce7301329e6791df4a71b87b28922e00a6bf8 Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Sat, 8 Aug 2020 07:51:39 -0700 Subject: [PATCH 241/632] REF: Avoid post-processing in blockwise op (#35356) --- pandas/core/groupby/generic.py | 97 ++++++++++++++++------------------ 1 file changed, 47 insertions(+), 50 deletions(-) diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index 1fed193dba02c..53242c0332a8c 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -1029,11 +1029,36 @@ def _cython_agg_blocks( agg_blocks: List[Block] = [] new_items: List[np.ndarray] = [] deleted_items: List[np.ndarray] = [] - # Some object-dtype blocks might be split into List[Block[T], Block[U]] - split_items: List[np.ndarray] = [] - split_frames: List[DataFrame] = [] no_result = object() + + def cast_result_block(result, block: "Block", how: str) -> "Block": + # see if we can cast the block to the desired dtype + # this may not be the original dtype + assert not isinstance(result, DataFrame) + assert result is not no_result + + dtype = maybe_cast_result_dtype(block.dtype, how) + result = maybe_downcast_numeric(result, dtype) + + if block.is_extension and isinstance(result, np.ndarray): + # e.g. block.values was an IntegerArray + # (1, N) case can occur if block.values was Categorical + # and result is ndarray[object] + # TODO(EA2D): special casing not needed with 2D EAs + assert result.ndim == 1 or result.shape[0] == 1 + try: + # Cast back if feasible + result = type(block.values)._from_sequence( + result.ravel(), dtype=block.values.dtype + ) + except (ValueError, TypeError): + # reshape to be valid for non-Extension Block + result = result.reshape(1, -1) + + agg_block: Block = block.make_block(result) + return agg_block + for block in data.blocks: # Avoid inheriting result from earlier in the loop result = no_result @@ -1065,9 +1090,9 @@ # not try to add missing categories if grouping over multiple # Categoricals. This will be done later by self._reindex_output() # Doing it here creates an error. See GH#34951 - s = get_groupby(obj, self.grouper, observed=True) + sgb = get_groupby(obj, self.grouper, observed=True) try: - result = s.aggregate(lambda x: alt(x, axis=self.axis)) + result = sgb.aggregate(lambda x: alt(x, axis=self.axis)) except TypeError: # we may have an exception in trying to aggregate # continue and exclude the block @@ -1081,54 +1106,26 @@ # about a single block input returning a single block output # is a lie. To keep the code-path for the typical non-split case # clean, we choose to clean up this mess later on.
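# --- editor's sketch (not from the patch): a user-level view of what
# cast_result_block above preserves. Aggregating a nullable-integer column
# keeps the result in a nullable integer array rather than leaking a raw
# int64 ndarray (the sum itself is computed in the wider Int64 dtype).
import pandas as pd

df = pd.DataFrame({"key": ["a", "a", "b"], "val": pd.array([1, 2, 3], dtype="Int8")})
res = df.groupby("key")["val"].sum()
print(res.dtype)  # Int64, a nullable integer dtype, not plain int64
# --- end editor's sketch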
- split_items.append(locs) - split_frames.append(result) - continue - - assert len(result._mgr.blocks) == 1 - result = result._mgr.blocks[0].values - if isinstance(result, np.ndarray) and result.ndim == 1: - result = result.reshape(1, -1) - - assert not isinstance(result, DataFrame) - - if result is not no_result: - # see if we can cast the block to the desired dtype - # this may not be the original dtype - dtype = maybe_cast_result_dtype(block.dtype, how) - result = maybe_downcast_numeric(result, dtype) - - if block.is_extension and isinstance(result, np.ndarray): - # e.g. block.values was an IntegerArray - # (1, N) case can occur if block.values was Categorical - # and result is ndarray[object] - # TODO(EA2D): special casing not needed with 2D EAs - assert result.ndim == 1 or result.shape[0] == 1 - try: - # Cast back if feasible - result = type(block.values)._from_sequence( - result.ravel(), dtype=block.values.dtype - ) - except (ValueError, TypeError): - # reshape to be valid for non-Extension Block - result = result.reshape(1, -1) - - agg_block: Block = block.make_block(result) - - new_items.append(locs) - agg_blocks.append(agg_block) + assert len(locs) == result.shape[1] + for i, loc in enumerate(locs): + new_items.append(np.array([loc], dtype=locs.dtype)) + agg_block = result.iloc[:, [i]]._mgr.blocks[0] + agg_blocks.append(agg_block) + else: + result = result._mgr.blocks[0].values + if isinstance(result, np.ndarray) and result.ndim == 1: + result = result.reshape(1, -1) + agg_block = cast_result_block(result, block, how) + new_items.append(locs) + agg_blocks.append(agg_block) + else: + agg_block = cast_result_block(result, block, how) + new_items.append(locs) + agg_blocks.append(agg_block) - if not (agg_blocks or split_frames): + if not agg_blocks: raise DataError("No numeric types to aggregate") - if split_items: - # Clean up the mess left over from split blocks. - for locs, result in zip(split_items, split_frames): - assert len(locs) == result.shape[1] - for i, loc in enumerate(locs): - new_items.append(np.array([loc], dtype=locs.dtype)) - agg_blocks.append(result.iloc[:, [i]]._mgr.blocks[0]) - # reset the locs in the blocks to correspond to our # current ordering indexer = np.concatenate(new_items) From 71a327c4856e05dd761f0643aaa46903626e909c Mon Sep 17 00:00:00 2001 From: Simon Hawkins Date: Sat, 8 Aug 2020 15:52:20 +0100 Subject: [PATCH 242/632] DOC: docstrings for __array_wrap__ (#35629) --- pandas/core/generic.py | 23 ++++++++++++++++++++++- pandas/core/indexes/base.py | 2 +- pandas/core/indexes/datetimelike.py | 2 +- pandas/core/indexes/period.py | 9 ++++++--- 4 files changed, 30 insertions(+), 6 deletions(-) diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 834cd992f5650..fcb7e2a949205 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -1772,7 +1772,28 @@ def empty(self) -> bool_t: def __array__(self, dtype=None) -> np.ndarray: return np.asarray(self._values, dtype=dtype) - def __array_wrap__(self, result, context=None): + def __array_wrap__( + self, + result: np.ndarray, + context: Optional[Tuple[Callable, Tuple[Any, ...], int]] = None, + ): + """ + Gets called after a ufunc and other functions. 
+ + Parameters + ---------- + result : np.ndarray + The result of the ufunc or other function called on the NumPy array + returned by __array__ + context : tuple of (func, tuple, int) + This parameter is returned by ufuncs as a 3-element tuple: (name of the + ufunc, arguments of the ufunc, domain of the ufunc), but is not set by + other numpy functions. + + Notes + ----- + Series implements __array_ufunc__ so this is not called for ufunc on Series. + """ result = lib.item_from_zerodim(result) if is_scalar(result): # e.g. we get here with np.ptp(series) diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index bfdfbd35f27ad..bd75a064b483e 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -574,7 +574,7 @@ def __array__(self, dtype=None) -> np.ndarray: def __array_wrap__(self, result, context=None): """ - Gets called after a ufunc. + Gets called after a ufunc and other functions. """ result = lib.item_from_zerodim(result) if is_bool_dtype(result) or lib.is_scalar(result) or np.ndim(result) > 1: diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index 0ce057d6e764a..8ccdab21339df 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -116,7 +116,7 @@ def values(self): def __array_wrap__(self, result, context=None): """ - Gets called after a ufunc. + Gets called after a ufunc and other functions. """ result = lib.item_from_zerodim(result) if is_bool_dtype(result) or lib.is_scalar(result): diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py index c7199e4a28a17..11334803d4583 100644 --- a/pandas/core/indexes/period.py +++ b/pandas/core/indexes/period.py @@ -345,10 +345,13 @@ def _int64index(self) -> Int64Index: def __array_wrap__(self, result, context=None): """ - Gets called after a ufunc. Needs additional handling as - PeriodIndex stores internal data as int dtype + Gets called after a ufunc and other functions.
- Replace this to __numpy_ufunc__ in future version + Needs additional handling as PeriodIndex stores internal data as int + dtype + + Replace this with __numpy_ufunc__ in a future version and implement + __array_function__ for Indexes """ if isinstance(context, tuple) and len(context) > 0: func = context[0] From aefae55e1960a718561ae0369e83605e3038f292 Mon Sep 17 00:00:00 2001 From: Simon Hawkins Date: Sat, 8 Aug 2020 18:49:24 +0100 Subject: [PATCH 243/632] TYP: update setup.cfg (#35628) --- setup.cfg | 6 ------ 1 file changed, 6 deletions(-) diff --git a/setup.cfg b/setup.cfg index 84c281b756395..e4c0b3dcf37ef 100644 --- a/setup.cfg +++ b/setup.cfg @@ -175,9 +175,6 @@ check_untyped_defs=False [mypy-pandas.core.groupby.ops] check_untyped_defs=False -[mypy-pandas.core.indexes.base] -check_untyped_defs=False - [mypy-pandas.core.indexes.datetimes] check_untyped_defs=False @@ -214,9 +211,6 @@ check_untyped_defs=False [mypy-pandas.core.window.common] check_untyped_defs=False -[mypy-pandas.core.window.ewm] -check_untyped_defs=False - [mypy-pandas.core.window.expanding] check_untyped_defs=False From 6875a050d13f811b34c0064837bb781aca1cffcb Mon Sep 17 00:00:00 2001 From: Matthew Roeschke Date: Mon, 10 Aug 2020 06:13:30 -0700 Subject: [PATCH 244/632] BUG: RollingGroupby with closed and column selection no longer raises ValueError (#35639) --- doc/source/whatsnew/v1.1.1.rst | 4 +++ pandas/core/window/common.py | 2 +- pandas/core/window/rolling.py | 10 ++---- pandas/tests/window/test_grouper.py | 51 +++++++++++++++++++++++++++++ 4 files changed, 59 insertions(+), 8 deletions(-) diff --git a/doc/source/whatsnew/v1.1.1.rst b/doc/source/whatsnew/v1.1.1.rst index f0ad9d1ca3b0f..7f5182e3eaa6f 100644 --- a/doc/source/whatsnew/v1.1.1.rst +++ b/doc/source/whatsnew/v1.1.1.rst @@ -51,6 +51,10 @@ Categorical - - +**Groupby/resample/rolling** + +- Bug in :class:`pandas.core.groupby.RollingGroupby` where passing ``closed`` with column selection would raise a ``ValueError`` (:issue:`35549`) + **Plotting** - diff --git a/pandas/core/window/common.py b/pandas/core/window/common.py index 58e7841d4dde5..51a067427e867 100644 --- a/pandas/core/window/common.py +++ b/pandas/core/window/common.py @@ -52,7 +52,7 @@ def __init__(self, obj, *args, **kwargs): kwargs.pop("parent", None) groupby = kwargs.pop("groupby", None) if groupby is None: - groupby, obj = obj, obj.obj + groupby, obj = obj, obj._selected_obj self._groupby = groupby self._groupby.mutated = True self._groupby.grouper.mutated = True diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py index a04d68a6d6745..7347d5686aabc 100644 --- a/pandas/core/window/rolling.py +++ b/pandas/core/window/rolling.py @@ -2212,7 +2212,7 @@ def _apply( # Cannot use _wrap_outputs because we calculate the result all at once # Compose MultiIndex result from grouping levels then rolling level # Aggregate the MultiIndex data as tuples then the level names - grouped_object_index = self._groupby._selected_obj.index + grouped_object_index = self.obj.index grouped_index_name = [grouped_object_index.name] groupby_keys = [grouping.name for grouping in self._groupby.grouper._groupings] result_index_names = groupby_keys + grouped_index_name @@ -2236,10 +2236,6 @@ def _apply( def _constructor(self): return Rolling - @cache_readonly - def _selected_obj(self): - return self._groupby._selected_obj - def _create_blocks(self, obj: FrameOrSeries): """ Split data into blocks & return conformed data.
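# --- editor's sketch mirroring the tests of the patch above: selecting a
# column on a groupby rolling window that uses "closed" no longer raises.
import pandas as pd

df = pd.DataFrame(
    {
        "column1": range(6),
        "group": 3 * ["A", "B"],
        "date": [pd.Timestamp("2019-01-01")] * 6,
    }
)
res = df.groupby("group").rolling("1D", on="date", closed="left")["column1"].sum()
print(res)
# --- end editor's sketch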
@@ -2278,7 +2274,7 @@ def _get_window_indexer(self, window: int) -> GroupbyRollingIndexer: rolling_indexer: Union[Type[FixedWindowIndexer], Type[VariableWindowIndexer]] if self.is_freq_type: rolling_indexer = VariableWindowIndexer - index_array = self._groupby._selected_obj.index.asi8 + index_array = self.obj.index.asi8 else: rolling_indexer = FixedWindowIndexer index_array = None @@ -2295,7 +2291,7 @@ def _gotitem(self, key, ndim, subset=None): # here so our index is carried thru to the selected obj # when we do the splitting for the groupby if self.on is not None: - self._groupby.obj = self._groupby.obj.set_index(self._on) + self.obj = self.obj.set_index(self._on) self.on = None return super()._gotitem(key, ndim, subset=subset) diff --git a/pandas/tests/window/test_grouper.py b/pandas/tests/window/test_grouper.py index 5241b9548a442..e1dcac06c39cc 100644 --- a/pandas/tests/window/test_grouper.py +++ b/pandas/tests/window/test_grouper.py @@ -304,3 +304,54 @@ def test_groupby_subselect_rolling(self): name="b", ) tm.assert_series_equal(result, expected) + + def test_groupby_rolling_subset_with_closed(self): + # GH 35549 + df = pd.DataFrame( + { + "column1": range(6), + "column2": range(6), + "group": 3 * ["A", "B"], + "date": [pd.Timestamp("2019-01-01")] * 6, + } + ) + result = ( + df.groupby("group").rolling("1D", on="date", closed="left")["column1"].sum() + ) + expected = Series( + [np.nan, 0.0, 2.0, np.nan, 1.0, 4.0], + index=pd.MultiIndex.from_tuples( + [("A", pd.Timestamp("2019-01-01"))] * 3 + + [("B", pd.Timestamp("2019-01-01"))] * 3, + names=["group", "date"], + ), + name="column1", + ) + tm.assert_series_equal(result, expected) + + def test_groupby_subset_rolling_subset_with_closed(self): + # GH 35549 + df = pd.DataFrame( + { + "column1": range(6), + "column2": range(6), + "group": 3 * ["A", "B"], + "date": [pd.Timestamp("2019-01-01")] * 6, + } + ) + + result = ( + df.groupby("group")[["column1", "date"]] + .rolling("1D", on="date", closed="left")["column1"] + .sum() + ) + expected = Series( + [np.nan, 0.0, 2.0, np.nan, 1.0, 4.0], + index=pd.MultiIndex.from_tuples( + [("A", pd.Timestamp("2019-01-01"))] * 3 + + [("B", pd.Timestamp("2019-01-01"))] * 3, + names=["group", "date"], + ), + name="column1", + ) + tm.assert_series_equal(result, expected) From 5dee73b172f04d6118edf4337819fe592e154163 Mon Sep 17 00:00:00 2001 From: Terji Petersen Date: Mon, 10 Aug 2020 14:18:53 +0100 Subject: [PATCH 245/632] DEPR: Deprecate inplace param in MultiIndex.set_codes and MultiIndex.set_levels (#35626) --- doc/source/user_guide/indexing.rst | 8 +-- doc/source/whatsnew/v1.2.0.rst | 2 +- pandas/conftest.py | 2 +- pandas/core/indexes/multi.py | 26 ++++++++- pandas/tests/frame/methods/test_sort_index.py | 4 +- pandas/tests/indexes/multi/test_compat.py | 6 +- pandas/tests/indexes/multi/test_duplicates.py | 3 +- .../tests/indexes/multi/test_equivalence.py | 6 +- pandas/tests/indexes/multi/test_get_set.py | 55 ++++++++++++++----- pandas/tests/indexes/multi/test_integrity.py | 3 +- .../tests/indexing/multiindex/test_sorted.py | 8 ++- pandas/tests/reshape/test_melt.py | 4 +- pandas/tests/test_multilevel.py | 4 +- 13 files changed, 94 insertions(+), 37 deletions(-) diff --git a/doc/source/user_guide/indexing.rst b/doc/source/user_guide/indexing.rst index 6843dd1eadc81..cac18f5bf39cd 100644 --- a/doc/source/user_guide/indexing.rst +++ b/doc/source/user_guide/indexing.rst @@ -1532,12 +1532,8 @@ Setting metadata ~~~~~~~~~~~~~~~~ Indexes are "mostly immutable", but it is possible to set and change their -metadata, 
like the index ``name`` (or, for ``MultiIndex``, ``levels`` and
-``codes``).
-
-You can use the ``rename``, ``set_names``, ``set_levels``, and ``set_codes``
-to set these attributes directly. They default to returning a copy; however,
-you can specify ``inplace=True`` to have the data change in place.
+``name`` attribute. You can use ``rename`` and ``set_names`` to set these attributes
+directly, and they default to returning a copy.
 
 See :ref:`Advanced Indexing <advanced>` for usage of MultiIndexes.
diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index 33e70daa55e66..d3bccada09c29 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -47,7 +47,7 @@ Other enhancements
 
 Deprecations
 ~~~~~~~~~~~~
-
+- Deprecated parameter ``inplace`` in :meth:`MultiIndex.set_codes` and :meth:`MultiIndex.set_levels` (:issue:`35626`)
 -
 -
 
diff --git a/pandas/conftest.py b/pandas/conftest.py
index e0adb37e7d2f5..c1925b4f5ca3b 100644
--- a/pandas/conftest.py
+++ b/pandas/conftest.py
@@ -359,7 +359,7 @@ def multiindex_year_month_day_dataframe_random_data():
     tdf = tm.makeTimeDataFrame(100)
     ymd = tdf.groupby([lambda x: x.year, lambda x: x.month, lambda x: x.day]).sum()
     # use Int64Index, to make sure things work
-    ymd.index.set_levels([lev.astype("i8") for lev in ymd.index.levels], inplace=True)
+    ymd.index = ymd.index.set_levels([lev.astype("i8") for lev in ymd.index.levels])
     ymd.index.set_names(["year", "month", "day"], inplace=True)
     return ymd
 
diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py
index a6e8ec0707de7..13927dede5542 100644
--- a/pandas/core/indexes/multi.py
+++ b/pandas/core/indexes/multi.py
@@ -740,7 +740,7 @@ def _set_levels(
         self._tuples = None
         self._reset_cache()
 
-    def set_levels(self, levels, level=None, inplace=False, verify_integrity=True):
+    def set_levels(self, levels, level=None, inplace=None, verify_integrity=True):
         """
         Set new levels on MultiIndex. Defaults to returning new index.
 
@@ -752,6 +752,8 @@ def set_levels(self, levels, level=None, inplace=False, verify_integrity=True):
             Level(s) to set (None for all levels).
         inplace : bool
             If True, mutates in place.
+
+            .. deprecated:: 1.2.0
         verify_integrity : bool, default True
             If True, checks that levels and codes are compatible.
 
@@ -822,6 +824,15 @@ def set_levels(self, levels, level=None, inplace=False, verify_integrity=True):
         >>> idx.set_levels([['a', 'b', 'c'], [1, 2, 3, 4]], level=[0, 1]).levels
         FrozenList([['a', 'b', 'c'], [1, 2, 3, 4]])
         """
+        if inplace is not None:
+            warnings.warn(
+                "inplace is deprecated and will be removed in a future version.",
+                FutureWarning,
+                stacklevel=2,
+            )
+        else:
+            inplace = False
+
         if is_list_like(levels) and not isinstance(levels, Index):
             levels = list(levels)
 
@@ -898,7 +909,7 @@ def _set_codes(
         self._tuples = None
         self._reset_cache()
 
-    def set_codes(self, codes, level=None, inplace=False, verify_integrity=True):
+    def set_codes(self, codes, level=None, inplace=None, verify_integrity=True):
         """
         Set new codes on MultiIndex. Defaults to returning new index.
 
@@ -914,6 +925,8 @@ def set_codes(self, codes, level=None, inplace=False, verify_integrity=True):
             Level(s) to set (None for all levels).
         inplace : bool
             If True, mutates in place.
+
+            .. deprecated:: 1.2.0
         verify_integrity : bool (default True)
             If True, checks that levels and codes are compatible.
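To sketch what the deprecation means for user code (an illustrative example, not part of the patch): the ``inplace=True`` form now emits a ``FutureWarning``, and the recommended pattern is to reassign the returned index.

.. code-block:: python

    import pandas as pd

    mi = pd.MultiIndex.from_arrays([["a", "a", "b"], [1, 2, 1]])

    # Deprecated after this change (emits a FutureWarning):
    #     mi.set_levels([["x", "y"], [1, 2]], inplace=True)

    # Recommended: set_levels returns a new MultiIndex, so reassign it.
    mi = mi.set_levels([["x", "y"], [1, 2]])
    assert list(mi.levels[0]) == ["x", "y"]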
@@ -958,6 +971,15 @@ def set_codes(self, codes, level=None, inplace=False, verify_integrity=True): (1, 'two')], names=['foo', 'bar']) """ + if inplace is not None: + warnings.warn( + "inplace is deprecated and will be removed in a future version.", + FutureWarning, + stacklevel=2, + ) + else: + inplace = False + if level is not None and not is_list_like(level): if not is_list_like(codes): raise TypeError("Codes must be list-like") diff --git a/pandas/tests/frame/methods/test_sort_index.py b/pandas/tests/frame/methods/test_sort_index.py index 5216c3be116e0..dcc33428d18a5 100644 --- a/pandas/tests/frame/methods/test_sort_index.py +++ b/pandas/tests/frame/methods/test_sort_index.py @@ -555,8 +555,8 @@ def test_sort_index_and_reconstruction(self): ), ) - df.columns.set_levels( - pd.to_datetime(df.columns.levels[1]), level=1, inplace=True + df.columns = df.columns.set_levels( + pd.to_datetime(df.columns.levels[1]), level=1 ) assert not df.columns.is_lexsorted() assert not df.columns.is_monotonic diff --git a/pandas/tests/indexes/multi/test_compat.py b/pandas/tests/indexes/multi/test_compat.py index d1f66af4a8e83..b2500efef9e03 100644 --- a/pandas/tests/indexes/multi/test_compat.py +++ b/pandas/tests/indexes/multi/test_compat.py @@ -84,7 +84,8 @@ def test_inplace_mutation_resets_values(): tm.assert_almost_equal(mi1.values, vals) # Inplace should kill _tuples - mi1.set_levels(levels2, inplace=True) + with tm.assert_produces_warning(FutureWarning): + mi1.set_levels(levels2, inplace=True) tm.assert_almost_equal(mi1.values, vals2) # Make sure label setting works too @@ -103,7 +104,8 @@ def test_inplace_mutation_resets_values(): tm.assert_almost_equal(exp_values, new_values) # ...and again setting inplace should kill _tuples, etc - mi2.set_codes(codes2, inplace=True) + with tm.assert_produces_warning(FutureWarning): + mi2.set_codes(codes2, inplace=True) tm.assert_almost_equal(mi2.values, new_values) diff --git a/pandas/tests/indexes/multi/test_duplicates.py b/pandas/tests/indexes/multi/test_duplicates.py index e48731b9c8099..9add4b478da47 100644 --- a/pandas/tests/indexes/multi/test_duplicates.py +++ b/pandas/tests/indexes/multi/test_duplicates.py @@ -91,7 +91,8 @@ def test_duplicate_multiindex_codes(): mi = MultiIndex.from_arrays([["A", "A", "B", "B", "B"], [1, 2, 1, 2, 3]]) msg = r"Level values must be unique: \[[AB', ]+\] on level 0" with pytest.raises(ValueError, match=msg): - mi.set_levels([["A", "B", "A", "A", "B"], [2, 1, 3, -2, 5]], inplace=True) + with tm.assert_produces_warning(FutureWarning): + mi.set_levels([["A", "B", "A", "A", "B"], [2, 1, 3, -2, 5]], inplace=True) @pytest.mark.parametrize("names", [["a", "b", "a"], [1, 1, 2], [1, "a", 1]]) diff --git a/pandas/tests/indexes/multi/test_equivalence.py b/pandas/tests/indexes/multi/test_equivalence.py index 063ede028add7..b48f09457b96c 100644 --- a/pandas/tests/indexes/multi/test_equivalence.py +++ b/pandas/tests/indexes/multi/test_equivalence.py @@ -192,10 +192,12 @@ def test_is_(): mi4 = mi3.view() # GH 17464 - Remove duplicate MultiIndex levels - mi4.set_levels([list(range(10)), list(range(10))], inplace=True) + with tm.assert_produces_warning(FutureWarning): + mi4.set_levels([list(range(10)), list(range(10))], inplace=True) assert not mi4.is_(mi3) mi5 = mi.view() - mi5.set_levels(mi5.levels, inplace=True) + with tm.assert_produces_warning(FutureWarning): + mi5.set_levels(mi5.levels, inplace=True) assert not mi5.is_(mi) diff --git a/pandas/tests/indexes/multi/test_get_set.py b/pandas/tests/indexes/multi/test_get_set.py index 
8a3deca0236e4..b9132f429905d 100644 --- a/pandas/tests/indexes/multi/test_get_set.py +++ b/pandas/tests/indexes/multi/test_get_set.py @@ -93,7 +93,8 @@ def test_set_levels(idx): # level changing [w/ mutation] ind2 = idx.copy() - inplace_return = ind2.set_levels(new_levels, inplace=True) + with tm.assert_produces_warning(FutureWarning): + inplace_return = ind2.set_levels(new_levels, inplace=True) assert inplace_return is None assert_matching(ind2.levels, new_levels) @@ -113,20 +114,23 @@ def test_set_levels(idx): # level changing specific level [w/ mutation] ind2 = idx.copy() - inplace_return = ind2.set_levels(new_levels[0], level=0, inplace=True) + with tm.assert_produces_warning(FutureWarning): + inplace_return = ind2.set_levels(new_levels[0], level=0, inplace=True) assert inplace_return is None assert_matching(ind2.levels, [new_levels[0], levels[1]]) assert_matching(idx.levels, levels) ind2 = idx.copy() - inplace_return = ind2.set_levels(new_levels[1], level=1, inplace=True) + with tm.assert_produces_warning(FutureWarning): + inplace_return = ind2.set_levels(new_levels[1], level=1, inplace=True) assert inplace_return is None assert_matching(ind2.levels, [levels[0], new_levels[1]]) assert_matching(idx.levels, levels) # level changing multiple levels [w/ mutation] ind2 = idx.copy() - inplace_return = ind2.set_levels(new_levels, level=[0, 1], inplace=True) + with tm.assert_produces_warning(FutureWarning): + inplace_return = ind2.set_levels(new_levels, level=[0, 1], inplace=True) assert inplace_return is None assert_matching(ind2.levels, new_levels) assert_matching(idx.levels, levels) @@ -136,19 +140,23 @@ def test_set_levels(idx): original_index = idx.copy() for inplace in [True, False]: with pytest.raises(ValueError, match="^On"): - idx.set_levels(["c"], level=0, inplace=inplace) + with tm.assert_produces_warning(FutureWarning): + idx.set_levels(["c"], level=0, inplace=inplace) assert_matching(idx.levels, original_index.levels, check_dtype=True) with pytest.raises(ValueError, match="^On"): - idx.set_codes([0, 1, 2, 3, 4, 5], level=0, inplace=inplace) + with tm.assert_produces_warning(FutureWarning): + idx.set_codes([0, 1, 2, 3, 4, 5], level=0, inplace=inplace) assert_matching(idx.codes, original_index.codes, check_dtype=True) with pytest.raises(TypeError, match="^Levels"): - idx.set_levels("c", level=0, inplace=inplace) + with tm.assert_produces_warning(FutureWarning): + idx.set_levels("c", level=0, inplace=inplace) assert_matching(idx.levels, original_index.levels, check_dtype=True) with pytest.raises(TypeError, match="^Codes"): - idx.set_codes(1, level=0, inplace=inplace) + with tm.assert_produces_warning(FutureWarning): + idx.set_codes(1, level=0, inplace=inplace) assert_matching(idx.codes, original_index.codes, check_dtype=True) @@ -168,7 +176,8 @@ def test_set_codes(idx): # changing label w/ mutation ind2 = idx.copy() - inplace_return = ind2.set_codes(new_codes, inplace=True) + with tm.assert_produces_warning(FutureWarning): + inplace_return = ind2.set_codes(new_codes, inplace=True) assert inplace_return is None assert_matching(ind2.codes, new_codes) @@ -188,20 +197,23 @@ def test_set_codes(idx): # label changing specific level w/ mutation ind2 = idx.copy() - inplace_return = ind2.set_codes(new_codes[0], level=0, inplace=True) + with tm.assert_produces_warning(FutureWarning): + inplace_return = ind2.set_codes(new_codes[0], level=0, inplace=True) assert inplace_return is None assert_matching(ind2.codes, [new_codes[0], codes[1]]) assert_matching(idx.codes, codes) ind2 = idx.copy() - 
inplace_return = ind2.set_codes(new_codes[1], level=1, inplace=True) + with tm.assert_produces_warning(FutureWarning): + inplace_return = ind2.set_codes(new_codes[1], level=1, inplace=True) assert inplace_return is None assert_matching(ind2.codes, [codes[0], new_codes[1]]) assert_matching(idx.codes, codes) # codes changing multiple levels [w/ mutation] ind2 = idx.copy() - inplace_return = ind2.set_codes(new_codes, level=[0, 1], inplace=True) + with tm.assert_produces_warning(FutureWarning): + inplace_return = ind2.set_codes(new_codes, level=[0, 1], inplace=True) assert inplace_return is None assert_matching(ind2.codes, new_codes) assert_matching(idx.codes, codes) @@ -217,7 +229,8 @@ def test_set_codes(idx): # [w/ mutation] result = ind.copy() - result.set_codes(codes=new_codes, level=1, inplace=True) + with tm.assert_produces_warning(FutureWarning): + result.set_codes(codes=new_codes, level=1, inplace=True) assert result.equals(expected) @@ -329,3 +342,19 @@ def test_set_levels_with_iterable(): [expected_sizes, colors], names=["size", "color"] ) tm.assert_index_equal(result, expected) + + +@pytest.mark.parametrize("inplace", [True, False]) +def test_set_codes_inplace_deprecated(idx, inplace): + new_codes = idx.codes[1][::-1] + + with tm.assert_produces_warning(FutureWarning): + idx.set_codes(codes=new_codes, level=1, inplace=inplace) + + +@pytest.mark.parametrize("inplace", [True, False]) +def test_set_levels_inplace_deprecated(idx, inplace): + new_level = idx.levels[1].copy() + + with tm.assert_produces_warning(FutureWarning): + idx.set_levels(levels=new_level, level=1, inplace=inplace) diff --git a/pandas/tests/indexes/multi/test_integrity.py b/pandas/tests/indexes/multi/test_integrity.py index fd150bb4d57a2..c776a33717ccd 100644 --- a/pandas/tests/indexes/multi/test_integrity.py +++ b/pandas/tests/indexes/multi/test_integrity.py @@ -220,7 +220,8 @@ def test_metadata_immutable(idx): def test_level_setting_resets_attributes(): ind = pd.MultiIndex.from_arrays([["A", "A", "B", "B", "B"], [1, 2, 1, 2, 3]]) assert ind.is_monotonic - ind.set_levels([["A", "B"], [1, 3, 2]], inplace=True) + with tm.assert_produces_warning(FutureWarning): + ind.set_levels([["A", "B"], [1, 3, 2]], inplace=True) # if this fails, probably didn't reset the cache correctly. 
assert not ind.is_monotonic diff --git a/pandas/tests/indexing/multiindex/test_sorted.py b/pandas/tests/indexing/multiindex/test_sorted.py index 572cb9da405d1..bafe5068e1418 100644 --- a/pandas/tests/indexing/multiindex/test_sorted.py +++ b/pandas/tests/indexing/multiindex/test_sorted.py @@ -43,9 +43,13 @@ def test_frame_getitem_not_sorted2(self, key): df2 = df.set_index(["col1", "col2"]) df2_original = df2.copy() - return_value = df2.index.set_levels(["b", "d", "a"], level="col1", inplace=True) + with tm.assert_produces_warning(FutureWarning): + return_value = df2.index.set_levels( + ["b", "d", "a"], level="col1", inplace=True + ) assert return_value is None - return_value = df2.index.set_codes([0, 1, 0, 2], level="col1", inplace=True) + with tm.assert_produces_warning(FutureWarning): + return_value = df2.index.set_codes([0, 1, 0, 2], level="col1", inplace=True) assert return_value is None assert not df2.index.is_lexsorted() assert not df2.index.is_monotonic diff --git a/pandas/tests/reshape/test_melt.py b/pandas/tests/reshape/test_melt.py index 2b75a1ec6ca6e..79879ef346f53 100644 --- a/pandas/tests/reshape/test_melt.py +++ b/pandas/tests/reshape/test_melt.py @@ -799,7 +799,7 @@ def test_invalid_separator(self): expected = expected.set_index(["id", "year"])[ ["X", "A2010", "A2011", "B2010", "A", "B"] ] - expected.index.set_levels([0, 1], level=0, inplace=True) + expected.index = expected.index.set_levels([0, 1], level=0) result = wide_to_long(df, ["A", "B"], i="id", j="year", sep=sep) tm.assert_frame_equal(result.sort_index(axis=1), expected.sort_index(axis=1)) @@ -861,7 +861,7 @@ def test_invalid_suffixtype(self): expected = pd.DataFrame(exp_data).astype({"year": "int"}) expected = expected.set_index(["id", "year"]) - expected.index.set_levels([0, 1], level=0, inplace=True) + expected.index = expected.index.set_levels([0, 1], level=0) result = wide_to_long(df, ["A", "B"], i="id", j="year") tm.assert_frame_equal(result.sort_index(axis=1), expected.sort_index(axis=1)) diff --git a/pandas/tests/test_multilevel.py b/pandas/tests/test_multilevel.py index 1ba73292dc0b4..724558bd49ea2 100644 --- a/pandas/tests/test_multilevel.py +++ b/pandas/tests/test_multilevel.py @@ -63,8 +63,8 @@ def setup_method(self, method): ).sum() # use Int64Index, to make sure things work - self.ymd.index.set_levels( - [lev.astype("i8") for lev in self.ymd.index.levels], inplace=True + self.ymd.index = self.ymd.index.set_levels( + [lev.astype("i8") for lev in self.ymd.index.levels] ) self.ymd.index.set_names(["year", "month", "day"], inplace=True) From 25601734b19ec19aee31054846386a88d9c179b3 Mon Sep 17 00:00:00 2001 From: Terji Petersen Date: Mon, 10 Aug 2020 14:24:29 +0100 Subject: [PATCH 246/632] REF: Simplify Index.copy (#35592) --- pandas/core/dtypes/common.py | 28 +++++++++++++++++- pandas/core/indexes/base.py | 36 +++++++++++++++--------- pandas/core/indexes/range.py | 5 ++-- pandas/core/series.py | 4 +-- pandas/tests/dtypes/test_common.py | 10 +++++++ pandas/tests/indexes/common.py | 14 +++++++++ pandas/tests/indexes/multi/test_names.py | 7 +++++ 7 files changed, 84 insertions(+), 20 deletions(-) diff --git a/pandas/core/dtypes/common.py b/pandas/core/dtypes/common.py index 73109020b1b54..1e70ff90fcd44 100644 --- a/pandas/core/dtypes/common.py +++ b/pandas/core/dtypes/common.py @@ -9,7 +9,7 @@ from pandas._libs import Interval, Period, algos from pandas._libs.tslibs import conversion -from pandas._typing import ArrayLike, DtypeObj +from pandas._typing import ArrayLike, DtypeObj, Optional from 
pandas.core.dtypes.base import registry
 from pandas.core.dtypes.dtypes import (
@@ -1732,6 +1732,32 @@ def _validate_date_like_dtype(dtype) -> None:
         )
 
 
+def validate_all_hashable(*args, error_name: Optional[str] = None) -> None:
+    """
+    Return None if all args are hashable, else raise a TypeError.
+
+    Parameters
+    ----------
+    *args
+        Arguments to validate.
+    error_name : str, optional
+        The name to use in the error message if an argument is not hashable.
+
+    Raises
+    ------
+    TypeError : If an argument is not hashable
+
+    Returns
+    -------
+    None
+    """
+    if not all(is_hashable(arg) for arg in args):
+        if error_name:
+            raise TypeError(f"{error_name} must be a hashable type")
+        else:
+            raise TypeError("All elements must be hashable")
+
+
 def pandas_dtype(dtype) -> DtypeObj:
     """
     Convert input into a pandas only dtype object or a numpy dtype object.
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index bd75a064b483e..ecd3670e724a1 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -58,6 +58,7 @@
     is_timedelta64_dtype,
     is_unsigned_integer_dtype,
     pandas_dtype,
+    validate_all_hashable,
 )
 from pandas.core.dtypes.concat import concat_compat
 from pandas.core.dtypes.generic import (
@@ -812,13 +813,11 @@ def copy(self, name=None, deep=False, dtype=None, names=None):
         In most cases, there should be no functional difference from using
         ``deep``, but if ``deep`` is passed it will attempt to deepcopy.
         """
+        name = self._validate_names(name=name, names=names, deep=deep)[0]
         if deep:
-            new_index = self._shallow_copy(self._data.copy())
+            new_index = self._shallow_copy(self._data.copy(), name=name)
         else:
-            new_index = self._shallow_copy()
-
-            names = self._validate_names(name=name, names=names, deep=deep)
-            new_index = new_index.set_names(names)
+            new_index = self._shallow_copy(name=name)
 
         if dtype:
             new_index = new_index.astype(dtype)
@@ -1186,7 +1185,7 @@ def name(self, value):
         maybe_extract_name(value, None, type(self))
         self._name = value
 
-    def _validate_names(self, name=None, names=None, deep: bool = False):
+    def _validate_names(self, name=None, names=None, deep: bool = False) -> List[Label]:
         """
         Handles the quirks of having a singular 'name' parameter for general
         Index and plural 'names' parameter for MultiIndex.
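A rough illustration of the user-visible behaviour this refactor standardizes (the messages below match the tests added later in this patch; the example itself is not part of the diff):

.. code-block:: python

    import pandas as pd

    idx = pd.Index([1, 2, 3], name="old")
    assert idx.copy(name="new").name == "new"

    # A flat Index has exactly one name, so a two-element list is rejected:
    #     ValueError: Length of new names must be 1, got 2
    # idx.copy(name=["a", "b"])

    # Unhashable names now raise a uniform TypeError via validate_all_hashable:
    #     TypeError: Index.name must be a hashable type
    # idx.copy(name=[["a"]])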
@@ -1196,15 +1195,25 @@ def _validate_names(self, name=None, names=None, deep: bool = False): if names is not None and name is not None: raise TypeError("Can only provide one of `names` and `name`") elif names is None and name is None: - return deepcopy(self.names) if deep else self.names + new_names = deepcopy(self.names) if deep else self.names elif names is not None: if not is_list_like(names): raise TypeError("Must pass list-like as `names`.") - return names + new_names = names + elif not is_list_like(name): + new_names = [name] else: - if not is_list_like(name): - return [name] - return name + new_names = name + + if len(new_names) != len(self.names): + raise ValueError( + f"Length of new names must be {len(self.names)}, got {len(new_names)}" + ) + + # All items in 'new_names' need to be hashable + validate_all_hashable(*new_names, error_name=f"{type(self).__name__}.name") + + return new_names def _get_names(self): return FrozenList((self.name,)) @@ -1232,9 +1241,8 @@ def _set_names(self, values, level=None): # GH 20527 # All items in 'name' need to be hashable: - for name in values: - if not is_hashable(name): - raise TypeError(f"{type(self).__name__}.name must be a hashable type") + validate_all_hashable(*values, error_name=f"{type(self).__name__}.name") + self._name = values[0] names = property(fset=_set_names, fget=_get_names) diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py index e9c4c301f4dca..3577a7aacc008 100644 --- a/pandas/core/indexes/range.py +++ b/pandas/core/indexes/range.py @@ -388,9 +388,8 @@ def _shallow_copy(self, values=None, name: Label = no_default): def copy(self, name=None, deep=False, dtype=None, names=None): self._validate_dtype(dtype) - new_index = self._shallow_copy() - names = self._validate_names(name=name, names=names, deep=deep) - new_index = new_index.set_names(names) + name = self._validate_names(name=name, names=names, deep=deep)[0] + new_index = self._shallow_copy(name=name) return new_index def _minmax(self, meth: str): diff --git a/pandas/core/series.py b/pandas/core/series.py index 9e70120f67969..93368ea1e515f 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -54,6 +54,7 @@ is_list_like, is_object_dtype, is_scalar, + validate_all_hashable, ) from pandas.core.dtypes.generic import ABCDataFrame from pandas.core.dtypes.inference import is_hashable @@ -491,8 +492,7 @@ def name(self) -> Label: @name.setter def name(self, value: Label) -> None: - if not is_hashable(value): - raise TypeError("Series.name must be a hashable type") + validate_all_hashable(value, error_name=f"{type(self).__name__}.name") object.__setattr__(self, "_name", value) @property diff --git a/pandas/tests/dtypes/test_common.py b/pandas/tests/dtypes/test_common.py index ce12718e48d0d..a6c526fcb008a 100644 --- a/pandas/tests/dtypes/test_common.py +++ b/pandas/tests/dtypes/test_common.py @@ -746,3 +746,13 @@ def test_astype_object_preserves_datetime_na(from_type): result = astype_nansafe(arr, dtype="object") assert isna(result)[0] + + +def test_validate_allhashable(): + assert com.validate_all_hashable(1, "a") is None + + with pytest.raises(TypeError, match="All elements must be hashable"): + com.validate_all_hashable([]) + + with pytest.raises(TypeError, match="list must be a hashable type"): + com.validate_all_hashable([], error_name="list") diff --git a/pandas/tests/indexes/common.py b/pandas/tests/indexes/common.py index 238ee8d304d05..98f7c0eadb4bb 100644 --- a/pandas/tests/indexes/common.py +++ b/pandas/tests/indexes/common.py @@ -270,6 
+270,20 @@ def test_copy_name(self, index):
         s3 = s1 * s2
         assert s3.index.name == "mario"
 
+    def test_name2(self, index):
+        # gh-35592
+        if isinstance(index, MultiIndex):
+            return
+
+        assert index.copy(name="mario").name == "mario"
+
+        with pytest.raises(ValueError, match="Length of new names must be 1, got 2"):
+            index.copy(name=["mario", "luigi"])
+
+        msg = f"{type(index).__name__}.name must be a hashable type"
+        with pytest.raises(TypeError, match=msg):
+            index.copy(name=[["mario"]])
+
     def test_ensure_copied_data(self, index):
         # Check the "copy" argument of each Index.__new__ is honoured
         # GH12309
diff --git a/pandas/tests/indexes/multi/test_names.py b/pandas/tests/indexes/multi/test_names.py
index 479b5ef0211a0..f38da7ad2ae1c 100644
--- a/pandas/tests/indexes/multi/test_names.py
+++ b/pandas/tests/indexes/multi/test_names.py
@@ -75,6 +75,13 @@ def test_copy_names():
     assert multi_idx.names == ["MyName1", "MyName2"]
     assert multi_idx3.names == ["NewName1", "NewName2"]
 
+    # gh-35592
+    with pytest.raises(ValueError, match="Length of new names must be 2, got 1"):
+        multi_idx.copy(names=["mario"])
+
+    with pytest.raises(TypeError, match="MultiIndex.name must be a hashable type"):
+        multi_idx.copy(names=[["mario"], ["luigi"]])
+
 
 def test_names(idx, index_names):
 
From 9a8152c95c8c61a0cdf33c64b80bbc9f2f9d27b6 Mon Sep 17 00:00:00 2001
From: Isaac Virshup
Date: Mon, 10 Aug 2020 23:30:12 +1000
Subject: [PATCH 247/632] BUG: Fix assert_equal when check_exact=True for
 non-numeric dtypes #3… (#35522)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 doc/source/whatsnew/v1.1.1.rst                |  1 +
 pandas/_testing.py                            |  6 ++----
 pandas/tests/util/test_assert_series_equal.py | 15 +++++++++++++++
 3 files changed, 18 insertions(+), 4 deletions(-)

diff --git a/doc/source/whatsnew/v1.1.1.rst b/doc/source/whatsnew/v1.1.1.rst
index 7f5182e3eaa6f..e5860644fa371 100644
--- a/doc/source/whatsnew/v1.1.1.rst
+++ b/doc/source/whatsnew/v1.1.1.rst
@@ -17,6 +17,7 @@ Fixed regressions
 - Fixed regression where :meth:`DataFrame.to_numpy` would raise a ``RuntimeError`` for mixed dtypes when converting to ``str`` (:issue:`35455`)
 - Fixed regression where :func:`read_csv` would raise a ``ValueError`` when ``pandas.options.mode.use_inf_as_na`` was set to ``True`` (:issue:`35493`).
+- Fixed regression where :func:`pandas.testing.assert_series_equal` would raise an error when non-numeric dtypes were passed with ``check_exact=True`` (:issue:`35446`) - Fixed regression in :class:`pandas.core.groupby.RollingGroupby` where column selection was ignored (:issue:`35486`) - Fixed regression in :meth:`DataFrame.shift` with ``axis=1`` and heterogeneous dtypes (:issue:`35488`) - Fixed regression in ``.groupby(..).rolling(..)`` where a segfault would occur with ``center=True`` and an odd number of values (:issue:`35552`) diff --git a/pandas/_testing.py b/pandas/_testing.py index a020fbff3553a..713f29466f097 100644 --- a/pandas/_testing.py +++ b/pandas/_testing.py @@ -1339,10 +1339,8 @@ def assert_series_equal( else: assert_attr_equal("dtype", left, right, obj=f"Attributes of {obj}") - if check_exact: - if not is_numeric_dtype(left.dtype): - raise AssertionError("check_exact may only be used with numeric Series") - + if check_exact and is_numeric_dtype(left.dtype) and is_numeric_dtype(right.dtype): + # Only check exact if dtype is numeric assert_numpy_array_equal( left._values, right._values, diff --git a/pandas/tests/util/test_assert_series_equal.py b/pandas/tests/util/test_assert_series_equal.py index 1284cc9d4f49b..a7b5aeac560e4 100644 --- a/pandas/tests/util/test_assert_series_equal.py +++ b/pandas/tests/util/test_assert_series_equal.py @@ -281,3 +281,18 @@ class MySeries(Series): with pytest.raises(AssertionError, match="Series classes are different"): tm.assert_series_equal(s3, s1, check_series_type=True) + + +def test_series_equal_exact_for_nonnumeric(): + # https://github.com/pandas-dev/pandas/issues/35446 + s1 = Series(["a", "b"]) + s2 = Series(["a", "b"]) + s3 = Series(["b", "a"]) + + tm.assert_series_equal(s1, s2, check_exact=True) + tm.assert_series_equal(s2, s1, check_exact=True) + + with pytest.raises(AssertionError): + tm.assert_series_equal(s1, s3, check_exact=True) + with pytest.raises(AssertionError): + tm.assert_series_equal(s3, s1, check_exact=True) From df1d440c4a0c583b06d17a885d4589b9edaa840b Mon Sep 17 00:00:00 2001 From: Martin Durant Date: Mon, 10 Aug 2020 11:19:25 -0400 Subject: [PATCH 248/632] Storage options (#35381) --- doc/source/user_guide/io.rst | 61 +++++++++++++--- doc/source/whatsnew/v1.2.0.rst | 14 ++++ pandas/_typing.py | 3 + pandas/conftest.py | 22 ++++++ pandas/core/frame.py | 37 +++++++++- pandas/core/generic.py | 44 +++++++++++- pandas/core/series.py | 20 +++++- pandas/io/common.py | 24 +++++-- pandas/io/feather_format.py | 25 +++++-- pandas/io/formats/csvs.py | 9 ++- pandas/io/json/_json.py | 28 ++++++-- pandas/io/parquet.py | 48 +++++++++++-- pandas/io/parsers.py | 5 +- pandas/io/pickle.py | 34 +++++++-- pandas/io/sas/sas_xport.py | 2 +- pandas/io/stata.py | 53 +++++++++++--- pandas/tests/io/test_fsspec.py | 125 ++++++++++++++++++++++++++++++++- pandas/tests/io/test_s3.py | 2 +- 18 files changed, 502 insertions(+), 54 deletions(-) diff --git a/doc/source/user_guide/io.rst b/doc/source/user_guide/io.rst index ab233f653061a..35403b5c8b66f 100644 --- a/doc/source/user_guide/io.rst +++ b/doc/source/user_guide/io.rst @@ -1649,29 +1649,72 @@ options include: Specifying any of the above options will produce a ``ParserWarning`` unless the python engine is selected explicitly using ``engine='python'``. -Reading remote files -'''''''''''''''''''' +.. 
_io.remote:
+
+Reading/writing remote files
+''''''''''''''''''''''''''''
 
-You can pass in a URL to a CSV file:
+You can pass a URL to many of pandas' IO functions to read or write remote
+files; the following example shows reading a CSV file:
 
 .. code-block:: python
 
    df = pd.read_csv('https://download.bls.gov/pub/time.series/cu/cu.item',
                     sep='\t')
 
-S3 URLs are handled as well but require installing the `S3Fs
+All URLs which are not local files or HTTP(s) are handled by
+`fsspec`_, if installed, and its various filesystem implementations
+(including Amazon S3, Google Cloud, SSH, FTP, webHDFS...).
+Some of these implementations require additional packages to be
+installed; for example,
+S3 URLs require the `s3fs
 <https://pypi.org/project/s3fs/>`_ library:
 
 .. code-block:: python
 
-   df = pd.read_csv('s3://pandas-test/tips.csv')
+   df = pd.read_json('s3://pandas-test/adatafile.json')
+
+When dealing with remote storage systems, you might need
+extra configuration with environment variables or config files in
+special locations. For example, to access data in your S3 bucket,
+you will need to define credentials in one of the several ways listed in
+the `S3Fs documentation
+<https://s3fs.readthedocs.io/en/latest/#credentials>`_. The same is true
+for several of the storage backends, and you should follow the links
+at `fsimpl1`_ for implementations built into ``fsspec`` and `fsimpl2`_
+for those not included in the main ``fsspec``
+distribution.
+
+You can also pass parameters directly to the backend driver. For example,
+if you do *not* have S3 credentials, you can still access public data by
+specifying an anonymous connection, such as
+
+.. versionadded:: 1.2.0
+
+.. code-block:: python
+
+   pd.read_csv("s3://ncei-wcsd-archive/data/processed/SH1305/18kHz/SaKe2013"
+               "-D20130523-T080854_to_SaKe2013-D20130523-T085643.csv",
+               storage_options={"anon": True})
+
+``fsspec`` also allows complex URLs, for accessing data in compressed
+archives, local caching of files, and more. To locally cache the above
+example, you would modify the call to
+
+.. code-block:: python
 
-If your S3 bucket requires credentials you will need to set them as environment
-variables or in the ``~/.aws/credentials`` config file, refer to the `S3Fs
-documentation on credentials
-<https://s3fs.readthedocs.io/en/latest/#credentials>`_.
+   pd.read_csv("simplecache::s3://ncei-wcsd-archive/data/processed/SH1305/18kHz/"
+               "SaKe2013-D20130523-T080854_to_SaKe2013-D20130523-T085643.csv",
+               storage_options={"s3": {"anon": True}})
 
+where we specify that the "anon" parameter is meant for the "s3" part of
+the implementation, not for the caching implementation. Note that this caches to a temporary
+directory for the duration of the session only, but you can also specify
+a permanent store.
 
+.. _fsspec: https://filesystem-spec.readthedocs.io/en/latest/
+.. _fsimpl1: https://filesystem-spec.readthedocs.io/en/latest/api.html#built-in-implementations
+.. _fsimpl2: https://filesystem-spec.readthedocs.io/en/latest/api.html#other-known-implementations
 
 Writing out data
 ''''''''''''''''
diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index d3bccada09c29..94bb265c32e4c 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -13,6 +13,20 @@ including other versions of pandas.
 Enhancements
 ~~~~~~~~~~~~
 
+Passing arguments to fsspec backends
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Many read/write functions have acquired the ``storage_options`` optional argument,
+to pass a dictionary of parameters to the storage backend. This allows, for
+example, passing credentials to S3 and GCS storage.
The details of what +parameters can be passed to which backends can be found in the documentation +of the individual storage backends (detailed from the fsspec docs for +`builtin implementations`_ and linked to `external ones`_). See +Section :ref:`io.remote`. + +.. _builtin implementations: https://filesystem-spec.readthedocs.io/en/latest/api.html#built-in-implementations +.. _external ones: https://filesystem-spec.readthedocs.io/en/latest/api.html#other-known-implementations + .. _whatsnew_120.binary_handle_to_csv: Support for binary file handles in ``to_csv`` diff --git a/pandas/_typing.py b/pandas/_typing.py index 76ec527e6e258..47a102ddc70e0 100644 --- a/pandas/_typing.py +++ b/pandas/_typing.py @@ -106,3 +106,6 @@ List[AggFuncTypeBase], Dict[Label, Union[AggFuncTypeBase, List[AggFuncTypeBase]]], ] + +# for arbitrary kwargs passed during reading/writing files +StorageOptions = Optional[Dict[str, Any]] diff --git a/pandas/conftest.py b/pandas/conftest.py index c1925b4f5ca3b..97cc514e31bb3 100644 --- a/pandas/conftest.py +++ b/pandas/conftest.py @@ -1224,3 +1224,25 @@ def sort_by_key(request): Tests None (no key) and the identity key. """ return request.param + + +@pytest.fixture() +def fsspectest(): + pytest.importorskip("fsspec") + from fsspec import register_implementation + from fsspec.implementations.memory import MemoryFileSystem + from fsspec.registry import _registry as registry + + class TestMemoryFS(MemoryFileSystem): + protocol = "testmem" + test = [None] + + def __init__(self, **kwargs): + self.test[0] = kwargs.pop("test", None) + super().__init__(**kwargs) + + register_implementation("testmem", TestMemoryFS, clobber=True) + yield TestMemoryFS() + registry.pop("testmem", None) + TestMemoryFS.test[0] = None + TestMemoryFS.store.clear() diff --git a/pandas/core/frame.py b/pandas/core/frame.py index b66b6b92336f2..9d0751fcce460 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -55,6 +55,7 @@ Label, Level, Renamer, + StorageOptions, ValueKeyFunc, ) from pandas.compat import PY37 @@ -2058,6 +2059,7 @@ def to_stata( version: Optional[int] = 114, convert_strl: Optional[Sequence[Label]] = None, compression: Union[str, Mapping[str, str], None] = "infer", + storage_options: StorageOptions = None, ) -> None: """ Export DataFrame object to Stata dta format. @@ -2134,6 +2136,16 @@ def to_stata( .. versionadded:: 1.1.0 + storage_options : dict, optional + Extra options that make sense for a particular storage connection, e.g. + host, port, username, password, etc., if using a URL that will + be parsed by ``fsspec``, e.g., starting "s3://", "gcs://". An error + will be raised if providing this argument with a local path or + a file-like buffer. See the fsspec and backend storage implementation + docs for the set of allowed keys and values. + + .. 
versionadded:: 1.2.0 + Raises ------ NotImplementedError @@ -2194,6 +2206,7 @@ def to_stata( write_index=write_index, variable_labels=variable_labels, compression=compression, + storage_options=storage_options, **kwargs, ) writer.write_file() @@ -2246,9 +2259,10 @@ def to_feather(self, path, **kwargs) -> None: ) def to_markdown( self, - buf: Optional[IO[str]] = None, - mode: Optional[str] = None, + buf: Optional[Union[IO[str], str]] = None, + mode: str = "wt", index: bool = True, + storage_options: StorageOptions = None, **kwargs, ) -> Optional[str]: if "showindex" in kwargs: @@ -2266,9 +2280,14 @@ def to_markdown( result = tabulate.tabulate(self, **kwargs) if buf is None: return result - buf, _, _, _ = get_filepath_or_buffer(buf, mode=mode) + buf, _, _, should_close = get_filepath_or_buffer( + buf, mode=mode, storage_options=storage_options + ) assert buf is not None # Help mypy. + assert not isinstance(buf, str) buf.writelines(result) + if should_close: + buf.close() return None @deprecate_kwarg(old_arg_name="fname", new_arg_name="path") @@ -2279,6 +2298,7 @@ def to_parquet( compression: Optional[str] = "snappy", index: Optional[bool] = None, partition_cols: Optional[List[str]] = None, + storage_options: StorageOptions = None, **kwargs, ) -> None: """ @@ -2327,6 +2347,16 @@ def to_parquet( .. versionadded:: 0.24.0 + storage_options : dict, optional + Extra options that make sense for a particular storage connection, e.g. + host, port, username, password, etc., if using a URL that will + be parsed by ``fsspec``, e.g., starting "s3://", "gcs://". An error + will be raised if providing this argument with a local path or + a file-like buffer. See the fsspec and backend storage implementation + docs for the set of allowed keys and values + + .. versionadded:: 1.2.0 + **kwargs Additional arguments passed to the parquet library. See :ref:`pandas io ` for more details. @@ -2373,6 +2403,7 @@ def to_parquet( compression=compression, index=index, partition_cols=partition_cols, + storage_options=storage_options, **kwargs, ) diff --git a/pandas/core/generic.py b/pandas/core/generic.py index fcb7e2a949205..520023050d49d 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -40,6 +40,7 @@ Label, Level, Renamer, + StorageOptions, TimedeltaConvertibleTypes, TimestampConvertibleTypes, ValueKeyFunc, @@ -2058,6 +2059,7 @@ def to_json( compression: Optional[str] = "infer", index: bool_t = True, indent: Optional[int] = None, + storage_options: StorageOptions = None, ) -> Optional[str]: """ Convert the object to a JSON string. @@ -2141,6 +2143,16 @@ def to_json( .. versionadded:: 1.0.0 + storage_options : dict, optional + Extra options that make sense for a particular storage connection, e.g. + host, port, username, password, etc., if using a URL that will + be parsed by ``fsspec``, e.g., starting "s3://", "gcs://". An error + will be raised if providing this argument with a local path or + a file-like buffer. See the fsspec and backend storage implementation + docs for the set of allowed keys and values + + .. versionadded:: 1.2.0 + Returns ------- None or str @@ -2319,6 +2331,7 @@ def to_json( compression=compression, index=index, indent=indent, + storage_options=storage_options, ) def to_hdf( @@ -2633,6 +2646,7 @@ def to_pickle( path, compression: Optional[str] = "infer", protocol: int = pickle.HIGHEST_PROTOCOL, + storage_options: StorageOptions = None, ) -> None: """ Pickle (serialize) object to file. @@ -2653,6 +2667,16 @@ def to_pickle( .. [1] https://docs.python.org/3/library/pickle.html. 
+ storage_options : dict, optional + Extra options that make sense for a particular storage connection, e.g. + host, port, username, password, etc., if using a URL that will + be parsed by ``fsspec``, e.g., starting "s3://", "gcs://". An error + will be raised if providing this argument with a local path or + a file-like buffer. See the fsspec and backend storage implementation + docs for the set of allowed keys and values + + .. versionadded:: 1.2.0 + See Also -------- read_pickle : Load pickled pandas object (or any object) from file. @@ -2686,7 +2710,13 @@ def to_pickle( """ from pandas.io.pickle import to_pickle - to_pickle(self, path, compression=compression, protocol=protocol) + to_pickle( + self, + path, + compression=compression, + protocol=protocol, + storage_options=storage_options, + ) def to_clipboard( self, excel: bool_t = True, sep: Optional[str] = None, **kwargs @@ -3031,6 +3061,7 @@ def to_csv( escapechar: Optional[str] = None, decimal: Optional[str] = ".", errors: str = "strict", + storage_options: StorageOptions = None, ) -> Optional[str]: r""" Write object to a comma-separated values (csv) file. @@ -3142,6 +3173,16 @@ def to_csv( .. versionadded:: 1.1.0 + storage_options : dict, optional + Extra options that make sense for a particular storage connection, e.g. + host, port, username, password, etc., if using a URL that will + be parsed by ``fsspec``, e.g., starting "s3://", "gcs://". An error + will be raised if providing this argument with a local path or + a file-like buffer. See the fsspec and backend storage implementation + docs for the set of allowed keys and values + + .. versionadded:: 1.2.0 + Returns ------- None or str @@ -3194,6 +3235,7 @@ def to_csv( doublequote=doublequote, escapechar=escapechar, decimal=decimal, + storage_options=storage_options, ) formatter.save() diff --git a/pandas/core/series.py b/pandas/core/series.py index 93368ea1e515f..e8bf87a39b572 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -31,6 +31,7 @@ FrameOrSeriesUnion, IndexKeyFunc, Label, + StorageOptions, ValueKeyFunc, ) from pandas.compat.numpy import function as nv @@ -1422,8 +1423,9 @@ def to_string( def to_markdown( self, buf: Optional[IO[str]] = None, - mode: Optional[str] = None, + mode: str = "wt", index: bool = True, + storage_options: StorageOptions = None, **kwargs, ) -> Optional[str]: """ @@ -1436,12 +1438,22 @@ def to_markdown( buf : str, Path or StringIO-like, optional, default None Buffer to write to. If None, the output is returned as a string. mode : str, optional - Mode in which file is opened. + Mode in which file is opened, "wt" by default. index : bool, optional, default True Add index (row) labels. .. versionadded:: 1.1.0 + storage_options : dict, optional + Extra options that make sense for a particular storage connection, e.g. + host, port, username, password, etc., if using a URL that will + be parsed by ``fsspec``, e.g., starting "s3://", "gcs://". An error + will be raised if providing this argument with a local path or + a file-like buffer. See the fsspec and backend storage implementation + docs for the set of allowed keys and values + + .. versionadded:: 1.2.0 + **kwargs These parameters will be passed to `tabulate \ `_. 
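To sketch how the new keyword is used end to end (an illustrative example reusing the public dataset from the io.rst section of this patch; it assumes the optional ``s3fs`` package is installed):

.. code-block:: python

    import pandas as pd

    # Read public S3 data anonymously; the dict is forwarded to s3fs.
    df = pd.read_csv(
        "s3://ncei-wcsd-archive/data/processed/SH1305/18kHz/SaKe2013"
        "-D20130523-T080854_to_SaKe2013-D20130523-T085643.csv",
        storage_options={"anon": True},
    )

    # Writers accept the same dictionary (bucket name here is hypothetical):
    # df.to_csv("s3://my-bucket/out.csv", storage_options={"anon": False})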
@@ -1477,7 +1489,9 @@ def to_markdown( | 3 | quetzal | +----+----------+ """ - return self.to_frame().to_markdown(buf, mode, index, **kwargs) + return self.to_frame().to_markdown( + buf, mode, index, storage_options=storage_options, **kwargs + ) # ---------------------------------------------------------------------- diff --git a/pandas/io/common.py b/pandas/io/common.py index 34e4425c657f1..9ac642e58b544 100644 --- a/pandas/io/common.py +++ b/pandas/io/common.py @@ -29,7 +29,7 @@ ) import zipfile -from pandas._typing import FilePathOrBuffer +from pandas._typing import FilePathOrBuffer, StorageOptions from pandas.compat import _get_lzma_file, _import_lzma from pandas.compat._optional import import_optional_dependency @@ -162,7 +162,7 @@ def get_filepath_or_buffer( encoding: Optional[str] = None, compression: Optional[str] = None, mode: Optional[str] = None, - storage_options: Optional[Dict[str, Any]] = None, + storage_options: StorageOptions = None, ): """ If the filepath_or_buffer is a url, translate and return the buffer. @@ -175,8 +175,16 @@ def get_filepath_or_buffer( compression : {{'gzip', 'bz2', 'zip', 'xz', None}}, optional encoding : the encoding to use to decode bytes, default is 'utf-8' mode : str, optional - storage_options: dict, optional - passed on to fsspec, if using it; this is not yet accessed by the public API + + storage_options : dict, optional + Extra options that make sense for a particular storage connection, e.g. + host, port, username, password, etc., if using a URL that will + be parsed by ``fsspec``, e.g., starting "s3://", "gcs://". An error + will be raised if providing this argument with a local path or + a file-like buffer. See the fsspec and backend storage implementation + docs for the set of allowed keys and values + + .. versionadded:: 1.2.0 Returns ------- @@ -188,6 +196,10 @@ def get_filepath_or_buffer( if isinstance(filepath_or_buffer, str) and is_url(filepath_or_buffer): # TODO: fsspec can also handle HTTP via requests, but leaving this unchanged + if storage_options: + raise ValueError( + "storage_options passed with file object or non-fsspec file path" + ) req = urlopen(filepath_or_buffer) content_encoding = req.headers.get("Content-Encoding", None) if content_encoding == "gzip": @@ -242,6 +254,10 @@ def get_filepath_or_buffer( ).open() return file_obj, encoding, compression, True + elif storage_options: + raise ValueError( + "storage_options passed with file object or non-fsspec file path" + ) if isinstance(filepath_or_buffer, (str, bytes, mmap.mmap)): return _expand_user(filepath_or_buffer), None, compression, False diff --git a/pandas/io/feather_format.py b/pandas/io/feather_format.py index dfa43942fc8b3..2c664e73b9463 100644 --- a/pandas/io/feather_format.py +++ b/pandas/io/feather_format.py @@ -4,10 +4,10 @@ from pandas import DataFrame, Int64Index, RangeIndex -from pandas.io.common import get_filepath_or_buffer, stringify_path +from pandas.io.common import get_filepath_or_buffer -def to_feather(df: DataFrame, path, **kwargs): +def to_feather(df: DataFrame, path, storage_options=None, **kwargs): """ Write a DataFrame to the binary Feather format. @@ -15,6 +15,17 @@ def to_feather(df: DataFrame, path, **kwargs): ---------- df : DataFrame path : string file path, or file-like object + + storage_options : dict, optional + Extra options that make sense for a particular storage connection, e.g. + host, port, username, password, etc., if using a URL that will + be parsed by ``fsspec``, e.g., starting "s3://", "gcs://". 
An error + will be raised if providing this argument with a local path or + a file-like buffer. See the fsspec and backend storage implementation + docs for the set of allowed keys and values + + .. versionadded:: 1.2.0 + **kwargs : Additional keywords passed to `pyarrow.feather.write_feather`. @@ -23,7 +34,9 @@ def to_feather(df: DataFrame, path, **kwargs): import_optional_dependency("pyarrow") from pyarrow import feather - path = stringify_path(path) + path, _, _, should_close = get_filepath_or_buffer( + path, mode="wb", storage_options=storage_options + ) if not isinstance(df, DataFrame): raise ValueError("feather only support IO with DataFrames") @@ -64,7 +77,7 @@ def to_feather(df: DataFrame, path, **kwargs): feather.write_feather(df, path, **kwargs) -def read_feather(path, columns=None, use_threads: bool = True): +def read_feather(path, columns=None, use_threads: bool = True, storage_options=None): """ Load a feather-format object from the file path. @@ -98,7 +111,9 @@ def read_feather(path, columns=None, use_threads: bool = True): import_optional_dependency("pyarrow") from pyarrow import feather - path, _, _, should_close = get_filepath_or_buffer(path) + path, _, _, should_close = get_filepath_or_buffer( + path, storage_options=storage_options + ) df = feather.read_feather(path, columns=columns, use_threads=bool(use_threads)) diff --git a/pandas/io/formats/csvs.py b/pandas/io/formats/csvs.py index b10946a20d041..6eceb94387171 100644 --- a/pandas/io/formats/csvs.py +++ b/pandas/io/formats/csvs.py @@ -11,7 +11,7 @@ import numpy as np from pandas._libs import writers as libwriters -from pandas._typing import FilePathOrBuffer +from pandas._typing import FilePathOrBuffer, StorageOptions from pandas.core.dtypes.generic import ( ABCDatetimeIndex, @@ -53,6 +53,7 @@ def __init__( doublequote: bool = True, escapechar: Optional[str] = None, decimal=".", + storage_options: StorageOptions = None, ): self.obj = obj @@ -63,7 +64,11 @@ def __init__( compression, self.compression_args = get_compression_method(compression) self.path_or_buf, _, _, self.should_close = get_filepath_or_buffer( - path_or_buf, encoding=encoding, compression=compression, mode=mode + path_or_buf, + encoding=encoding, + compression=compression, + mode=mode, + storage_options=storage_options, ) self.sep = sep self.na_rep = na_rep diff --git a/pandas/io/json/_json.py b/pandas/io/json/_json.py index 0b06a26d4aa3c..0d2b351926343 100644 --- a/pandas/io/json/_json.py +++ b/pandas/io/json/_json.py @@ -9,7 +9,7 @@ import pandas._libs.json as json from pandas._libs.tslibs import iNaT -from pandas._typing import JSONSerializable +from pandas._typing import JSONSerializable, StorageOptions from pandas.errors import AbstractMethodError from pandas.util._decorators import deprecate_kwarg, deprecate_nonkeyword_arguments @@ -44,6 +44,7 @@ def to_json( compression: Optional[str] = "infer", index: bool = True, indent: int = 0, + storage_options: StorageOptions = None, ): if not index and orient not in ["split", "table"]: @@ -52,8 +53,11 @@ def to_json( ) if path_or_buf is not None: - path_or_buf, _, _, _ = get_filepath_or_buffer( - path_or_buf, compression=compression, mode="w" + path_or_buf, _, _, should_close = get_filepath_or_buffer( + path_or_buf, + compression=compression, + mode="wt", + storage_options=storage_options, ) if lines and orient != "records": @@ -97,6 +101,8 @@ def to_json( return s else: path_or_buf.write(s) + if should_close: + path_or_buf.close() class Writer: @@ -365,6 +371,7 @@ def read_json( chunksize: Optional[int] = 
None, compression="infer", nrows: Optional[int] = None, + storage_options: StorageOptions = None, ): """ Convert a JSON string to pandas object. @@ -510,6 +517,16 @@ def read_json( .. versionadded:: 1.1 + storage_options : dict, optional + Extra options that make sense for a particular storage connection, e.g. + host, port, username, password, etc., if using a URL that will + be parsed by ``fsspec``, e.g., starting "s3://", "gcs://". An error + will be raised if providing this argument with a local path or + a file-like buffer. See the fsspec and backend storage implementation + docs for the set of allowed keys and values + + .. versionadded:: 1.2.0 + Returns ------- Series or DataFrame @@ -592,7 +609,10 @@ def read_json( compression = infer_compression(path_or_buf, compression) filepath_or_buffer, _, compression, should_close = get_filepath_or_buffer( - path_or_buf, encoding=encoding, compression=compression + path_or_buf, + encoding=encoding, + compression=compression, + storage_options=storage_options, ) json_reader = JsonReader( diff --git a/pandas/io/parquet.py b/pandas/io/parquet.py index 8c4b63767ac06..7f0eef039a1e8 100644 --- a/pandas/io/parquet.py +++ b/pandas/io/parquet.py @@ -3,7 +3,7 @@ from typing import Any, AnyStr, Dict, List, Optional from warnings import catch_warnings -from pandas._typing import FilePathOrBuffer +from pandas._typing import FilePathOrBuffer, StorageOptions from pandas.compat._optional import import_optional_dependency from pandas.errors import AbstractMethodError @@ -89,6 +89,7 @@ def write( path: FilePathOrBuffer[AnyStr], compression: Optional[str] = "snappy", index: Optional[bool] = None, + storage_options: StorageOptions = None, partition_cols: Optional[List[str]] = None, **kwargs, ): @@ -105,9 +106,13 @@ def write( import_optional_dependency("fsspec") import fsspec.core - fs, path = fsspec.core.url_to_fs(path) + fs, path = fsspec.core.url_to_fs(path, **(storage_options or {})) kwargs["filesystem"] = fs else: + if storage_options: + raise ValueError( + "storage_options passed with file object or non-fsspec file path" + ) path = _expand_user(path) if partition_cols is not None: # writes to multiple files under the given path @@ -122,14 +127,20 @@ def write( # write to single output file self.api.parquet.write_table(table, path, compression=compression, **kwargs) - def read(self, path, columns=None, **kwargs): + def read( + self, path, columns=None, storage_options: StorageOptions = None, **kwargs, + ): if is_fsspec_url(path) and "filesystem" not in kwargs: import_optional_dependency("fsspec") import fsspec.core - fs, path = fsspec.core.url_to_fs(path) + fs, path = fsspec.core.url_to_fs(path, **(storage_options or {})) should_close = False else: + if storage_options: + raise ValueError( + "storage_options passed with buffer or non-fsspec filepath" + ) fs = kwargs.pop("filesystem", None) should_close = False path = _expand_user(path) @@ -163,6 +174,7 @@ def write( compression="snappy", index=None, partition_cols=None, + storage_options: StorageOptions = None, **kwargs, ): self.validate_dataframe(df) @@ -185,8 +197,14 @@ def write( fsspec = import_optional_dependency("fsspec") # if filesystem is provided by fsspec, file must be opened in 'wb' mode. 
- kwargs["open_with"] = lambda path, _: fsspec.open(path, "wb").open() + kwargs["open_with"] = lambda path, _: fsspec.open( + path, "wb", **(storage_options or {}) + ).open() else: + if storage_options: + raise ValueError( + "storage_options passed with file object or non-fsspec file path" + ) path, _, _, _ = get_filepath_or_buffer(path) with catch_warnings(record=True): @@ -199,11 +217,15 @@ def write( **kwargs, ) - def read(self, path, columns=None, **kwargs): + def read( + self, path, columns=None, storage_options: StorageOptions = None, **kwargs, + ): if is_fsspec_url(path): fsspec = import_optional_dependency("fsspec") - open_with = lambda path, _: fsspec.open(path, "rb").open() + open_with = lambda path, _: fsspec.open( + path, "rb", **(storage_options or {}) + ).open() parquet_file = self.api.ParquetFile(path, open_with=open_with) else: path, _, _, _ = get_filepath_or_buffer(path) @@ -218,6 +240,7 @@ def to_parquet( engine: str = "auto", compression: Optional[str] = "snappy", index: Optional[bool] = None, + storage_options: StorageOptions = None, partition_cols: Optional[List[str]] = None, **kwargs, ): @@ -261,6 +284,16 @@ def to_parquet( .. versionadded:: 0.24.0 + storage_options : dict, optional + Extra options that make sense for a particular storage connection, e.g. + host, port, username, password, etc., if using a URL that will + be parsed by ``fsspec``, e.g., starting "s3://", "gcs://". An error + will be raised if providing this argument with a local path or + a file-like buffer. See the fsspec and backend storage implementation + docs for the set of allowed keys and values + + .. versionadded:: 1.2.0 + kwargs Additional keyword arguments passed to the engine """ @@ -273,6 +306,7 @@ def to_parquet( compression=compression, index=index, partition_cols=partition_cols, + storage_options=storage_options, **kwargs, ) diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py index d4f346f8c1087..9dc0e1f71d13b 100644 --- a/pandas/io/parsers.py +++ b/pandas/io/parsers.py @@ -420,6 +420,7 @@ def _validate_names(names): def _read(filepath_or_buffer: FilePathOrBuffer, kwds): """Generic reader of line files.""" encoding = kwds.get("encoding", None) + storage_options = kwds.get("storage_options", None) if encoding is not None: encoding = re.sub("_", "-", encoding).lower() kwds["encoding"] = encoding @@ -432,7 +433,7 @@ def _read(filepath_or_buffer: FilePathOrBuffer, kwds): # though mypy handling of conditional imports is difficult. 
# See https://github.com/python/mypy/issues/1297 fp_or_buf, _, compression, should_close = get_filepath_or_buffer( - filepath_or_buffer, encoding, compression + filepath_or_buffer, encoding, compression, storage_options=storage_options ) kwds["compression"] = compression @@ -595,6 +596,7 @@ def read_csv( low_memory=_c_parser_defaults["low_memory"], memory_map=False, float_precision=None, + storage_options=None, ): # gh-23761 # @@ -681,6 +683,7 @@ def read_csv( mangle_dupe_cols=mangle_dupe_cols, infer_datetime_format=infer_datetime_format, skip_blank_lines=skip_blank_lines, + storage_options=storage_options, ) return _read(filepath_or_buffer, kwds) diff --git a/pandas/io/pickle.py b/pandas/io/pickle.py index 3b35b54a6dc16..549d55e65546d 100644 --- a/pandas/io/pickle.py +++ b/pandas/io/pickle.py @@ -3,7 +3,7 @@ from typing import Any, Optional import warnings -from pandas._typing import FilePathOrBuffer +from pandas._typing import FilePathOrBuffer, StorageOptions from pandas.compat import pickle_compat as pc from pandas.io.common import get_filepath_or_buffer, get_handle @@ -14,6 +14,7 @@ def to_pickle( filepath_or_buffer: FilePathOrBuffer, compression: Optional[str] = "infer", protocol: int = pickle.HIGHEST_PROTOCOL, + storage_options: StorageOptions = None, ): """ Pickle (serialize) object to file. @@ -42,6 +43,16 @@ def to_pickle( protocol parameter is equivalent to setting its value to HIGHEST_PROTOCOL. + storage_options : dict, optional + Extra options that make sense for a particular storage connection, e.g. + host, port, username, password, etc., if using a URL that will + be parsed by ``fsspec``, e.g., starting "s3://", "gcs://". An error + will be raised if providing this argument with a local path or + a file-like buffer. See the fsspec and backend storage implementation + docs for the set of allowed keys and values + + .. versionadded:: 1.2.0 + .. [1] https://docs.python.org/3/library/pickle.html See Also @@ -76,7 +87,10 @@ def to_pickle( >>> os.remove("./dummy.pkl") """ fp_or_buf, _, compression, should_close = get_filepath_or_buffer( - filepath_or_buffer, compression=compression, mode="wb" + filepath_or_buffer, + compression=compression, + mode="wb", + storage_options=storage_options, ) if not isinstance(fp_or_buf, str) and compression == "infer": compression = None @@ -97,7 +111,9 @@ def to_pickle( def read_pickle( - filepath_or_buffer: FilePathOrBuffer, compression: Optional[str] = "infer" + filepath_or_buffer: FilePathOrBuffer, + compression: Optional[str] = "infer", + storage_options: StorageOptions = None, ): """ Load pickled pandas object (or any object) from file. @@ -121,6 +137,16 @@ def read_pickle( compression) If 'infer' and 'path_or_url' is not path-like, then use None (= no decompression). + storage_options : dict, optional + Extra options that make sense for a particular storage connection, e.g. + host, port, username, password, etc., if using a URL that will + be parsed by ``fsspec``, e.g., starting "s3://", "gcs://". An error + will be raised if providing this argument with a local path or + a file-like buffer. See the fsspec and backend storage implementation + docs for the set of allowed keys and values + + .. 
versionadded:: 1.2.0 + Returns ------- unpickled : same type as object stored in file @@ -162,7 +188,7 @@ def read_pickle( >>> os.remove("./dummy.pkl") """ fp_or_buf, _, compression, should_close = get_filepath_or_buffer( - filepath_or_buffer, compression=compression + filepath_or_buffer, compression=compression, storage_options=storage_options ) if not isinstance(fp_or_buf, str) and compression == "infer": compression = None diff --git a/pandas/io/sas/sas_xport.py b/pandas/io/sas/sas_xport.py index 7fc1bc6d3eb6c..6cf248b748107 100644 --- a/pandas/io/sas/sas_xport.py +++ b/pandas/io/sas/sas_xport.py @@ -244,7 +244,7 @@ class XportReader(ReaderBase, abc.Iterator): __doc__ = _xport_reader_doc def __init__( - self, filepath_or_buffer, index=None, encoding="ISO-8859-1", chunksize=None + self, filepath_or_buffer, index=None, encoding="ISO-8859-1", chunksize=None, ): self._encoding = encoding diff --git a/pandas/io/stata.py b/pandas/io/stata.py index cb23b781a7ad2..7a25617885839 100644 --- a/pandas/io/stata.py +++ b/pandas/io/stata.py @@ -11,7 +11,7 @@ """ from collections import abc import datetime -from io import BytesIO, IOBase +from io import BytesIO import os from pathlib import Path import struct @@ -35,7 +35,7 @@ from pandas._libs.lib import infer_dtype from pandas._libs.writers import max_len_string_array -from pandas._typing import FilePathOrBuffer, Label +from pandas._typing import FilePathOrBuffer, Label, StorageOptions from pandas.util._decorators import Appender from pandas.core.dtypes.common import ( @@ -1035,6 +1035,7 @@ def __init__( columns: Optional[Sequence[str]] = None, order_categoricals: bool = True, chunksize: Optional[int] = None, + storage_options: StorageOptions = None, ): super().__init__() self.col_sizes: List[int] = [] @@ -1068,13 +1069,16 @@ def __init__( self._native_byteorder = _set_endianness(sys.byteorder) path_or_buf = stringify_path(path_or_buf) if isinstance(path_or_buf, str): - path_or_buf, encoding, _, should_close = get_filepath_or_buffer(path_or_buf) + path_or_buf, encoding, _, should_close = get_filepath_or_buffer( + path_or_buf, storage_options=storage_options + ) if isinstance(path_or_buf, (str, bytes)): self.path_or_buf = open(path_or_buf, "rb") - elif isinstance(path_or_buf, IOBase): + elif hasattr(path_or_buf, "read"): # Copy to BytesIO, and ensure no encoding - contents = path_or_buf.read() + pb: Any = path_or_buf + contents = pb.read() self.path_or_buf = BytesIO(contents) self._read_header() @@ -1906,6 +1910,7 @@ def read_stata( order_categoricals: bool = True, chunksize: Optional[int] = None, iterator: bool = False, + storage_options: StorageOptions = None, ) -> Union[DataFrame, StataReader]: reader = StataReader( @@ -1918,6 +1923,7 @@ def read_stata( columns=columns, order_categoricals=order_categoricals, chunksize=chunksize, + storage_options=storage_options, ) if iterator or chunksize: @@ -1931,7 +1937,9 @@ def read_stata( def _open_file_binary_write( - fname: FilePathOrBuffer, compression: Union[str, Mapping[str, str], None], + fname: FilePathOrBuffer, + compression: Union[str, Mapping[str, str], None], + storage_options: StorageOptions = None, ) -> Tuple[BinaryIO, bool, Optional[Union[str, Mapping[str, str]]]]: """ Open a binary file or no-op if file-like. @@ -1943,6 +1951,16 @@ def _open_file_binary_write( compression : {str, dict, None} The compression method to use. + storage_options : dict, optional + Extra options that make sense for a particular storage connection, e.g. 
+ host, port, username, password, etc., if using a URL that will + be parsed by ``fsspec``, e.g., starting "s3://", "gcs://". An error + will be raised if providing this argument with a local path or + a file-like buffer. See the fsspec and backend storage implementation + docs for the set of allowed keys and values + + .. versionadded:: 1.2.0 + Returns ------- file : file-like object @@ -1961,7 +1979,10 @@ def _open_file_binary_write( compression_typ, compression_args = get_compression_method(compression) compression_typ = infer_compression(fname, compression_typ) path_or_buf, _, compression_typ, _ = get_filepath_or_buffer( - fname, compression=compression_typ + fname, + mode="wb", + compression=compression_typ, + storage_options=storage_options, ) if compression_typ is not None: compression = compression_args @@ -2158,6 +2179,16 @@ class StataWriter(StataParser): .. versionadded:: 1.1.0 + storage_options : dict, optional + Extra options that make sense for a particular storage connection, e.g. + host, port, username, password, etc., if using a URL that will + be parsed by ``fsspec``, e.g., starting "s3://", "gcs://". An error + will be raised if providing this argument with a local path or + a file-like buffer. See the fsspec and backend storage implementation + docs for the set of allowed keys and values + + .. versionadded:: 1.2.0 + Returns ------- writer : StataWriter instance @@ -2207,6 +2238,7 @@ def __init__( data_label: Optional[str] = None, variable_labels: Optional[Dict[Label, str]] = None, compression: Union[str, Mapping[str, str], None] = "infer", + storage_options: StorageOptions = None, ): super().__init__() self._convert_dates = {} if convert_dates is None else convert_dates @@ -2219,6 +2251,7 @@ def __init__( self._output_file: Optional[BinaryIO] = None # attach nobs, nvars, data, varlist, typlist self._prepare_pandas(data) + self.storage_options = storage_options if byteorder is None: byteorder = sys.byteorder @@ -2505,7 +2538,7 @@ def _encode_strings(self) -> None: def write_file(self) -> None: self._file, self._own_file, compression = _open_file_binary_write( - self._fname, self._compression + self._fname, self._compression, storage_options=self.storage_options ) if compression is not None: self._output_file = self._file @@ -3088,6 +3121,7 @@ def __init__( variable_labels: Optional[Dict[Label, str]] = None, convert_strl: Optional[Sequence[Label]] = None, compression: Union[str, Mapping[str, str], None] = "infer", + storage_options: StorageOptions = None, ): # Copy to new list since convert_strl might be modified later self._convert_strl: List[Label] = [] @@ -3104,6 +3138,7 @@ def __init__( data_label=data_label, variable_labels=variable_labels, compression=compression, + storage_options=storage_options, ) self._map: Dict[str, int] = {} self._strl_blob = b"" @@ -3491,6 +3526,7 @@ def __init__( convert_strl: Optional[Sequence[Label]] = None, version: Optional[int] = None, compression: Union[str, Mapping[str, str], None] = "infer", + storage_options: StorageOptions = None, ): if version is None: version = 118 if data.shape[1] <= 32767 else 119 @@ -3513,6 +3549,7 @@ def __init__( variable_labels=variable_labels, convert_strl=convert_strl, compression=compression, + storage_options=storage_options, ) # Override version set in StataWriter117 init self._dta_version = version diff --git a/pandas/tests/io/test_fsspec.py b/pandas/tests/io/test_fsspec.py index a0723452ccb70..3e89f6ca4ae16 100644 --- a/pandas/tests/io/test_fsspec.py +++ b/pandas/tests/io/test_fsspec.py @@ -1,7 
+1,18 @@ +import io + import numpy as np import pytest -from pandas import DataFrame, date_range, read_csv, read_parquet +from pandas import ( + DataFrame, + date_range, + read_csv, + read_feather, + read_json, + read_parquet, + read_pickle, + read_stata, +) import pandas._testing as tm from pandas.util import _test_decorators as td @@ -63,6 +74,16 @@ def test_to_csv(cleared_fs): tm.assert_frame_equal(df1, df2) +def test_csv_options(fsspectest): + df = DataFrame({"a": [0]}) + df.to_csv( + "testmem://test/test.csv", storage_options={"test": "csv_write"}, index=False + ) + assert fsspectest.test[0] == "csv_write" + read_csv("testmem://test/test.csv", storage_options={"test": "csv_read"}) + assert fsspectest.test[0] == "csv_read" + + @td.skip_if_no("fastparquet") def test_to_parquet_new_file(monkeypatch, cleared_fs): """Regression test for writing to a not-yet-existent GCS Parquet file.""" @@ -71,6 +92,44 @@ def test_to_parquet_new_file(monkeypatch, cleared_fs): ) +@td.skip_if_no("pyarrow") +def test_arrowparquet_options(fsspectest): + """Regression test for writing to a not-yet-existent GCS Parquet file.""" + df = DataFrame({"a": [0]}) + df.to_parquet( + "testmem://test/test.csv", + engine="pyarrow", + compression=None, + storage_options={"test": "parquet_write"}, + ) + assert fsspectest.test[0] == "parquet_write" + read_parquet( + "testmem://test/test.csv", + engine="pyarrow", + storage_options={"test": "parquet_read"}, + ) + assert fsspectest.test[0] == "parquet_read" + + +@td.skip_if_no("fastparquet") +def test_fastparquet_options(fsspectest): + """Regression test for writing to a not-yet-existent GCS Parquet file.""" + df = DataFrame({"a": [0]}) + df.to_parquet( + "testmem://test/test.csv", + engine="fastparquet", + compression=None, + storage_options={"test": "parquet_write"}, + ) + assert fsspectest.test[0] == "parquet_write" + read_parquet( + "testmem://test/test.csv", + engine="fastparquet", + storage_options={"test": "parquet_read"}, + ) + assert fsspectest.test[0] == "parquet_read" + + @td.skip_if_no("s3fs") def test_from_s3_csv(s3_resource, tips_file): tm.assert_equal(read_csv("s3://pandas-test/tips.csv"), read_csv(tips_file)) @@ -101,3 +160,67 @@ def test_not_present_exception(): with pytest.raises(ImportError) as e: read_csv("memory://test/test.csv") assert "fsspec library is required" in str(e.value) + + +@td.skip_if_no("pyarrow") +def test_feather_options(fsspectest): + df = DataFrame({"a": [0]}) + df.to_feather("testmem://afile", storage_options={"test": "feather_write"}) + assert fsspectest.test[0] == "feather_write" + out = read_feather("testmem://afile", storage_options={"test": "feather_read"}) + assert fsspectest.test[0] == "feather_read" + tm.assert_frame_equal(df, out) + + +def test_pickle_options(fsspectest): + df = DataFrame({"a": [0]}) + df.to_pickle("testmem://afile", storage_options={"test": "pickle_write"}) + assert fsspectest.test[0] == "pickle_write" + out = read_pickle("testmem://afile", storage_options={"test": "pickle_read"}) + assert fsspectest.test[0] == "pickle_read" + tm.assert_frame_equal(df, out) + + +def test_json_options(fsspectest): + df = DataFrame({"a": [0]}) + df.to_json("testmem://afile", storage_options={"test": "json_write"}) + assert fsspectest.test[0] == "json_write" + out = read_json("testmem://afile", storage_options={"test": "json_read"}) + assert fsspectest.test[0] == "json_read" + tm.assert_frame_equal(df, out) + + +def test_stata_options(fsspectest): + df = DataFrame({"a": [0]}) + df.to_stata( + "testmem://afile", 
storage_options={"test": "stata_write"}, write_index=False + ) + assert fsspectest.test[0] == "stata_write" + out = read_stata("testmem://afile", storage_options={"test": "stata_read"}) + assert fsspectest.test[0] == "stata_read" + tm.assert_frame_equal(df, out.astype("int64")) + + +@td.skip_if_no("tabulate") +def test_markdown_options(fsspectest): + df = DataFrame({"a": [0]}) + df.to_markdown("testmem://afile", storage_options={"test": "md_write"}) + assert fsspectest.test[0] == "md_write" + assert fsspectest.cat("afile") + + +@td.skip_if_no("pyarrow") +def test_non_fsspec_options(): + with pytest.raises(ValueError, match="storage_options"): + read_csv("localfile", storage_options={"a": True}) + with pytest.raises(ValueError, match="storage_options"): + # separate test for parquet, which has a different code path + read_parquet("localfile", storage_options={"a": True}) + by = io.BytesIO() + + with pytest.raises(ValueError, match="storage_options"): + read_csv(by, storage_options={"a": True}) + + df = DataFrame({"a": [0]}) + with pytest.raises(ValueError, match="storage_options"): + df.to_parquet("nonfsspecpath", storage_options={"a": True}) diff --git a/pandas/tests/io/test_s3.py b/pandas/tests/io/test_s3.py index 5e0f7edf4d8ae..a137e76b1696b 100644 --- a/pandas/tests/io/test_s3.py +++ b/pandas/tests/io/test_s3.py @@ -32,7 +32,7 @@ def test_read_without_creds_from_pub_bucket(): @tm.network @td.skip_if_no("s3fs") -def test_read_with_creds_from_pub_bucke(): +def test_read_with_creds_from_pub_bucket(): # Ensure we can read from a public bucket with credentials # GH 34626 # Use Amazon Open Data Registry - https://registry.opendata.aws/gdelt From 32abe63995c721bc6b7494c815bf7c6b714bb918 Mon Sep 17 00:00:00 2001 From: Terji Petersen Date: Mon, 10 Aug 2020 23:52:08 +0100 Subject: [PATCH 249/632] REF/PERF: Move MultiIndex._tuples to MultiIndex._cache (#35641) --- pandas/core/indexes/multi.py | 20 ++++++---------- pandas/io/pytables.py | 8 +++---- pandas/tests/indexes/multi/test_compat.py | 29 +++++++++++++++++------ 3 files changed, 33 insertions(+), 24 deletions(-) diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index 13927dede5542..448c2dfe4a29d 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -243,7 +243,6 @@ class MultiIndex(Index): _comparables = ["names"] rename = Index.set_names - _tuples = None sortorder: Optional[int] # -------------------------------------------------------------------- @@ -634,16 +633,9 @@ def from_frame(cls, df, sortorder=None, names=None): # -------------------------------------------------------------------- - @property + @cache_readonly def _values(self): # We override here, since our parent uses _data, which we don't use. 
- return self.values - - @property - def values(self): - if self._tuples is not None: - return self._tuples - values = [] for i in range(self.nlevels): @@ -657,8 +649,12 @@ def values(self): vals = np.array(vals, copy=False) values.append(vals) - self._tuples = lib.fast_zip(values) - return self._tuples + arr = lib.fast_zip(values) + return arr + + @property + def values(self): + return self._values @property def array(self): @@ -737,7 +733,6 @@ def _set_levels( if any(names): self._set_names(names) - self._tuples = None self._reset_cache() def set_levels(self, levels, level=None, inplace=None, verify_integrity=True): @@ -906,7 +901,6 @@ def _set_codes( self._codes = new_codes - self._tuples = None self._reset_cache() def set_codes(self, codes, level=None, inplace=None, verify_integrity=True): diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index aeb7b3e044794..2abc570a04de3 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -320,6 +320,10 @@ def read_hdf( mode : {'r', 'r+', 'a'}, default 'r' Mode to use when opening the file. Ignored if path_or_buf is a :class:`pandas.HDFStore`. Default is 'r'. + errors : str, default 'strict' + Specifies how encoding and decoding errors are to be handled. + See the errors argument for :func:`open` for a full list + of options. where : list, optional A list of Term (or convertible) objects. start : int, optional @@ -332,10 +336,6 @@ def read_hdf( Return an iterator object. chunksize : int, optional Number of rows to include in an iteration when using an iterator. - errors : str, default 'strict' - Specifies how encoding and decoding errors are to be handled. - See the errors argument for :func:`open` for a full list - of options. **kwargs Additional keyword arguments passed to HDFStore. diff --git a/pandas/tests/indexes/multi/test_compat.py b/pandas/tests/indexes/multi/test_compat.py index b2500efef9e03..72b5ed0edaa78 100644 --- a/pandas/tests/indexes/multi/test_compat.py +++ b/pandas/tests/indexes/multi/test_compat.py @@ -68,24 +68,33 @@ def test_inplace_mutation_resets_values(): mi1 = MultiIndex(levels=levels, codes=codes) mi2 = MultiIndex(levels=levels2, codes=codes) + + # instantiating MultiIndex should not access/cache _.values + assert "_values" not in mi1._cache + assert "_values" not in mi2._cache + vals = mi1.values.copy() vals2 = mi2.values.copy() - assert mi1._tuples is not None + # accessing .values should cache ._values + assert mi1._values is mi1._cache["_values"] + assert mi1.values is mi1._cache["_values"] + assert isinstance(mi1._cache["_values"], np.ndarray) # Make sure level setting works new_vals = mi1.set_levels(levels2).values tm.assert_almost_equal(vals2, new_vals) - # Non-inplace doesn't kill _tuples [implementation detail] - tm.assert_almost_equal(mi1._tuples, vals) + # Non-inplace doesn't drop _values from _cache [implementation detail] + tm.assert_almost_equal(mi1._cache["_values"], vals) # ...and values is still same too tm.assert_almost_equal(mi1.values, vals) - # Inplace should kill _tuples + # Inplace should drop _values from _cache with tm.assert_produces_warning(FutureWarning): mi1.set_levels(levels2, inplace=True) + assert "_values" not in mi1._cache tm.assert_almost_equal(mi1.values, vals2) # Make sure label setting works too @@ -95,18 +104,24 @@ def test_inplace_mutation_resets_values(): # Must be 1d array of tuples assert exp_values.shape == (6,) - new_values = mi2.set_codes(codes2).values + + new_mi = mi2.set_codes(codes2) + assert "_values" not in new_mi._cache + new_values = new_mi.values + 
assert "_values" in new_mi._cache # Not inplace shouldn't change - tm.assert_almost_equal(mi2._tuples, vals2) + tm.assert_almost_equal(mi2._cache["_values"], vals2) # Should have correct values tm.assert_almost_equal(exp_values, new_values) - # ...and again setting inplace should kill _tuples, etc + # ...and again setting inplace should drop _values from _cache, etc with tm.assert_produces_warning(FutureWarning): mi2.set_codes(codes2, inplace=True) + assert "_values" not in mi2._cache tm.assert_almost_equal(mi2.values, new_values) + assert "_values" in mi2._cache def test_ndarray_compat_properties(idx, compat_props): From cac9f28364a3fe050dd01b141abbbf4396312267 Mon Sep 17 00:00:00 2001 From: Tom Augspurger Date: Mon, 10 Aug 2020 18:02:11 -0500 Subject: [PATCH 250/632] Doc notes for core team members (#35608) --- doc/source/development/maintaining.rst | 42 ++++++++++++++++++++++++-- 1 file changed, 39 insertions(+), 3 deletions(-) diff --git a/doc/source/development/maintaining.rst b/doc/source/development/maintaining.rst index 9f9e9dc2631f3..cd084ab263477 100644 --- a/doc/source/development/maintaining.rst +++ b/doc/source/development/maintaining.rst @@ -132,17 +132,24 @@ respond or self-close their issue if it's determined that the behavior is not a or the feature is out of scope. Sometimes reporters just go away though, and we'll close the issue after the conversation has died. +.. _maintaining.reviewing: + Reviewing pull requests ----------------------- Anybody can review a pull request: regular contributors, triagers, or core-team -members. Here are some guidelines to check. +members. But only core-team members can merge pull requets when they're ready. + +Here are some things to check when reviewing a pull request. -* Tests should be in a sensible location. +* Tests should be in a sensible location: in the same file as closely related tests. * New public APIs should be included somewhere in ``doc/source/reference/``. * New / changed API should use the ``versionadded`` or ``versionchanged`` directives in the docstring. * User-facing changes should have a whatsnew in the appropriate file. * Regression tests should reference the original GitHub issue number like ``# GH-1234``. +* The pull request should be labeled and assigned the appropriate milestone (the next patch release + for regression fixes and small bug fixes, the next minor milestone otherwise) +* Changes should comply with our :ref:`policies.version`. Cleaning up old issues ---------------------- @@ -189,5 +196,34 @@ being helpful on the issue tracker. The current list of core-team members is at https://github.com/pandas-dev/pandas-governance/blob/master/people.md + +.. _maintaining.merging: + +Merging pull requests +--------------------- + +Only core team members can merge pull requests. We have a few guidelines. + +1. You should typically not self-merge your own pull requests. Exceptions include + things like small changes to fix CI (e.g. pinning a package version). +2. You should not merge pull requests that have an active discussion, or pull + requests that has any ``-1`` votes from a core maintainer. Pandas operates + by consensus. +3. For larger changes, it's good to have a +1 from at least two core team members. + +In addition to the items listed in :ref:`maintaining.closing`, you should verify +that the pull request is assigned the correct milestone. + +Pull requests merged with a patch-release milestone will typically be backported +by our bot. 
Verify that the bot noticed the merge (it will leave a comment within +a minute typically). If a manual backport is needed please do that, and remove +the "Needs backport" label once you've done it manually. If you forget to assign +a milestone before tagging, you can request the bot to backport it with: + +.. code-block:: console + + @Meeseeksdev backport + + .. _governance documents: https://github.com/pandas-dev/pandas-governance -.. _list of permissions: https://help.github.com/en/github/setting-up-and-managing-organizations-and-teams/repository-permission-levels-for-an-organization \ No newline at end of file +.. _list of permissions: https://help.github.com/en/github/setting-up-and-managing-organizations-and-teams/repository-permission-levels-for-an-organization From 993ab0825cc4d47f99490a5cf6255bf6730c580f Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Mon, 10 Aug 2020 17:01:57 -0700 Subject: [PATCH 251/632] BUG: DataFrame.apply with func altering row in-place (#35633) --- doc/source/whatsnew/v1.1.1.rst | 1 + pandas/core/apply.py | 2 ++ pandas/tests/frame/apply/test_frame_apply.py | 19 +++++++++++++++++++ 3 files changed, 22 insertions(+) diff --git a/doc/source/whatsnew/v1.1.1.rst b/doc/source/whatsnew/v1.1.1.rst index e5860644fa371..415f9e508feb8 100644 --- a/doc/source/whatsnew/v1.1.1.rst +++ b/doc/source/whatsnew/v1.1.1.rst @@ -21,6 +21,7 @@ Fixed regressions - Fixed regression in :class:`pandas.core.groupby.RollingGroupby` where column selection was ignored (:issue:`35486`) - Fixed regression in :meth:`DataFrame.shift` with ``axis=1`` and heterogeneous dtypes (:issue:`35488`) - Fixed regression in ``.groupby(..).rolling(..)`` where a segfault would occur with ``center=True`` and an odd number of values (:issue:`35552`) +- Fixed regression in :meth:`DataFrame.apply` where functions that altered the input in-place only operated on a single row (:issue:`35462`) .. 
--------------------------------------------------------------------------- diff --git a/pandas/core/apply.py b/pandas/core/apply.py index 6b8d7dc35fe95..6d44cf917a07a 100644 --- a/pandas/core/apply.py +++ b/pandas/core/apply.py @@ -389,6 +389,8 @@ def series_generator(self): blk = mgr.blocks[0] for (arr, name) in zip(values, self.index): + # GH#35462 re-pin mgr in case setitem changed it + ser._mgr = mgr blk.values = arr ser.name = name yield ser diff --git a/pandas/tests/frame/apply/test_frame_apply.py b/pandas/tests/frame/apply/test_frame_apply.py index 3a32278e2a4b1..538978358c8e7 100644 --- a/pandas/tests/frame/apply/test_frame_apply.py +++ b/pandas/tests/frame/apply/test_frame_apply.py @@ -1522,3 +1522,22 @@ def test_apply_dtype(self, col): expected = df.dtypes tm.assert_series_equal(result, expected) + + +def test_apply_mutating(): + # GH#35462 case where applied func pins a new BlockManager to a row + df = pd.DataFrame({"a": range(100), "b": range(100, 200)}) + + def func(row): + mgr = row._mgr + row.loc["a"] += 1 + assert row._mgr is not mgr + return row + + expected = df.copy() + expected["a"] += 1 + + result = df.apply(func, axis=1) + + tm.assert_frame_equal(result, expected) + tm.assert_frame_equal(df, result) From 0639e7f71507fae96bca674003e5904444104ccc Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Mon, 10 Aug 2020 17:35:38 -0700 Subject: [PATCH 252/632] REF: use consistent pattern in tslibs.vectorized (#35613) --- pandas/_libs/tslibs/vectorized.pyx | 149 +++++++++++++---------------- 1 file changed, 66 insertions(+), 83 deletions(-) diff --git a/pandas/_libs/tslibs/vectorized.pyx b/pandas/_libs/tslibs/vectorized.pyx index b23f8255a76ac..c3c78ca54885a 100644 --- a/pandas/_libs/tslibs/vectorized.pyx +++ b/pandas/_libs/tslibs/vectorized.pyx @@ -275,44 +275,38 @@ cpdef ndarray[int64_t] normalize_i8_timestamps(const int64_t[:] stamps, tzinfo t int64_t[:] deltas str typ Py_ssize_t[:] pos - int64_t delta, local_val - - if tz is None or is_utc(tz): - with nogil: - for i in range(n): - if stamps[i] == NPY_NAT: - result[i] = NPY_NAT - continue - local_val = stamps[i] - result[i] = normalize_i8_stamp(local_val) + int64_t local_val, delta = NPY_NAT + bint use_utc = False, use_tzlocal = False, use_fixed = False + + if is_utc(tz) or tz is None: + use_utc = True elif is_tzlocal(tz): - for i in range(n): - if stamps[i] == NPY_NAT: - result[i] = NPY_NAT - continue - local_val = tz_convert_utc_to_tzlocal(stamps[i], tz) - result[i] = normalize_i8_stamp(local_val) + use_tzlocal = True else: - # Adjust datetime64 timestamp, recompute datetimestruct trans, deltas, typ = get_dst_info(tz) - if typ not in ["pytz", "dateutil"]: # static/fixed; in this case we know that len(delta) == 1 + use_fixed = True delta = deltas[0] - for i in range(n): - if stamps[i] == NPY_NAT: - result[i] = NPY_NAT - continue - local_val = stamps[i] + delta - result[i] = normalize_i8_stamp(local_val) else: pos = trans.searchsorted(stamps, side="right") - 1 - for i in range(n): - if stamps[i] == NPY_NAT: - result[i] = NPY_NAT - continue - local_val = stamps[i] + deltas[pos[i]] - result[i] = normalize_i8_stamp(local_val) + + for i in range(n): + # TODO: reinstate nogil for use_utc case? 
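The refactor above computes the timezone-handling strategy once and then runs a single loop, instead of duplicating the loop per timezone case; a runnable pure-Python sketch of that shape, simplified to two strategies (the real code is Cython and also handles tzlocal and DST transition tables):

    import numpy as np

    NPY_NAT = np.iinfo("int64").min
    DAY_NANOS = 24 * 3600 * 1_000_000_000

    def normalize_i8(stamps, fixed_offset_ns=None):
        # Decide the strategy once, outside the hot loop.
        use_fixed = fixed_offset_ns is not None
        out = np.empty_like(stamps)
        for i, stamp in enumerate(stamps):
            if stamp == NPY_NAT:
                out[i] = NPY_NAT
                continue
            local_val = stamp + fixed_offset_ns if use_fixed else stamp
            # Round down to midnight, as normalize_i8_stamp does.
            out[i] = local_val - (local_val % DAY_NANOS)
        return out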
+ if stamps[i] == NPY_NAT: + result[i] = NPY_NAT + continue + + if use_utc: + local_val = stamps[i] + elif use_tzlocal: + local_val = tz_convert_utc_to_tzlocal(stamps[i], tz) + elif use_fixed: + local_val = stamps[i] + delta + else: + local_val = stamps[i] + deltas[pos[i]] + + result[i] = normalize_i8_stamp(local_val) return result.base # `.base` to access underlying ndarray @@ -339,40 +333,36 @@ def is_date_array_normalized(const int64_t[:] stamps, tzinfo tz=None): ndarray[int64_t] trans int64_t[:] deltas intp_t[:] pos - int64_t local_val, delta + int64_t local_val, delta = NPY_NAT str typ int64_t day_nanos = 24 * 3600 * 1_000_000_000 + bint use_utc = False, use_tzlocal = False, use_fixed = False - if tz is None or is_utc(tz): - for i in range(n): - local_val = stamps[i] - if local_val % day_nanos != 0: - return False - + if is_utc(tz) or tz is None: + use_utc = True elif is_tzlocal(tz): - for i in range(n): - local_val = tz_convert_utc_to_tzlocal(stamps[i], tz) - if local_val % day_nanos != 0: - return False + use_tzlocal = True else: trans, deltas, typ = get_dst_info(tz) - if typ not in ["pytz", "dateutil"]: # static/fixed; in this case we know that len(delta) == 1 + use_fixed = True delta = deltas[0] - for i in range(n): - # Adjust datetime64 timestamp, recompute datetimestruct - local_val = stamps[i] + delta - if local_val % day_nanos != 0: - return False + else: + pos = trans.searchsorted(stamps, side="right") - 1 + for i in range(n): + if use_utc: + local_val = stamps[i] + elif use_tzlocal: + local_val = tz_convert_utc_to_tzlocal(stamps[i], tz) + elif use_fixed: + local_val = stamps[i] + delta else: - pos = trans.searchsorted(stamps) - 1 - for i in range(n): - # Adjust datetime64 timestamp, recompute datetimestruct - local_val = stamps[i] + deltas[pos[i]] - if local_val % day_nanos != 0: - return False + local_val = stamps[i] + deltas[pos[i]] + + if local_val % day_nanos != 0: + return False return True @@ -390,45 +380,38 @@ def dt64arr_to_periodarr(const int64_t[:] stamps, int freq, tzinfo tz): int64_t[:] deltas Py_ssize_t[:] pos npy_datetimestruct dts - int64_t local_val + int64_t local_val, delta = NPY_NAT + bint use_utc = False, use_tzlocal = False, use_fixed = False if is_utc(tz) or tz is None: - with nogil: - for i in range(n): - if stamps[i] == NPY_NAT: - result[i] = NPY_NAT - continue - dt64_to_dtstruct(stamps[i], &dts) - result[i] = get_period_ordinal(&dts, freq) - + use_utc = True elif is_tzlocal(tz): - for i in range(n): - if stamps[i] == NPY_NAT: - result[i] = NPY_NAT - continue - local_val = tz_convert_utc_to_tzlocal(stamps[i], tz) - dt64_to_dtstruct(local_val, &dts) - result[i] = get_period_ordinal(&dts, freq) + use_tzlocal = True else: - # Adjust datetime64 timestamp, recompute datetimestruct trans, deltas, typ = get_dst_info(tz) - if typ not in ["pytz", "dateutil"]: # static/fixed; in this case we know that len(delta) == 1 - for i in range(n): - if stamps[i] == NPY_NAT: - result[i] = NPY_NAT - continue - dt64_to_dtstruct(stamps[i] + deltas[0], &dts) - result[i] = get_period_ordinal(&dts, freq) + use_fixed = True + delta = deltas[0] else: pos = trans.searchsorted(stamps, side="right") - 1 - for i in range(n): - if stamps[i] == NPY_NAT: - result[i] = NPY_NAT - continue - dt64_to_dtstruct(stamps[i] + deltas[pos[i]], &dts) - result[i] = get_period_ordinal(&dts, freq) + for i in range(n): + # TODO: reinstate nogil for use_utc case? 
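The per-stamp modulo test that ``is_date_array_normalized`` applies can be reproduced with NumPy for the tz-naive/UTC case (a sketch; the patched code first folds in the timezone offset chosen by the same strategy flags):

    import numpy as np

    DAY_NANOS = 24 * 3600 * 1_000_000_000
    stamps = np.array(
        ["2020-01-01", "2020-01-02T03:00"], dtype="datetime64[ns]"
    ).view("int64")

    # Normalized means midnight-aligned: no remainder within the day.
    print((stamps % DAY_NANOS == 0).tolist())  # [True, False]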
+ if stamps[i] == NPY_NAT: + result[i] = NPY_NAT + continue + + if use_utc: + local_val = stamps[i] + elif use_tzlocal: + local_val = tz_convert_utc_to_tzlocal(stamps[i], tz) + elif use_fixed: + local_val = stamps[i] + delta + else: + local_val = stamps[i] + deltas[pos[i]] + + dt64_to_dtstruct(local_val, &dts) + result[i] = get_period_ordinal(&dts, freq) return result.base # .base to get underlying ndarray From c87e40c23575d28908bbfb806682133e45a15bca Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ale=C5=A1=20Erjavec?= Date: Tue, 11 Aug 2020 02:36:26 +0200 Subject: [PATCH 253/632] [FIX] Handle decimal and thousand separator in 'round_trip' converer (#35377) --- doc/source/whatsnew/v1.2.0.rst | 1 + pandas/_libs/src/parser/tokenizer.c | 57 +++++++++++- pandas/tests/io/parser/test_c_parser_only.py | 98 ++++++++++++++++++++ 3 files changed, 154 insertions(+), 2 deletions(-) diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index 94bb265c32e4c..ebc8ebf0dc2f7 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -155,6 +155,7 @@ I/O ^^^ - Bug in :meth:`to_csv` caused a ``ValueError`` when it was called with a filename in combination with ``mode`` containing a ``b`` (:issue:`35058`) +- In :meth:`read_csv` `float_precision='round_trip'` now handles `decimal` and `thousands` parameters (:issue:`35365`) - Plotting diff --git a/pandas/_libs/src/parser/tokenizer.c b/pandas/_libs/src/parser/tokenizer.c index a195c0daf5271..df8ec68986ccb 100644 --- a/pandas/_libs/src/parser/tokenizer.c +++ b/pandas/_libs/src/parser/tokenizer.c @@ -1778,20 +1778,73 @@ double precise_xstrtod(const char *str, char **endptr, char decimal, return number; } +/* copy a decimal number string with `decimal`, `tsep` as decimal point + and thousands separator to an equivalent c-locale decimal string (striping + `tsep`, replacing `decimal` with '.'). The returned memory should be free-d + with a call to `free`. +*/ + +char* _str_copy_decimal_str_c(const char *s, char **endpos, char decimal, + char tsep) { + const char *p = s; + size_t length = strlen(s); + char *s_copy = malloc(length + 1); + char *dst = s_copy; + // Copy Leading sign + if (*p == '+' || *p == '-') { + *dst++ = *p++; + } + // Copy integer part dropping `tsep` + while (isdigit_ascii(*p)) { + *dst++ = *p++; + p += (tsep != '\0' && *p == tsep); + } + // Replace `decimal` with '.' + if (*p == decimal) { + *dst++ = '.'; + p++; + } + // Copy the remainder of the string as is. + strncpy(dst, p, length + 1 - (p - s)); + if (endpos != NULL) + *endpos = (char *)(s + length); + return s_copy; +} + + double round_trip(const char *p, char **q, char decimal, char sci, char tsep, int skip_trailing, int *error, int *maybe_int) { + // 'normalize' representation to C-locale; replace decimal with '.' and + // remove t(housand)sep. + char *endptr; + char *pc = _str_copy_decimal_str_c(p, &endptr, decimal, tsep); // This is called from a nogil block in parsers.pyx // so need to explicitly get GIL before Python calls PyGILState_STATE gstate; gstate = PyGILState_Ensure(); - - double r = PyOS_string_to_double(p, q, 0); + char *endpc; + double r = PyOS_string_to_double(pc, &endpc, 0); + // PyOS_string_to_double needs to consume the whole string + if (endpc == pc + strlen(pc)) { + if (q != NULL) { + // report endptr from source string (p) + *q = (char *) endptr; + } + } else { + *error = -1; + if (q != NULL) { + // p and pc are different len due to tsep removal. Can't report + // how much it has consumed of p. Just rewind to beginning. 
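A Python sketch of the normalization that ``_str_copy_decimal_str_c`` performs before handing the string to ``PyOS_string_to_double``, plus the user-facing effect; the helper below is illustrative only (the C version is stricter about where the separators may appear):

    import io
    import pandas as pd

    def to_c_locale(s, decimal=",", thousands="."):
        # Strip thousands separators, then map the decimal mark to '.'.
        return s.replace(thousands, "").replace(decimal, ".")

    print(to_c_locale("2.334,01"))  # 2334.01 -- parseable in the C locale

    # With this fix, float_precision="round_trip" honors decimal/thousands
    # like the other precision modes:
    df = pd.read_csv(
        io.StringIO("A|B\n1|2.334,01\n"),
        sep="|", thousands=".", decimal=",", float_precision="round_trip",
    )
    print(df["B"][0])  # 2334.01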
+ *q = (char *)p; + } + } if (maybe_int != NULL) *maybe_int = 0; if (PyErr_Occurred() != NULL) *error = -1; else if (r == Py_HUGE_VAL) *error = (int)Py_HUGE_VAL; PyErr_Clear(); PyGILState_Release(gstate); + free(pc); return r; } diff --git a/pandas/tests/io/parser/test_c_parser_only.py b/pandas/tests/io/parser/test_c_parser_only.py index d76d01904731a..50179fc1ec4b8 100644 --- a/pandas/tests/io/parser/test_c_parser_only.py +++ b/pandas/tests/io/parser/test_c_parser_only.py @@ -606,3 +606,101 @@ def test_unix_style_breaks(c_parser_only): result = parser.read_csv(path, skiprows=2, encoding="utf-8", engine="c") expected = DataFrame(columns=["col_1", "col_2", "col_3"]) tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("float_precision", [None, "high", "round_trip"]) +@pytest.mark.parametrize( + "data,thousands,decimal", + [ + ( + """A|B|C +1|2,334.01|5 +10|13|10. +""", + ",", + ".", + ), + ( + """A|B|C +1|2.334,01|5 +10|13|10, +""", + ".", + ",", + ), + ], +) +def test_1000_sep_with_decimal( + c_parser_only, data, thousands, decimal, float_precision +): + parser = c_parser_only + expected = DataFrame({"A": [1, 10], "B": [2334.01, 13], "C": [5, 10.0]}) + + result = parser.read_csv( + StringIO(data), + sep="|", + thousands=thousands, + decimal=decimal, + float_precision=float_precision, + ) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "float_precision", [None, "high", "round_trip"], +) +@pytest.mark.parametrize( + "value,expected", + [ + ("-1,0", -1.0), + ("-1,2e0", -1.2), + ("-1e0", -1.0), + ("+1e0", 1.0), + ("+1e+0", 1.0), + ("+1e-1", 0.1), + ("+,1e1", 1.0), + ("+1,e0", 1.0), + ("-,1e1", -1.0), + ("-1,e0", -1.0), + ("0,1", 0.1), + ("1,", 1.0), + (",1", 0.1), + ("-,1", -0.1), + ("1_,", 1.0), + ("1_234,56", 1234.56), + ("1_234,56e0", 1234.56), + # negative cases; must not parse as float + ("_", "_"), + ("-_", "-_"), + ("-_1", "-_1"), + ("-_1e0", "-_1e0"), + ("_1", "_1"), + ("_1,", "_1,"), + ("_1,_", "_1,_"), + ("_1e0", "_1e0"), + ("1,2e_1", "1,2e_1"), + ("1,2e1_0", "1,2e1_0"), + ("1,_2", "1,_2"), + (",1__2", ",1__2"), + (",1e", ",1e"), + ("-,1e", "-,1e"), + ("1_000,000_000", "1_000,000_000"), + ("1,e1_2", "1,e1_2"), + ], +) +def test_1000_sep_decimal_float_precision( + c_parser_only, value, expected, float_precision +): + # test decimal and thousand sep handling in across 'float_precision' + # parsers + parser = c_parser_only + df = parser.read_csv( + StringIO(value), + sep="|", + thousands="_", + decimal=",", + header=None, + float_precision=float_precision, + ) + val = df.iloc[0, 0] + assert val == expected From 83807088329b2a7e6422e0d0ba460870a265d3d2 Mon Sep 17 00:00:00 2001 From: Fangchen Li Date: Mon, 10 Aug 2020 20:17:47 -0500 Subject: [PATCH 254/632] Drop Python 3.6 support (#35214) * DEPS: drop 3.6 (#34472) * DEPS: drop 3.6 (#34472) * DEPS: fix file name (#34472) * DEPS: fix import (#34472) * DEPS: fix job name (#34472) * DEPS: resolve min version conflicts (#34472) * DEPS: fix env name (#34472) * DEPS: remove py36 check in test, bump matplotlib (#34472) * DEPS: fix travis 37 locale (#34472) * DEPS: remove PY37 check from tests (#34472) * DEPS: remove import (#34472) * DEPS: remove PY37 in benchmark (#34472) * try to fix timeout * pytable version * update minimum version * remove xfail for test apply * remove import * try to fix timeout * try to fix timeout * try to fix timeout * bump to 3.7.1 to fix timeout * migrate ci * fix env name * remove py37-locale from azure * resolve conflicts * update ci * update ci * sync with master * 
whatsnew and install doc * whatsnew and install doc * update environment.yml * update environment.yml * uncomment azure p37 locale * move min pyarrow test * bumpy numpy to 1.16.5 * bumpy numpy to 1.16.5 * fix 32bit * comment out 32bit CI * update numpy version in numpy/__init__.py * remove import from numpy/__init__.py * filter DeprecationWarning * filter DeprecationWarning * skip unreliable test for windows * skip unreliable test for windows * fix parameter order in docstring * skip test * skip test --- .travis.yml | 4 +- asv_bench/benchmarks/package.py | 24 +- ci/azure/posix.yml | 52 ++--- ci/azure/windows.yml | 12 +- ...zure-36-32bit.yaml => azure-37-32bit.yaml} | 8 +- ci/deps/azure-37-locale.yaml | 8 +- ...le_slow.yaml => azure-37-locale_slow.yaml} | 18 +- ...ns.yaml => azure-37-minimum_versions.yaml} | 17 +- ...{azure-36-slow.yaml => azure-37-slow.yaml} | 2 +- ...re-36-locale.yaml => azure-38-locale.yaml} | 16 +- ...7-numpydev.yaml => azure-38-numpydev.yaml} | 2 +- ...zure-macos-36.yaml => azure-macos-37.yaml} | 6 +- ci/deps/azure-windows-37.yaml | 4 +- ...-windows-36.yaml => azure-windows-38.yaml} | 8 +- ...{travis-36-cov.yaml => travis-37-cov.yaml} | 6 +- ...s-36-locale.yaml => travis-37-locale.yaml} | 10 +- doc/source/getting_started/install.rst | 36 +-- doc/source/whatsnew/v1.2.0.rst | 78 +++++++ environment.yml | 6 +- pandas/__init__.py | 208 +++++------------- pandas/compat/__init__.py | 1 - pandas/compat/numpy/__init__.py | 9 +- pandas/core/frame.py | 5 +- pandas/tests/api/test_api.py | 25 +-- .../arrays/categorical/test_constructors.py | 3 - pandas/tests/extension/arrow/test_bool.py | 4 - pandas/tests/extension/test_numpy.py | 6 - pandas/tests/frame/test_api.py | 13 +- pandas/tests/frame/test_constructors.py | 5 +- pandas/tests/groupby/test_categorical.py | 9 - .../tests/io/json/test_json_table_schema.py | 6 + pandas/tests/io/json/test_pandas.py | 3 + pandas/tests/io/parser/test_common.py | 2 + pandas/tests/scalar/test_nat.py | 4 - pandas/tests/tseries/offsets/test_offsets.py | 11 +- pandas/util/__init__.py | 30 +-- pyproject.toml | 8 +- requirements-dev.txt | 6 +- setup.py | 9 +- 39 files changed, 283 insertions(+), 401 deletions(-) rename ci/deps/{azure-36-32bit.yaml => azure-37-32bit.yaml} (76%) rename ci/deps/{azure-36-locale_slow.yaml => azure-37-locale_slow.yaml} (67%) rename ci/deps/{azure-36-minimum_versions.yaml => azure-37-minimum_versions.yaml} (70%) rename ci/deps/{azure-36-slow.yaml => azure-37-slow.yaml} (96%) rename ci/deps/{azure-36-locale.yaml => azure-38-locale.yaml} (69%) rename ci/deps/{azure-37-numpydev.yaml => azure-38-numpydev.yaml} (96%) rename ci/deps/{azure-macos-36.yaml => azure-macos-37.yaml} (89%) rename ci/deps/{azure-windows-36.yaml => azure-windows-38.yaml} (84%) rename ci/deps/{travis-36-cov.yaml => travis-37-cov.yaml} (93%) rename ci/deps/{travis-36-locale.yaml => travis-37-locale.yaml} (85%) diff --git a/.travis.yml b/.travis.yml index b016cf386098e..2e98cf47aea3e 100644 --- a/.travis.yml +++ b/.travis.yml @@ -45,7 +45,7 @@ matrix: - JOB="3.7, arm64" PYTEST_WORKERS=8 ENV_FILE="ci/deps/travis-37-arm64.yaml" PATTERN="(not slow and not network and not clipboard)" - env: - - JOB="3.6, locale" ENV_FILE="ci/deps/travis-36-locale.yaml" PATTERN="((not slow and not network and not clipboard) or (single and db))" LOCALE_OVERRIDE="zh_CN.UTF-8" SQL="1" + - JOB="3.7, locale" ENV_FILE="ci/deps/travis-37-locale.yaml" PATTERN="((not slow and not network and not clipboard) or (single and db))" LOCALE_OVERRIDE="zh_CN.UTF-8" SQL="1" services: - mysql - 
postgresql @@ -54,7 +54,7 @@ matrix: # Enabling Deprecations when running tests # PANDAS_TESTING_MODE="deprecate" causes DeprecationWarning messages to be displayed in the logs # See pandas/_testing.py for more details. - - JOB="3.6, coverage" ENV_FILE="ci/deps/travis-36-cov.yaml" PATTERN="((not slow and not network and not clipboard) or (single and db))" PANDAS_TESTING_MODE="deprecate" COVERAGE=true SQL="1" + - JOB="3.7, coverage" ENV_FILE="ci/deps/travis-37-cov.yaml" PATTERN="((not slow and not network and not clipboard) or (single and db))" PANDAS_TESTING_MODE="deprecate" COVERAGE=true SQL="1" services: - mysql - postgresql diff --git a/asv_bench/benchmarks/package.py b/asv_bench/benchmarks/package.py index 8ca33db361fa0..34fe4929a752b 100644 --- a/asv_bench/benchmarks/package.py +++ b/asv_bench/benchmarks/package.py @@ -4,22 +4,16 @@ import subprocess import sys -from pandas.compat import PY37 - class TimeImport: def time_import(self): - if PY37: - # on py37+ we the "-X importtime" usage gives us a more precise - # measurement of the import time we actually care about, - # without the subprocess or interpreter overhead - cmd = [sys.executable, "-X", "importtime", "-c", "import pandas as pd"] - p = subprocess.run(cmd, stderr=subprocess.PIPE) - - line = p.stderr.splitlines()[-1] - field = line.split(b"|")[-2].strip() - total = int(field) # microseconds - return total + # on py37+ we the "-X importtime" usage gives us a more precise + # measurement of the import time we actually care about, + # without the subprocess or interpreter overhead + cmd = [sys.executable, "-X", "importtime", "-c", "import pandas as pd"] + p = subprocess.run(cmd, stderr=subprocess.PIPE) - cmd = [sys.executable, "-c", "import pandas as pd"] - subprocess.run(cmd, stderr=subprocess.PIPE) + line = p.stderr.splitlines()[-1] + field = line.split(b"|")[-2].strip() + total = int(field) # microseconds + return total diff --git a/ci/azure/posix.yml b/ci/azure/posix.yml index f716974f6add1..9f8174b4fa678 100644 --- a/ci/azure/posix.yml +++ b/ci/azure/posix.yml @@ -9,20 +9,20 @@ jobs: strategy: matrix: ${{ if eq(parameters.name, 'macOS') }}: - py36_macos: - ENV_FILE: ci/deps/azure-macos-36.yaml - CONDA_PY: "36" + py37_macos: + ENV_FILE: ci/deps/azure-macos-37.yaml + CONDA_PY: "37" PATTERN: "not slow and not network" ${{ if eq(parameters.name, 'Linux') }}: - py36_minimum_versions: - ENV_FILE: ci/deps/azure-36-minimum_versions.yaml - CONDA_PY: "36" + py37_minimum_versions: + ENV_FILE: ci/deps/azure-37-minimum_versions.yaml + CONDA_PY: "37" PATTERN: "not slow and not network and not clipboard" - py36_locale_slow_old_np: - ENV_FILE: ci/deps/azure-36-locale_slow.yaml - CONDA_PY: "36" + py37_locale_slow: + ENV_FILE: ci/deps/azure-37-locale_slow.yaml + CONDA_PY: "37" PATTERN: "slow" # pandas does not use the language (zh_CN), but should support different encodings (utf8) # we should test with encodings different than utf8, but doesn't seem like Ubuntu supports any @@ -30,36 +30,36 @@ jobs: LC_ALL: "zh_CN.utf8" EXTRA_APT: "language-pack-zh-hans" - py36_slow: - ENV_FILE: ci/deps/azure-36-slow.yaml - CONDA_PY: "36" + py37_slow: + ENV_FILE: ci/deps/azure-37-slow.yaml + CONDA_PY: "37" PATTERN: "slow" - py36_locale: - ENV_FILE: ci/deps/azure-36-locale.yaml - CONDA_PY: "36" + py37_locale: + ENV_FILE: ci/deps/azure-37-locale.yaml + CONDA_PY: "37" PATTERN: "not slow and not network" LANG: "it_IT.utf8" LC_ALL: "it_IT.utf8" EXTRA_APT: "language-pack-it xsel" - #py36_32bit: - # ENV_FILE: ci/deps/azure-36-32bit.yaml - # CONDA_PY: "36" - # 
PATTERN: "not slow and not network and not clipboard" - # BITS32: "yes" +# py37_32bit: +# ENV_FILE: ci/deps/azure-37-32bit.yaml +# CONDA_PY: "37" +# PATTERN: "not slow and not network and not clipboard" +# BITS32: "yes" - py37_locale: - ENV_FILE: ci/deps/azure-37-locale.yaml - CONDA_PY: "37" + py38_locale: + ENV_FILE: ci/deps/azure-38-locale.yaml + CONDA_PY: "38" PATTERN: "not slow and not network" LANG: "zh_CN.utf8" LC_ALL: "zh_CN.utf8" EXTRA_APT: "language-pack-zh-hans xsel" - py37_np_dev: - ENV_FILE: ci/deps/azure-37-numpydev.yaml - CONDA_PY: "37" + py38_np_dev: + ENV_FILE: ci/deps/azure-38-numpydev.yaml + CONDA_PY: "38" PATTERN: "not slow and not network" TEST_ARGS: "-W error" PANDAS_TESTING_MODE: "deprecate" diff --git a/ci/azure/windows.yml b/ci/azure/windows.yml index 87f1bfd2adb79..5938ba1fd69f5 100644 --- a/ci/azure/windows.yml +++ b/ci/azure/windows.yml @@ -8,16 +8,16 @@ jobs: vmImage: ${{ parameters.vmImage }} strategy: matrix: - py36_np15: - ENV_FILE: ci/deps/azure-windows-36.yaml - CONDA_PY: "36" - PATTERN: "not slow and not network" - - py37_np18: + py37_np16: ENV_FILE: ci/deps/azure-windows-37.yaml CONDA_PY: "37" PATTERN: "not slow and not network" + py38_np18: + ENV_FILE: ci/deps/azure-windows-38.yaml + CONDA_PY: "38" + PATTERN: "not slow and not network" + steps: - powershell: | Write-Host "##vso[task.prependpath]$env:CONDA\Scripts" diff --git a/ci/deps/azure-36-32bit.yaml b/ci/deps/azure-37-32bit.yaml similarity index 76% rename from ci/deps/azure-36-32bit.yaml rename to ci/deps/azure-37-32bit.yaml index 15704cf0d5427..8e0cd73a9536d 100644 --- a/ci/deps/azure-36-32bit.yaml +++ b/ci/deps/azure-37-32bit.yaml @@ -3,10 +3,10 @@ channels: - defaults - conda-forge dependencies: - - python=3.6.* + - python=3.7.* # tools - ### Cython 0.29.13 and pytest 5.0.1 for 32 bits are not available with conda, installing below with pip instead + ### Cython 0.29.16 and pytest 5.0.1 for 32 bits are not available with conda, installing below with pip instead - pytest-xdist>=1.21 - hypothesis>=3.58.0 - pytest-azurepipelines @@ -15,12 +15,12 @@ dependencies: - attrs=19.1.0 - gcc_linux-32 - gxx_linux-32 - - numpy=1.14.* - python-dateutil - - pytz=2017.2 + - pytz=2017.3 # see comment above - pip - pip: - cython>=0.29.16 + - numpy>=1.16.5 - pytest>=5.0.1 diff --git a/ci/deps/azure-37-locale.yaml b/ci/deps/azure-37-locale.yaml index 6f64c81f299d1..a6552aa096a22 100644 --- a/ci/deps/azure-37-locale.yaml +++ b/ci/deps/azure-37-locale.yaml @@ -1,5 +1,6 @@ name: pandas-dev channels: + - defaults - conda-forge dependencies: - python=3.7.* @@ -22,7 +23,7 @@ dependencies: - moto - nomkl - numexpr - - numpy + - numpy=1.16.* - openpyxl - pytables - python-dateutil @@ -32,7 +33,4 @@ dependencies: - xlrd - xlsxwriter - xlwt - - pyarrow>=0.15 - - pip - - pip: - - pyxlsb + - moto diff --git a/ci/deps/azure-36-locale_slow.yaml b/ci/deps/azure-37-locale_slow.yaml similarity index 67% rename from ci/deps/azure-36-locale_slow.yaml rename to ci/deps/azure-37-locale_slow.yaml index c086b3651afc3..3ccb66e09fe7e 100644 --- a/ci/deps/azure-36-locale_slow.yaml +++ b/ci/deps/azure-37-locale_slow.yaml @@ -3,7 +3,7 @@ channels: - defaults - conda-forge dependencies: - - python=3.6.* + - python=3.7.* # tools - cython>=0.29.16 @@ -16,17 +16,15 @@ dependencies: - beautifulsoup4=4.6.0 - bottleneck=1.2.* - lxml - - matplotlib=2.2.2 - - numpy=1.14.* + - matplotlib=3.0.0 + - numpy=1.16.* - openpyxl=2.5.7 - python-dateutil - python-blosc - - pytz=2017.2 + - pytz=2017.3 - scipy - - sqlalchemy=1.1.4 + - sqlalchemy=1.2.8 - xlrd=1.1.0 - 
- xlsxwriter=0.9.8 - - xlwt=1.2.0 - - pip - - pip: - - html5lib==1.0b2 + - xlsxwriter=1.0.2 + - xlwt=1.3.0 + - html5lib=1.0.1 diff --git a/ci/deps/azure-36-minimum_versions.yaml b/ci/deps/azure-37-minimum_versions.yaml similarity index 70% rename from ci/deps/azure-36-minimum_versions.yaml rename to ci/deps/azure-37-minimum_versions.yaml index f5af7bcf36189..94cc5812bcc10 100644 --- a/ci/deps/azure-36-minimum_versions.yaml +++ b/ci/deps/azure-37-minimum_versions.yaml @@ -2,7 +2,7 @@ name: pandas-dev channels: - conda-forge dependencies: - - python=3.6.1 + - python=3.7.1 # tools - cython=0.29.16 @@ -15,16 +15,17 @@ dependencies: # pandas dependencies - beautifulsoup4=4.6.0 - bottleneck=1.2.1 - - jinja2=2.8 + - jinja2=2.10 - numba=0.46.0 - - numexpr=2.6.2 - - numpy=1.15.4 + - numexpr=2.6.8 + - numpy=1.16.5 - openpyxl=2.5.7 - - pytables=3.4.3 + - pytables=3.4.4 - python-dateutil=2.7.3 - - pytz=2017.2 + - pytz=2017.3 + - pyarrow=0.15 - scipy=1.2 - xlrd=1.1.0 - - xlsxwriter=0.9.8 - - xlwt=1.2.0 + - xlsxwriter=1.0.2 + - xlwt=1.3.0 - html5lib=1.0.1 diff --git a/ci/deps/azure-36-slow.yaml b/ci/deps/azure-37-slow.yaml similarity index 96% rename from ci/deps/azure-36-slow.yaml rename to ci/deps/azure-37-slow.yaml index 87bad59fa4873..e8ffd3d74ca5e 100644 --- a/ci/deps/azure-36-slow.yaml +++ b/ci/deps/azure-37-slow.yaml @@ -3,7 +3,7 @@ channels: - defaults - conda-forge dependencies: - - python=3.6.* + - python=3.7.* # tools - cython>=0.29.16 diff --git a/ci/deps/azure-36-locale.yaml b/ci/deps/azure-38-locale.yaml similarity index 69% rename from ci/deps/azure-36-locale.yaml rename to ci/deps/azure-38-locale.yaml index 3034ed3dc43af..c466a5929ea29 100644 --- a/ci/deps/azure-36-locale.yaml +++ b/ci/deps/azure-38-locale.yaml @@ -1,9 +1,8 @@ name: pandas-dev channels: - - defaults - conda-forge dependencies: - - python=3.6.* + - python=3.8.* # tools - cython>=0.29.16 @@ -19,14 +18,12 @@ dependencies: - ipython - jinja2 - lxml - - matplotlib=3.0.* + - matplotlib <3.3.0 + - moto - nomkl - numexpr - - numpy=1.15.* + - numpy - openpyxl - # lowest supported version of pyarrow (putting it here instead of in - # azure-36-minimum_versions because it needs numpy >= 1.14) - - pyarrow=0.13 - pytables - python-dateutil - pytz @@ -35,4 +32,7 @@ dependencies: - xlrd - xlsxwriter - xlwt - - moto + - pyarrow>=0.15 + - pip + - pip: + - pyxlsb diff --git a/ci/deps/azure-37-numpydev.yaml b/ci/deps/azure-38-numpydev.yaml similarity index 96% rename from ci/deps/azure-37-numpydev.yaml rename to ci/deps/azure-38-numpydev.yaml index 5cb58756a6ac1..37592086d49e3 100644 --- a/ci/deps/azure-37-numpydev.yaml +++ b/ci/deps/azure-38-numpydev.yaml @@ -2,7 +2,7 @@ name: pandas-dev channels: - defaults dependencies: - - python=3.7.* + - python=3.8.* # tools - pytest>=5.0.1 diff --git a/ci/deps/azure-macos-36.yaml b/ci/deps/azure-macos-37.yaml similarity index 89% rename from ci/deps/azure-macos-36.yaml rename to ci/deps/azure-macos-37.yaml index eeea249a19ca1..a5a69b9a59576 100644 --- a/ci/deps/azure-macos-36.yaml +++ b/ci/deps/azure-macos-37.yaml @@ -2,7 +2,7 @@ name: pandas-dev channels: - defaults dependencies: - - python=3.6.* + - python=3.7.* # tools - pytest>=5.0.1 @@ -19,9 +19,9 @@ dependencies: - matplotlib=2.2.3 - nomkl - numexpr - - numpy=1.15.4 + - numpy=1.16.5 - openpyxl - - pyarrow>=0.13.0 + - pyarrow>=0.15.0 - pytables - python-dateutil==2.7.3 - pytz diff --git a/ci/deps/azure-windows-37.yaml b/ci/deps/azure-windows-37.yaml index 5bbd0e2795d7e..4d745454afcab 100644 --- a/ci/deps/azure-windows-37.yaml +++ 
b/ci/deps/azure-windows-37.yaml @@ -23,9 +23,9 @@ dependencies: - matplotlib=2.2.* - moto - numexpr - - numpy=1.18.* + - numpy=1.16.* - openpyxl - - pyarrow=0.14 + - pyarrow=0.15 - pytables - python-dateutil - pytz diff --git a/ci/deps/azure-windows-36.yaml b/ci/deps/azure-windows-38.yaml similarity index 84% rename from ci/deps/azure-windows-36.yaml rename to ci/deps/azure-windows-38.yaml index 548660cabaa67..f428a6dadfaa2 100644 --- a/ci/deps/azure-windows-36.yaml +++ b/ci/deps/azure-windows-38.yaml @@ -3,7 +3,7 @@ channels: - conda-forge - defaults dependencies: - - python=3.6.* + - python=3.8.* # tools - cython>=0.29.16 @@ -16,13 +16,13 @@ dependencies: - blosc - bottleneck - fastparquet>=0.3.2 - - matplotlib=3.0.2 + - matplotlib=3.1.3 - numba - numexpr - - numpy=1.15.* + - numpy=1.18.* - openpyxl - jinja2 - - pyarrow>=0.13.0 + - pyarrow>=0.15.0 - pytables - python-dateutil - pytz diff --git a/ci/deps/travis-36-cov.yaml b/ci/deps/travis-37-cov.yaml similarity index 93% rename from ci/deps/travis-36-cov.yaml rename to ci/deps/travis-37-cov.yaml index 177e0d3f4c0af..3a0827a16f97a 100644 --- a/ci/deps/travis-36-cov.yaml +++ b/ci/deps/travis-37-cov.yaml @@ -3,7 +3,7 @@ channels: - defaults - conda-forge dependencies: - - python=3.6.* + - python=3.7.* # tools - cython>=0.29.16 @@ -26,12 +26,12 @@ dependencies: - moto - nomkl - numexpr - - numpy=1.15.* + - numpy=1.16.* - odfpy - openpyxl - pandas-gbq - psycopg2 - - pyarrow>=0.13.0 + - pyarrow>=0.15.0 - pymysql - pytables - python-snappy diff --git a/ci/deps/travis-36-locale.yaml b/ci/deps/travis-37-locale.yaml similarity index 85% rename from ci/deps/travis-36-locale.yaml rename to ci/deps/travis-37-locale.yaml index 03a1e751b6a86..4427c1d940bf2 100644 --- a/ci/deps/travis-36-locale.yaml +++ b/ci/deps/travis-37-locale.yaml @@ -3,7 +3,7 @@ channels: - defaults - conda-forge dependencies: - - python=3.6.* + - python=3.7.* # tools - cython>=0.29.16 @@ -19,7 +19,7 @@ dependencies: - html5lib - ipython - jinja2 - - lxml=3.8.0 + - lxml=4.3.0 - matplotlib=3.0.* - moto - nomkl @@ -27,14 +27,14 @@ dependencies: - numpy - openpyxl - pandas-gbq=0.12.0 - - psycopg2=2.6.2 + - psycopg2=2.7 - pymysql=0.7.11 - pytables - python-dateutil - pytz - scipy - - sqlalchemy=1.1.4 - - xarray=0.10 + - sqlalchemy=1.3.0 + - xarray=0.12.0 - xlrd - xlsxwriter - xlwt diff --git a/doc/source/getting_started/install.rst b/doc/source/getting_started/install.rst index b79a9cd872c47..7ab150394bf51 100644 --- a/doc/source/getting_started/install.rst +++ b/doc/source/getting_started/install.rst @@ -18,7 +18,7 @@ Instructions for installing from source, Python version support ---------------------- -Officially Python 3.6.1 and above, 3.7, and 3.8. +Officially Python 3.7.1 and above, and 3.8. Installing pandas ----------------- @@ -220,9 +220,9 @@ Dependencies Package Minimum supported version ================================================================ ========================== `setuptools `__ 24.2.0 -`NumPy `__ 1.15.4 +`NumPy `__ 1.16.5 `python-dateutil `__ 2.7.3 -`pytz `__ 2017.2 +`pytz `__ 2017.3 ================================================================ ========================== .. _install.recommended_dependencies: @@ -232,7 +232,7 @@ Recommended dependencies * `numexpr `__: for accelerating certain numerical operations. ``numexpr`` uses multiple cores as well as smart chunking and caching to achieve large speedups. - If installed, must be Version 2.6.2 or higher. + If installed, must be Version 2.6.8 or higher. 
* `bottleneck `__: for accelerating certain types of ``nan`` evaluations. ``bottleneck`` uses specialized cython routines to achieve large speedups. If installed, @@ -259,36 +259,36 @@ the method requiring that dependency is called. Dependency Minimum Version Notes ========================= ================== ============================================================= BeautifulSoup4 4.6.0 HTML parser for read_html (see :ref:`note `) -Jinja2 Conditional formatting with DataFrame.style +Jinja2 2.10 Conditional formatting with DataFrame.style PyQt4 Clipboard I/O PyQt5 Clipboard I/O -PyTables 3.4.3 HDF5-based reading / writing -SQLAlchemy 1.1.4 SQL support for databases other than sqlite -SciPy 0.19.0 Miscellaneous statistical functions -XLsxWriter 0.9.8 Excel writing -blosc Compression for HDF5 +PyTables 3.4.4 HDF5-based reading / writing +SQLAlchemy 1.2.8 SQL support for databases other than sqlite +SciPy 1.2.0 Miscellaneous statistical functions +xlsxwriter 1.0.2 Excel writing +blosc 1.14.3 Compression for HDF5 fsspec 0.7.4 Handling files aside from local and HTTP fastparquet 0.3.2 Parquet reading / writing gcsfs 0.6.0 Google Cloud Storage access -html5lib HTML parser for read_html (see :ref:`note `) -lxml 3.8.0 HTML parser for read_html (see :ref:`note `) -matplotlib 2.2.2 Visualization +html5lib 1.0.1 HTML parser for read_html (see :ref:`note `) +lxml 4.3.0 HTML parser for read_html (see :ref:`note `) +matplotlib 2.2.3 Visualization numba 0.46.0 Alternative execution engine for rolling operations openpyxl 2.5.7 Reading / writing for xlsx files pandas-gbq 0.12.0 Google Big Query access -psycopg2 PostgreSQL engine for sqlalchemy -pyarrow 0.12.0 Parquet, ORC (requires 0.13.0), and feather reading / writing +psycopg2 2.7 PostgreSQL engine for sqlalchemy +pyarrow 0.15.0 Parquet, ORC, and feather reading / writing pymysql 0.7.11 MySQL engine for sqlalchemy pyreadstat SPSS files (.sav) reading -pytables 3.4.3 HDF5 reading / writing +pytables 3.4.4 HDF5 reading / writing pyxlsb 1.0.6 Reading for xlsb files qtpy Clipboard I/O s3fs 0.4.0 Amazon S3 access tabulate 0.8.3 Printing in Markdown-friendly format (see `tabulate`_) -xarray 0.8.2 pandas-like API for N-dimensional data +xarray 0.12.0 pandas-like API for N-dimensional data xclip Clipboard I/O on linux xlrd 1.1.0 Excel reading -xlwt 1.2.0 Excel writing +xlwt 1.3.0 Excel writing xsel Clipboard I/O on linux zlib Compression for HDF5 ========================= ================== ============================================================= diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index ebc8ebf0dc2f7..86f47a5826214 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -54,6 +54,84 @@ Other enhancements - - +.. _whatsnew_120.api_breaking.python: + +Increased minimum version for Python +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +pandas 1.2.0 supports Python 3.7.1 and higher (:issue:`35214`). + +.. _whatsnew_120.api_breaking.deps: + +Increased minimum versions for dependencies +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Some minimum supported versions of dependencies were updated (:issue:`35214`).
+If installed, we now require: + ++-----------------+-----------------+----------+---------+ +| Package | Minimum Version | Required | Changed | ++=================+=================+==========+=========+ +| numpy | 1.16.5 | X | X | ++-----------------+-----------------+----------+---------+ +| pytz | 2017.3 | X | X | ++-----------------+-----------------+----------+---------+ +| python-dateutil | 2.7.3 | X | | ++-----------------+-----------------+----------+---------+ +| bottleneck | 1.2.1 | | | ++-----------------+-----------------+----------+---------+ +| numexpr | 2.6.8 | | X | ++-----------------+-----------------+----------+---------+ +| pytest (dev) | 5.0.1 | | X | ++-----------------+-----------------+----------+---------+ + +For `optional libraries `_ the general recommendation is to use the latest version. +The following table lists the lowest version per library that is currently being tested throughout the development of pandas. +Optional libraries below the lowest tested version may still work, but are not considered supported. + ++-----------------+-----------------+---------+ +| Package | Minimum Version | Changed | ++=================+=================+=========+ +| beautifulsoup4 | 4.6.0 | | ++-----------------+-----------------+---------+ +| fastparquet | 0.3.2 | | ++-----------------+-----------------+---------+ +| fsspec | 0.7.4 | | ++-----------------+-----------------+---------+ +| gcsfs | 0.6.0 | | ++-----------------+-----------------+---------+ +| lxml | 4.3.0 | X | ++-----------------+-----------------+---------+ +| matplotlib | 2.2.3 | X | ++-----------------+-----------------+---------+ +| numba | 0.46.0 | | ++-----------------+-----------------+---------+ +| openpyxl | 2.5.7 | | ++-----------------+-----------------+---------+ +| pyarrow | 0.15.0 | X | ++-----------------+-----------------+---------+ +| pymysql | 0.7.11 | X | ++-----------------+-----------------+---------+ +| pytables | 3.4.4 | X | ++-----------------+-----------------+---------+ +| s3fs | 0.4.0 | | ++-----------------+-----------------+---------+ +| scipy | 1.2.0 | | ++-----------------+-----------------+---------+ +| sqlalchemy | 1.2.8 | X | ++-----------------+-----------------+---------+ +| xarray | 0.12.0 | X | ++-----------------+-----------------+---------+ +| xlrd | 1.1.0 | | ++-----------------+-----------------+---------+ +| xlsxwriter | 1.0.2 | X | ++-----------------+-----------------+---------+ +| xlwt | 1.3.0 | X | ++-----------------+-----------------+---------+ +| pandas-gbq | 0.12.0 | | ++-----------------+-----------------+---------+ + +See :ref:`install.dependencies` and :ref:`install.optional_dependencies` for more. .. --------------------------------------------------------------------------- diff --git a/environment.yml b/environment.yml index ed9762e5b8893..1e51470d43d36 100644 --- a/environment.yml +++ b/environment.yml @@ -4,7 +4,7 @@ channels: dependencies: # required # Pin numpy<1.19 until MPL 3.3.0 is released. 
- - numpy>=1.15,<1.19.0 + - numpy>=1.16.5,<1.19.0 - python=3 - python-dateutil>=2.7.3 - pytz @@ -93,11 +93,11 @@ dependencies: - odfpy - fastparquet>=0.3.2 # pandas.read_parquet, DataFrame.to_parquet - - pyarrow>=0.13.1 # pandas.read_parquet, DataFrame.to_parquet, pandas.read_feather, DataFrame.to_feather + - pyarrow>=0.15.0 # pandas.read_parquet, DataFrame.to_parquet, pandas.read_feather, DataFrame.to_feather - python-snappy # required by pyarrow - pyqt>=5.9.2 # pandas.read_clipboard - - pytables>=3.4.3 # pandas.read_hdf, DataFrame.to_hdf + - pytables>=3.4.4 # pandas.read_hdf, DataFrame.to_hdf - s3fs>=0.4.0 # file IO when using 's3://...' path - fsspec>=0.7.4 # for generic remote file operations - gcsfs>=0.6.0 # file IO when using 'gcs://...' path diff --git a/pandas/__init__.py b/pandas/__init__.py index d6584bf4f1c4f..36576da74c75d 100644 --- a/pandas/__init__.py +++ b/pandas/__init__.py @@ -20,7 +20,6 @@ # numpy compat from pandas.compat.numpy import ( - _np_version_under1p16, _np_version_under1p17, _np_version_under1p18, _is_numpy_dev, @@ -185,181 +184,76 @@ __git_version__ = v.get("full-revisionid") del get_versions, v + # GH 27101 # TODO: remove Panel compat in 1.0 -if pandas.compat.PY37: - - def __getattr__(name): - import warnings - - if name == "Panel": - - warnings.warn( - "The Panel class is removed from pandas. Accessing it " - "from the top-level namespace will also be removed in the next version", - FutureWarning, - stacklevel=2, - ) - - class Panel: - pass - - return Panel - - elif name == "datetime": - warnings.warn( - "The pandas.datetime class is deprecated " - "and will be removed from pandas in a future version. " - "Import from datetime module instead.", - FutureWarning, - stacklevel=2, - ) - - from datetime import datetime as dt - - return dt - - elif name == "np": - - warnings.warn( - "The pandas.np module is deprecated " - "and will be removed from pandas in a future version. " - "Import numpy directly instead", - FutureWarning, - stacklevel=2, - ) - import numpy as np - - return np - - elif name in {"SparseSeries", "SparseDataFrame"}: - warnings.warn( - f"The {name} class is removed from pandas. Accessing it from " - "the top-level namespace will also be removed in the next version", - FutureWarning, - stacklevel=2, - ) - - return type(name, (), {}) - - elif name == "SparseArray": - - warnings.warn( - "The pandas.SparseArray class is deprecated " - "and will be removed from pandas in a future version. " - "Use pandas.arrays.SparseArray instead.", - FutureWarning, - stacklevel=2, - ) - from pandas.core.arrays.sparse import SparseArray as _SparseArray +def __getattr__(name): + import warnings - return _SparseArray + if name == "Panel": - raise AttributeError(f"module 'pandas' has no attribute '{name}'") + warnings.warn( + "The Panel class is removed from pandas. Accessing it " + "from the top-level namespace will also be removed in the next version", + FutureWarning, + stacklevel=2, + ) + class Panel: + pass -else: + return Panel - class Panel: - pass - - class SparseDataFrame: - pass - - class SparseSeries: - pass - - class __numpy: - def __init__(self): - import numpy as np - import warnings - - self.np = np - self.warnings = warnings - - def __getattr__(self, item): - self.warnings.warn( - "The pandas.np module is deprecated " - "and will be removed from pandas in a future version. 
" - "Import numpy directly instead", - FutureWarning, - stacklevel=2, - ) - - try: - return getattr(self.np, item) - except AttributeError as err: - raise AttributeError(f"module numpy has no attribute {item}") from err - - np = __numpy() - - class __Datetime(type): + elif name == "datetime": + warnings.warn( + "The pandas.datetime class is deprecated " + "and will be removed from pandas in a future version. " + "Import from datetime module instead.", + FutureWarning, + stacklevel=2, + ) from datetime import datetime as dt - datetime = dt - - def __getattr__(cls, item): - cls.emit_warning() - - try: - return getattr(cls.datetime, item) - except AttributeError as err: - raise AttributeError( - f"module datetime has no attribute {item}" - ) from err - - def __instancecheck__(cls, other): - return isinstance(other, cls.datetime) - - class __DatetimeSub(metaclass=__Datetime): - def emit_warning(dummy=0): - import warnings - - warnings.warn( - "The pandas.datetime class is deprecated " - "and will be removed from pandas in a future version. " - "Import from datetime instead.", - FutureWarning, - stacklevel=3, - ) - - def __new__(cls, *args, **kwargs): - cls.emit_warning() - from datetime import datetime as dt - - return dt(*args, **kwargs) - - datetime = __DatetimeSub + return dt - class __SparseArray(type): + elif name == "np": - from pandas.core.arrays.sparse import SparseArray as sa + warnings.warn( + "The pandas.np module is deprecated " + "and will be removed from pandas in a future version. " + "Import numpy directly instead", + FutureWarning, + stacklevel=2, + ) + import numpy as np - SparseArray = sa + return np - def __instancecheck__(cls, other): - return isinstance(other, cls.SparseArray) + elif name in {"SparseSeries", "SparseDataFrame"}: + warnings.warn( + f"The {name} class is removed from pandas. Accessing it from " + "the top-level namespace will also be removed in the next version", + FutureWarning, + stacklevel=2, + ) - class __SparseArraySub(metaclass=__SparseArray): - def emit_warning(dummy=0): - import warnings + return type(name, (), {}) - warnings.warn( - "The pandas.SparseArray class is deprecated " - "and will be removed from pandas in a future version. " - "Use pandas.arrays.SparseArray instead.", - FutureWarning, - stacklevel=3, - ) + elif name == "SparseArray": - def __new__(cls, *args, **kwargs): - cls.emit_warning() - from pandas.core.arrays.sparse import SparseArray as sa + warnings.warn( + "The pandas.SparseArray class is deprecated " + "and will be removed from pandas in a future version. 
" + "Use pandas.arrays.SparseArray instead.", + FutureWarning, + stacklevel=2, + ) + from pandas.core.arrays.sparse import SparseArray as _SparseArray - return sa(*args, **kwargs) + return _SparseArray - SparseArray = __SparseArraySub + raise AttributeError(f"module 'pandas' has no attribute '{name}'") # module level doc-string diff --git a/pandas/compat/__init__.py b/pandas/compat/__init__.py index b5a1dc2b2fb94..ab2835932c95d 100644 --- a/pandas/compat/__init__.py +++ b/pandas/compat/__init__.py @@ -14,7 +14,6 @@ from pandas._typing import F -PY37 = sys.version_info >= (3, 7) PY38 = sys.version_info >= (3, 8) PY39 = sys.version_info >= (3, 9) PYPY = platform.python_implementation() == "PyPy" diff --git a/pandas/compat/numpy/__init__.py b/pandas/compat/numpy/__init__.py index 789a4668b6fee..08d06da93bb45 100644 --- a/pandas/compat/numpy/__init__.py +++ b/pandas/compat/numpy/__init__.py @@ -8,19 +8,19 @@ # numpy versioning _np_version = np.__version__ _nlv = LooseVersion(_np_version) -_np_version_under1p16 = _nlv < LooseVersion("1.16") _np_version_under1p17 = _nlv < LooseVersion("1.17") _np_version_under1p18 = _nlv < LooseVersion("1.18") _np_version_under1p19 = _nlv < LooseVersion("1.19") _np_version_under1p20 = _nlv < LooseVersion("1.20") _is_numpy_dev = ".dev" in str(_nlv) +_min_numpy_ver = "1.16.5" -if _nlv < "1.15.4": +if _nlv < _min_numpy_ver: raise ImportError( - "this version of pandas is incompatible with numpy < 1.15.4\n" + f"this version of pandas is incompatible with numpy < {_min_numpy_ver}\n" f"your numpy version is {_np_version}.\n" - "Please upgrade numpy to >= 1.15.4 to use this pandas version" + f"Please upgrade numpy to >= {_min_numpy_ver} to use this pandas version" ) @@ -65,7 +65,6 @@ def np_array_datetime64_compat(arr, *args, **kwargs): __all__ = [ "np", "_np_version", - "_np_version_under1p16", "_np_version_under1p17", "_is_numpy_dev", ] diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 9d0751fcce460..547d86f221b5f 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -58,7 +58,6 @@ StorageOptions, ValueKeyFunc, ) -from pandas.compat import PY37 from pandas.compat._optional import import_optional_dependency from pandas.compat.numpy import function as nv from pandas.util._decorators import ( @@ -1088,9 +1087,7 @@ def itertuples(self, index=True, name="Pandas"): # use integer indexing because of possible duplicate column names arrays.extend(self.iloc[:, k] for k in range(len(self.columns))) - # Python versions before 3.7 support at most 255 arguments to constructors - can_return_named_tuples = PY37 or len(self.columns) + index < 255 - if name is not None and can_return_named_tuples: + if name is not None: itertuple = collections.namedtuple(name, fields, rename=True) return map(itertuple._make, zip(*arrays)) diff --git a/pandas/tests/api/test_api.py b/pandas/tests/api/test_api.py index caa348d3a1fb9..1d25336cd3b70 100644 --- a/pandas/tests/api/test_api.py +++ b/pandas/tests/api/test_api.py @@ -5,7 +5,7 @@ import pytest import pandas as pd -from pandas import api, compat +from pandas import api import pandas._testing as tm @@ -100,11 +100,6 @@ class TestPDApi(Base): # these should be deprecated in the future deprecated_classes_in_future: List[str] = ["SparseArray"] - if not compat.PY37: - classes.extend(["Panel", "SparseSeries", "SparseDataFrame"]) - # deprecated_modules.extend(["np", "datetime"]) - # deprecated_classes_in_future.extend(["SparseArray"]) - # external modules exposed in pandas namespace modules: List[str] = [] @@ -193,7 +188,6 @@ 
class TestPDApi(Base): "_hashtable", "_lib", "_libs", - "_np_version_under1p16", "_np_version_under1p17", "_np_version_under1p18", "_is_numpy_dev", @@ -217,14 +211,6 @@ def test_api(self): + self.funcs_to + self.private_modules ) - if not compat.PY37: - checkthese.extend( - self.deprecated_modules - + self.deprecated_classes - + self.deprecated_classes_in_future - + self.deprecated_funcs_in_future - + self.deprecated_funcs - ) self.check(pd, checkthese, self.ignored) def test_depr(self): @@ -237,14 +223,7 @@ def test_depr(self): ) for depr in deprecated_list: with tm.assert_produces_warning(FutureWarning): - deprecated = getattr(pd, depr) - if not compat.PY37: - if depr == "datetime": - deprecated.__getattr__(dir(pd.datetime.datetime)[-1]) - elif depr == "SparseArray": - deprecated([]) - else: - deprecated.__getattr__(dir(deprecated)[-1]) + _ = getattr(pd, depr) def test_datetime(): diff --git a/pandas/tests/arrays/categorical/test_constructors.py b/pandas/tests/arrays/categorical/test_constructors.py index ca942c9288898..89fbfbd5b8324 100644 --- a/pandas/tests/arrays/categorical/test_constructors.py +++ b/pandas/tests/arrays/categorical/test_constructors.py @@ -3,8 +3,6 @@ import numpy as np import pytest -from pandas.compat.numpy import _np_version_under1p16 - from pandas.core.dtypes.common import is_float_dtype, is_integer_dtype from pandas.core.dtypes.dtypes import CategoricalDtype @@ -637,7 +635,6 @@ def test_constructor_imaginary(self): tm.assert_index_equal(c1.categories, Index(values)) tm.assert_numpy_array_equal(np.array(c1), np.array(values)) - @pytest.mark.skipif(_np_version_under1p16, reason="Skipping for NumPy <1.16") def test_constructor_string_and_tuples(self): # GH 21416 c = pd.Categorical(np.array(["c", ("a", "b"), ("b", "a"), "c"], dtype=object)) diff --git a/pandas/tests/extension/arrow/test_bool.py b/pandas/tests/extension/arrow/test_bool.py index 7841360e568ed..12426a0c92c55 100644 --- a/pandas/tests/extension/arrow/test_bool.py +++ b/pandas/tests/extension/arrow/test_bool.py @@ -1,8 +1,6 @@ import numpy as np import pytest -from pandas.compat import PY37 - import pandas as pd import pandas._testing as tm from pandas.tests.extension import base @@ -62,13 +60,11 @@ def test_from_dtype(self, data): def test_from_sequence_from_cls(self, data): super().test_from_sequence_from_cls(data) - @pytest.mark.skipif(not PY37, reason="timeout on Linux py36_locale") @pytest.mark.xfail(reason="pa.NULL is not recognised as scalar, GH-33899") def test_series_constructor_no_data_with_index(self, dtype, na_value): # pyarrow.lib.ArrowInvalid: only handle 1-dimensional arrays super().test_series_constructor_no_data_with_index(dtype, na_value) - @pytest.mark.skipif(not PY37, reason="timeout on Linux py36_locale") @pytest.mark.xfail(reason="pa.NULL is not recognised as scalar, GH-33899") def test_series_constructor_scalar_na_with_index(self, dtype, na_value): # pyarrow.lib.ArrowInvalid: only handle 1-dimensional arrays diff --git a/pandas/tests/extension/test_numpy.py b/pandas/tests/extension/test_numpy.py index 78000c0252375..b9219f9f833de 100644 --- a/pandas/tests/extension/test_numpy.py +++ b/pandas/tests/extension/test_numpy.py @@ -1,8 +1,6 @@ import numpy as np import pytest -from pandas.compat.numpy import _np_version_under1p16 - import pandas as pd import pandas._testing as tm from pandas.core.arrays.numpy_ import PandasArray, PandasDtype @@ -46,11 +44,7 @@ def data(allow_in_pandas, dtype): @pytest.fixture def data_missing(allow_in_pandas, dtype): - # For NumPy <1.16, np.array([np.nan, 
(1,)]) raises - # ValueError: setting an array element with a sequence. if dtype.numpy_dtype == "object": - if _np_version_under1p16: - raise pytest.skip("Skipping for NumPy <1.16") return PandasArray(np.array([np.nan, (1,)], dtype=object)) return PandasArray(np.array([np.nan, 1.0])) diff --git a/pandas/tests/frame/test_api.py b/pandas/tests/frame/test_api.py index cc57a3970d18b..2fb1f7f911a9c 100644 --- a/pandas/tests/frame/test_api.py +++ b/pandas/tests/frame/test_api.py @@ -6,7 +6,6 @@ import numpy as np import pytest -from pandas.compat import PY37 from pandas.util._test_decorators import async_mark, skip_if_no import pandas as pd @@ -274,10 +273,7 @@ def test_itertuples(self, float_frame): # will raise SyntaxError if trying to create namedtuple tup3 = next(df3.itertuples()) assert isinstance(tup3, tuple) - if PY37: - assert hasattr(tup3, "_fields") - else: - assert not hasattr(tup3, "_fields") + assert hasattr(tup3, "_fields") # GH 28282 df_254_columns = DataFrame([{f"foo_{i}": f"bar_{i}" for i in range(254)}]) @@ -288,12 +284,7 @@ def test_itertuples(self, float_frame): df_255_columns = DataFrame([{f"foo_{i}": f"bar_{i}" for i in range(255)}]) result_255_columns = next(df_255_columns.itertuples(index=False)) assert isinstance(result_255_columns, tuple) - - # Dataframes with >=255 columns will fallback to regular tuples on python < 3.7 - if PY37: - assert hasattr(result_255_columns, "_fields") - else: - assert not hasattr(result_255_columns, "_fields") + assert hasattr(result_255_columns, "_fields") def test_sequence_like_with_categorical(self): diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py index d0f774344a33d..c8f5b2b0f6364 100644 --- a/pandas/tests/frame/test_constructors.py +++ b/pandas/tests/frame/test_constructors.py @@ -10,7 +10,7 @@ import pytest import pytz -from pandas.compat import PY37, is_platform_little_endian +from pandas.compat import is_platform_little_endian from pandas.compat.numpy import _np_version_under1p19 from pandas.core.dtypes.common import is_integer_dtype @@ -1418,7 +1418,6 @@ def test_constructor_list_of_namedtuples(self): result = DataFrame(tuples, columns=["y", "z"]) tm.assert_frame_equal(result, expected) - @pytest.mark.skipif(not PY37, reason="Requires Python >= 3.7") def test_constructor_list_of_dataclasses(self): # GH21910 from dataclasses import make_dataclass @@ -1430,7 +1429,6 @@ def test_constructor_list_of_dataclasses(self): result = DataFrame(datas) tm.assert_frame_equal(result, expected) - @pytest.mark.skipif(not PY37, reason="Requires Python >= 3.7") def test_constructor_list_of_dataclasses_with_varying_types(self): # GH21910 from dataclasses import make_dataclass @@ -1447,7 +1445,6 @@ def test_constructor_list_of_dataclasses_with_varying_types(self): result = DataFrame(datas) tm.assert_frame_equal(result, expected) - @pytest.mark.skipif(not PY37, reason="Requires Python >= 3.7") def test_constructor_list_of_dataclasses_error_thrown(self): # GH21910 from dataclasses import make_dataclass diff --git a/pandas/tests/groupby/test_categorical.py b/pandas/tests/groupby/test_categorical.py index c74c1529eb537..13a32e285e70a 100644 --- a/pandas/tests/groupby/test_categorical.py +++ b/pandas/tests/groupby/test_categorical.py @@ -3,8 +3,6 @@ import numpy as np import pytest -from pandas.compat import PY37, is_platform_windows - import pandas as pd from pandas import ( Categorical, @@ -13,7 +11,6 @@ Index, MultiIndex, Series, - _np_version_under1p17, qcut, ) import pandas._testing as tm @@ -244,12 +241,6 
@@ def test_level_get_group(observed): tm.assert_frame_equal(result, expected) -# GH#21636 flaky on py37; may be related to older numpy, see discussion -# https://github.com/MacPython/pandas-wheels/pull/64 -@pytest.mark.xfail( - PY37 and _np_version_under1p17 and not is_platform_windows(), - reason="Flaky, GH-27902", -) @pytest.mark.parametrize("ordered", [True, False]) def test_apply(ordered): # GH 10138 diff --git a/pandas/tests/io/json/test_json_table_schema.py b/pandas/tests/io/json/test_json_table_schema.py index 22b4ec189a0f1..8f1ed193b100f 100644 --- a/pandas/tests/io/json/test_json_table_schema.py +++ b/pandas/tests/io/json/test_json_table_schema.py @@ -256,6 +256,9 @@ def test_read_json_from_to_json_results(self): tm.assert_frame_equal(result1, df) tm.assert_frame_equal(result2, df) + @pytest.mark.filterwarnings( + "ignore:an integer is required (got type float)*:DeprecationWarning" + ) def test_to_json(self): df = self.df.copy() df.index.name = "idx" @@ -432,6 +435,9 @@ def test_to_json_categorical_index(self): assert result == expected + @pytest.mark.filterwarnings( + "ignore:an integer is required (got type float)*:DeprecationWarning" + ) def test_date_format_raises(self): with pytest.raises(ValueError): self.df.to_json(orient="table", date_format="epoch") diff --git a/pandas/tests/io/json/test_pandas.py b/pandas/tests/io/json/test_pandas.py index c4db0170ecc90..1280d0fd434d5 100644 --- a/pandas/tests/io/json/test_pandas.py +++ b/pandas/tests/io/json/test_pandas.py @@ -35,6 +35,9 @@ def assert_json_roundtrip_equal(result, expected, orient): tm.assert_frame_equal(result, expected) +@pytest.mark.filterwarnings( + "ignore:an integer is required (got type float)*:DeprecationWarning" +) @pytest.mark.filterwarnings("ignore:the 'numpy' keyword is deprecated:FutureWarning") class TestPandasContainer: @pytest.fixture(autouse=True) diff --git a/pandas/tests/io/parser/test_common.py b/pandas/tests/io/parser/test_common.py index 5154a9ba6fdf0..c84c0048cc838 100644 --- a/pandas/tests/io/parser/test_common.py +++ b/pandas/tests/io/parser/test_common.py @@ -1138,6 +1138,7 @@ def test_parse_integers_above_fp_precision(all_parsers): tm.assert_frame_equal(result, expected) +@pytest.mark.skip("unreliable test #35214") def test_chunks_have_consistent_numerical_type(all_parsers): parser = all_parsers integers = [str(i) for i in range(499999)] @@ -1151,6 +1152,7 @@ def test_chunks_have_consistent_numerical_type(all_parsers): assert result.a.dtype == float +@pytest.mark.skip("unreliable test #35214") def test_warn_if_chunks_have_mismatched_type(all_parsers): warning_type = None parser = all_parsers diff --git a/pandas/tests/scalar/test_nat.py b/pandas/tests/scalar/test_nat.py index 03830019affa1..09d5d9c1677d0 100644 --- a/pandas/tests/scalar/test_nat.py +++ b/pandas/tests/scalar/test_nat.py @@ -308,10 +308,6 @@ def test_overlap_public_nat_methods(klass, expected): # In case when Timestamp, Timedelta, and NaT are overlap, the overlap # is considered to be with Timestamp and NaT, not Timedelta. 
- # "fromisoformat" was introduced in 3.7 - if klass is Timestamp and not compat.PY37: - expected.remove("fromisoformat") - # "fromisocalendar" was introduced in 3.8 if klass is Timestamp and not compat.PY38: expected.remove("fromisocalendar") diff --git a/pandas/tests/tseries/offsets/test_offsets.py b/pandas/tests/tseries/offsets/test_offsets.py index 8c51908c547f4..d1ab797056ece 100644 --- a/pandas/tests/tseries/offsets/test_offsets.py +++ b/pandas/tests/tseries/offsets/test_offsets.py @@ -14,7 +14,6 @@ import pandas._libs.tslibs.offsets as liboffsets from pandas._libs.tslibs.offsets import ApplyTypeError, _get_offset, _offset_map from pandas._libs.tslibs.period import INVALID_FREQ_ERR_MSG -import pandas.compat as compat from pandas.compat.numpy import np_datetime64_compat from pandas.errors import PerformanceWarning @@ -744,10 +743,7 @@ def test_repr(self): assert repr(self.offset) == "" assert repr(self.offset2) == "<2 * BusinessDays>" - if compat.PY37: - expected = "" - else: - expected = "" + expected = "" assert repr(self.offset + timedelta(1)) == expected def test_with_offset(self): @@ -2636,10 +2632,7 @@ def test_repr(self): assert repr(self.offset) == "" assert repr(self.offset2) == "<2 * CustomBusinessDays>" - if compat.PY37: - expected = "" - else: - expected = "" + expected = "" assert repr(self.offset + timedelta(1)) == expected def test_with_offset(self): diff --git a/pandas/util/__init__.py b/pandas/util/__init__.py index b5271dbc0443e..9f2bf156b7e37 100644 --- a/pandas/util/__init__.py +++ b/pandas/util/__init__.py @@ -1,30 +1,12 @@ from pandas.util._decorators import Appender, Substitution, cache_readonly # noqa -from pandas import compat from pandas.core.util.hashing import hash_array, hash_pandas_object # noqa -# compatibility for import pandas; pandas.util.testing -if compat.PY37: +def __getattr__(name): + if name == "testing": + import pandas.util.testing - def __getattr__(name): - if name == "testing": - import pandas.util.testing - - return pandas.util.testing - else: - raise AttributeError(f"module 'pandas.util' has no attribute '{name}'") - - -else: - - class _testing: - def __getattr__(self, item): - import pandas.util.testing - - return getattr(pandas.util.testing, item) - - testing = _testing() - - -del compat + return pandas.util.testing + else: + raise AttributeError(f"module 'pandas.util' has no attribute '{name}'") diff --git a/pyproject.toml b/pyproject.toml index f282f2a085000..f6f8081b6c464 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -5,16 +5,14 @@ requires = [ "setuptools", "wheel", "Cython>=0.29.16,<3", # Note: sync with setup.py - "numpy==1.15.4; python_version=='3.6' and platform_system!='AIX'", - "numpy==1.15.4; python_version=='3.7' and platform_system!='AIX'", + "numpy==1.16.5; python_version=='3.7' and platform_system!='AIX'", "numpy==1.17.3; python_version>='3.8' and platform_system!='AIX'", - "numpy==1.16.0; python_version=='3.6' and platform_system=='AIX'", - "numpy==1.16.0; python_version=='3.7' and platform_system=='AIX'", + "numpy==1.16.5; python_version=='3.7' and platform_system=='AIX'", "numpy==1.17.3; python_version>='3.8' and platform_system=='AIX'", ] [tool.black] -target-version = ['py36', 'py37', 'py38'] +target-version = ['py37', 'py38'] exclude = ''' ( asv_bench/env diff --git a/requirements-dev.txt b/requirements-dev.txt index 6a87b0a99a4f8..66e72641cd5bb 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -1,7 +1,7 @@ # This file is auto-generated from environment.yml, do not modify. 
# See that file for comments about the need/usage of each dependency. -numpy>=1.15,<1.19.0 +numpy>=1.16.5,<1.19.0 python-dateutil>=2.7.3 pytz asv @@ -60,10 +60,10 @@ xlsxwriter xlwt odfpy fastparquet>=0.3.2 -pyarrow>=0.13.1 +pyarrow>=0.15.0 python-snappy pyqt5>=5.9.2 -tables>=3.4.3 +tables>=3.4.4 s3fs>=0.4.0 fsspec>=0.7.4 gcsfs>=0.6.0 diff --git a/setup.py b/setup.py index aebbdbf4d1e96..43d19d525876b 100755 --- a/setup.py +++ b/setup.py @@ -33,7 +33,7 @@ def is_platform_mac(): return sys.platform == "darwin" -min_numpy_ver = "1.15.4" +min_numpy_ver = "1.16.5" min_cython_ver = "0.29.16" # note: sync with pyproject.toml try: @@ -197,7 +197,6 @@ def build_extensions(self): "Intended Audience :: Science/Research", "Programming Language :: Python", "Programming Language :: Python :: 3", - "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Cython", @@ -742,7 +741,7 @@ def setup_package(): setuptools_kwargs = { "install_requires": [ "python-dateutil >= 2.7.3", - "pytz >= 2017.2", + "pytz >= 2017.3", f"numpy >= {min_numpy_ver}", ], "setup_requires": [f"numpy >= {min_numpy_ver}"], @@ -766,11 +765,11 @@ def setup_package(): long_description=LONG_DESCRIPTION, classifiers=CLASSIFIERS, platforms="any", - python_requires=">=3.6.1", + python_requires=">=3.7.1", extras_require={ "test": [ # sync with setup.cfg minversion & install.rst - "pytest>=4.0.2", + "pytest>=5.0.1", "pytest-xdist", "hypothesis>=3.58", ] From 3c87b019b63ebff3c56acb6402fba5bc6f99671f Mon Sep 17 00:00:00 2001 From: Fangchen Li Date: Tue, 11 Aug 2020 16:57:56 -0500 Subject: [PATCH 255/632] CI/TST: change skip to xfail #35660 (#35672) --- pandas/tests/io/parser/test_common.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pandas/tests/io/parser/test_common.py b/pandas/tests/io/parser/test_common.py index c84c0048cc838..3d5f6ae3a4af9 100644 --- a/pandas/tests/io/parser/test_common.py +++ b/pandas/tests/io/parser/test_common.py @@ -1138,7 +1138,7 @@ def test_parse_integers_above_fp_precision(all_parsers): tm.assert_frame_equal(result, expected) -@pytest.mark.skip("unreliable test #35214") +@pytest.mark.xfail(reason="ResourceWarning #35660", strict=False) def test_chunks_have_consistent_numerical_type(all_parsers): parser = all_parsers integers = [str(i) for i in range(499999)] @@ -1152,7 +1152,7 @@ def test_chunks_have_consistent_numerical_type(all_parsers): assert result.a.dtype == float -@pytest.mark.skip("unreliable test #35214") +@pytest.mark.xfail(reason="ResourceWarning #35660", strict=False) def test_warn_if_chunks_have_mismatched_type(all_parsers): warning_type = None parser = all_parsers From 59ffa251269ddb8b561d8d88f631a5db21f660dc Mon Sep 17 00:00:00 2001 From: Simon Hawkins Date: Wed, 12 Aug 2020 13:22:54 +0100 Subject: [PATCH 256/632] CLN: consistent signatures for equals methods (#35636) --- pandas/_libs/sparse.pyx | 4 ++-- pandas/core/arrays/base.py | 14 ++++++++++---- pandas/core/arrays/categorical.py | 6 ++++-- pandas/core/generic.py | 4 +++- pandas/core/indexes/base.py | 2 +- pandas/core/indexes/category.py | 2 +- pandas/core/indexes/datetimelike.py | 6 +++--- pandas/core/indexes/interval.py | 7 ++++--- pandas/core/indexes/multi.py | 11 +++++------ pandas/core/indexes/range.py | 2 +- pandas/core/internals/managers.py | 5 ++++- 11 files changed, 38 insertions(+), 25 deletions(-) diff --git a/pandas/_libs/sparse.pyx b/pandas/_libs/sparse.pyx index 321d7c374d8ec..0c3d8915b749b 100644 --- 
a/pandas/_libs/sparse.pyx +++ b/pandas/_libs/sparse.pyx @@ -103,7 +103,7 @@ cdef class IntIndex(SparseIndex): if not monotonic: raise ValueError("Indices must be strictly increasing") - def equals(self, other) -> bool: + def equals(self, other: object) -> bool: if not isinstance(other, IntIndex): return False @@ -399,7 +399,7 @@ cdef class BlockIndex(SparseIndex): if blengths[i] == 0: raise ValueError(f'Zero-length block {i}') - def equals(self, other) -> bool: + def equals(self, other: object) -> bool: if not isinstance(other, BlockIndex): return False diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py index 921927325a144..d85647edc3b81 100644 --- a/pandas/core/arrays/base.py +++ b/pandas/core/arrays/base.py @@ -7,7 +7,7 @@ without warning. """ import operator -from typing import Any, Callable, Dict, Optional, Sequence, Tuple, Union +from typing import Any, Callable, Dict, Optional, Sequence, Tuple, Union, cast import numpy as np @@ -20,7 +20,12 @@ from pandas.util._validators import validate_fillna_kwargs from pandas.core.dtypes.cast import maybe_cast_to_extension_array -from pandas.core.dtypes.common import is_array_like, is_list_like, pandas_dtype +from pandas.core.dtypes.common import ( + is_array_like, + is_dtype_equal, + is_list_like, + pandas_dtype, +) from pandas.core.dtypes.dtypes import ExtensionDtype from pandas.core.dtypes.generic import ABCDataFrame, ABCIndexClass, ABCSeries from pandas.core.dtypes.missing import isna @@ -742,7 +747,7 @@ def searchsorted(self, value, side="left", sorter=None): arr = self.astype(object) return arr.searchsorted(value, side=side, sorter=sorter) - def equals(self, other: "ExtensionArray") -> bool: + def equals(self, other: object) -> bool: """ Return if another array is equivalent to this array. @@ -762,7 +767,8 @@ def equals(self, other: "ExtensionArray") -> bool: """ if not type(self) == type(other): return False - elif not self.dtype == other.dtype: + other = cast(ExtensionArray, other) + if not is_dtype_equal(self.dtype, other.dtype): return False elif not len(self) == len(other): return False diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index 6e5c7bc699962..a28b341669918 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -2242,7 +2242,7 @@ def _from_factorized(cls, uniques, original): original.categories.take(uniques), dtype=original.dtype ) - def equals(self, other): + def equals(self, other: object) -> bool: """ Returns True if categorical arrays are equal. @@ -2254,7 +2254,9 @@ def equals(self, other): ------- bool """ - if self.is_dtype_equal(other): + if not isinstance(other, Categorical): + return False + elif self.is_dtype_equal(other): if self.categories.equals(other.categories): # fastpath to avoid re-coding other_codes = other._codes diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 520023050d49d..11147bffa32c3 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -22,6 +22,7 @@ Tuple, Type, Union, + cast, ) import warnings import weakref @@ -1196,7 +1197,7 @@ def _indexed_same(self, other) -> bool: self._get_axis(a).equals(other._get_axis(a)) for a in self._AXIS_ORDERS ) - def equals(self, other): + def equals(self, other: object) -> bool: """ Test whether two objects contain the same elements. 
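The convention these hunks converge on is that ``equals`` accepts an arbitrary ``object``, returns ``bool``, and answers ``False`` for incompatible types instead of raising, mirroring the contract of ``__eq__``. A minimal sketch of the pattern with a hypothetical ``Bucket`` container (illustrative only, not pandas code):

    class Bucket:
        """Toy container, used only to illustrate the signature."""

        def __init__(self, values):
            self.values = list(values)

        def equals(self, other: object) -> bool:
            # Reject foreign types up front rather than raising, so
            # callers can probe with arbitrary objects, just like ``==``.
            if not isinstance(other, Bucket):
                return False
            return self.values == other.values

    assert Bucket([1, 2]).equals(Bucket([1, 2]))
    assert not Bucket([1, 2]).equals("not a bucket")  # no TypeError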
@@ -1276,6 +1277,7 @@ def equals(self, other): """ if not (isinstance(other, type(self)) or isinstance(self, type(other))): return False + other = cast(NDFrame, other) return self._mgr.equals(other._mgr) # ------------------------------------------------------------------------- diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index ecd3670e724a1..623ce68201492 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -4176,7 +4176,7 @@ def putmask(self, mask, value): # coerces to object return self.astype(object).putmask(mask, value) - def equals(self, other: Any) -> bool: + def equals(self, other: object) -> bool: """ Determine if two Index object are equal. diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py index fb283cbe02954..4990e6a8e20e9 100644 --- a/pandas/core/indexes/category.py +++ b/pandas/core/indexes/category.py @@ -290,7 +290,7 @@ def _is_dtype_compat(self, other) -> bool: return other - def equals(self, other) -> bool: + def equals(self, other: object) -> bool: """ Determine if two CategoricalIndex objects contain the same elements. diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index 8ccdab21339df..6d9d75a69e91d 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -24,7 +24,7 @@ is_scalar, ) from pandas.core.dtypes.concat import concat_compat -from pandas.core.dtypes.generic import ABCIndex, ABCIndexClass, ABCSeries +from pandas.core.dtypes.generic import ABCIndex, ABCSeries from pandas.core import algorithms from pandas.core.arrays import DatetimeArray, PeriodArray, TimedeltaArray @@ -130,14 +130,14 @@ def __array_wrap__(self, result, context=None): # ------------------------------------------------------------------------ - def equals(self, other) -> bool: + def equals(self, other: object) -> bool: """ Determines if two Index objects contain the same elements. """ if self.is_(other): return True - if not isinstance(other, ABCIndexClass): + if not isinstance(other, Index): return False elif not isinstance(other, type(self)): try: diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index 9548ebbd9c3b2..e8d0a44324cc5 100644 --- a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -1005,19 +1005,20 @@ def _format_space(self) -> str: def argsort(self, *args, **kwargs) -> np.ndarray: return np.lexsort((self.right, self.left)) - def equals(self, other) -> bool: + def equals(self, other: object) -> bool: """ Determines if two IntervalIndex objects contain the same elements. 
""" if self.is_(other): return True - # if we can coerce to an II - # then we can compare + # if we can coerce to an IntervalIndex then we can compare if not isinstance(other, IntervalIndex): if not is_interval_dtype(other): return False other = Index(other) + if not isinstance(other, IntervalIndex): + return False return ( self.left.equals(other.left) diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index 448c2dfe4a29d..ffbd03d0c3ba7 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -3221,7 +3221,7 @@ def truncate(self, before=None, after=None): verify_integrity=False, ) - def equals(self, other) -> bool: + def equals(self, other: object) -> bool: """ Determines if two MultiIndex objects have the same labeling information (the levels themselves do not necessarily have to be the same) @@ -3264,11 +3264,10 @@ def equals(self, other) -> bool: np.asarray(other.levels[i]._values), other_codes, allow_fill=False ) - # since we use NaT both datetime64 and timedelta64 - # we can have a situation where a level is typed say - # timedelta64 in self (IOW it has other values than NaT) - # but types datetime64 in other (where its all NaT) - # but these are equivalent + # since we use NaT both datetime64 and timedelta64 we can have a + # situation where a level is typed say timedelta64 in self (IOW it + # has other values than NaT) but types datetime64 in other (where + # its all NaT) but these are equivalent if len(self_values) == 0 and len(other_values) == 0: continue diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py index 3577a7aacc008..6080c32052266 100644 --- a/pandas/core/indexes/range.py +++ b/pandas/core/indexes/range.py @@ -433,7 +433,7 @@ def argsort(self, *args, **kwargs) -> np.ndarray: else: return np.arange(len(self) - 1, -1, -1) - def equals(self, other) -> bool: + def equals(self, other: object) -> bool: """ Determines if two Index objects contain the same elements. 
""" diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py index aa74d173d69b3..371b721f08b27 100644 --- a/pandas/core/internals/managers.py +++ b/pandas/core/internals/managers.py @@ -1437,7 +1437,10 @@ def take(self, indexer, axis: int = 1, verify: bool = True, convert: bool = True new_axis=new_labels, indexer=indexer, axis=axis, allow_dups=True ) - def equals(self, other: "BlockManager") -> bool: + def equals(self, other: object) -> bool: + if not isinstance(other, BlockManager): + return False + self_axes, other_axes = self.axes, other.axes if len(self_axes) != len(other_axes): return False From bf8e9efe0f1ae09dc39e13e2f56c82d503138b06 Mon Sep 17 00:00:00 2001 From: Matthew Roeschke Date: Wed, 12 Aug 2020 15:14:30 -0700 Subject: [PATCH 257/632] BUG: Support custom BaseIndexers in groupby.rolling (#35647) --- doc/source/whatsnew/v1.1.1.rst | 1 + pandas/core/window/indexers.py | 14 ++++++++++---- pandas/core/window/rolling.py | 15 +++++++++++---- pandas/tests/window/test_grouper.py | 23 +++++++++++++++++++++++ 4 files changed, 45 insertions(+), 8 deletions(-) diff --git a/doc/source/whatsnew/v1.1.1.rst b/doc/source/whatsnew/v1.1.1.rst index 415f9e508feb8..cdc244ca193b4 100644 --- a/doc/source/whatsnew/v1.1.1.rst +++ b/doc/source/whatsnew/v1.1.1.rst @@ -22,6 +22,7 @@ Fixed regressions - Fixed regression in :meth:`DataFrame.shift` with ``axis=1`` and heterogeneous dtypes (:issue:`35488`) - Fixed regression in ``.groupby(..).rolling(..)`` where a segfault would occur with ``center=True`` and an odd number of values (:issue:`35552`) - Fixed regression in :meth:`DataFrame.apply` where functions that altered the input in-place only operated on a single row (:issue:`35462`) +- Fixed regression in ``.groupby(..).rolling(..)`` where a custom ``BaseIndexer`` would be ignored (:issue:`35557`) .. 
--------------------------------------------------------------------------- diff --git a/pandas/core/window/indexers.py b/pandas/core/window/indexers.py index bc36bdca982e8..7cbe34cdebf9f 100644 --- a/pandas/core/window/indexers.py +++ b/pandas/core/window/indexers.py @@ -1,6 +1,6 @@ """Indexer objects for computing start/end window bounds for rolling operations""" from datetime import timedelta -from typing import Dict, Optional, Tuple, Type, Union +from typing import Dict, Optional, Tuple, Type import numpy as np @@ -265,7 +265,8 @@ def __init__( index_array: Optional[np.ndarray], window_size: int, groupby_indicies: Dict, - rolling_indexer: Union[Type[FixedWindowIndexer], Type[VariableWindowIndexer]], + rolling_indexer: Type[BaseIndexer], + indexer_kwargs: Optional[Dict], **kwargs, ): """ @@ -276,7 +277,10 @@ def __init__( """ self.groupby_indicies = groupby_indicies self.rolling_indexer = rolling_indexer - super().__init__(index_array, window_size, **kwargs) + self.indexer_kwargs = indexer_kwargs or {} + super().__init__( + index_array, self.indexer_kwargs.pop("window_size", window_size), **kwargs + ) @Appender(get_window_bounds_doc) def get_window_bounds( @@ -298,7 +302,9 @@ def get_window_bounds( else: index_array = self.index_array indexer = self.rolling_indexer( - index_array=index_array, window_size=self.window_size, + index_array=index_array, + window_size=self.window_size, + **self.indexer_kwargs, ) start, end = indexer.get_window_bounds( len(indicies), min_periods, center, closed diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py index 7347d5686aabc..0306d4de2fc73 100644 --- a/pandas/core/window/rolling.py +++ b/pandas/core/window/rolling.py @@ -145,7 +145,7 @@ class _Window(PandasObject, ShallowMixin, SelectionMixin): def __init__( self, - obj, + obj: FrameOrSeries, window=None, min_periods: Optional[int] = None, center: bool = False, @@ -2271,10 +2271,16 @@ def _get_window_indexer(self, window: int) -> GroupbyRollingIndexer: ------- GroupbyRollingIndexer """ - rolling_indexer: Union[Type[FixedWindowIndexer], Type[VariableWindowIndexer]] - if self.is_freq_type: + rolling_indexer: Type[BaseIndexer] + indexer_kwargs: Optional[Dict] = None + index_array = self.obj.index.asi8 + if isinstance(self.window, BaseIndexer): + rolling_indexer = type(self.window) + indexer_kwargs = self.window.__dict__ + # We'll be using the index of each group later + indexer_kwargs.pop("index_array", None) + elif self.is_freq_type: rolling_indexer = VariableWindowIndexer - index_array = self.obj.index.asi8 else: rolling_indexer = FixedWindowIndexer index_array = None @@ -2283,6 +2289,7 @@ def _get_window_indexer(self, window: int) -> GroupbyRollingIndexer: window_size=window, groupby_indicies=self._groupby.indices, rolling_indexer=rolling_indexer, + indexer_kwargs=indexer_kwargs, ) return window_indexer diff --git a/pandas/tests/window/test_grouper.py b/pandas/tests/window/test_grouper.py index e1dcac06c39cc..a9590c7e1233a 100644 --- a/pandas/tests/window/test_grouper.py +++ b/pandas/tests/window/test_grouper.py @@ -305,6 +305,29 @@ def test_groupby_subselect_rolling(self): ) tm.assert_series_equal(result, expected) + def test_groupby_rolling_custom_indexer(self): + # GH 35557 + class SimpleIndexer(pd.api.indexers.BaseIndexer): + def get_window_bounds( + self, num_values=0, min_periods=None, center=None, closed=None + ): + min_periods = self.window_size if min_periods is None else 0 + end = np.arange(num_values, dtype=np.int64) + 1 + start = end.copy() - self.window_size + 
start[start < 0] = min_periods + return start, end + + df = pd.DataFrame( + {"a": [1.0, 2.0, 3.0, 4.0, 5.0] * 3}, index=[0] * 5 + [1] * 5 + [2] * 5 + ) + result = ( + df.groupby(df.index) + .rolling(SimpleIndexer(window_size=3), min_periods=1) + .sum() + ) + expected = df.groupby(df.index).rolling(window=3, min_periods=1).sum() + tm.assert_frame_equal(result, expected) + def test_groupby_rolling_subset_with_closed(self): # GH 35549 df = pd.DataFrame( From 8145ea690f496c915409096a770d5cb835f2ec3f Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Wed, 12 Aug 2020 15:15:35 -0700 Subject: [PATCH 258/632] REF: _cython_agg_blocks follow patterns similar to _apply_blockwise (#35632) --- pandas/core/groupby/generic.py | 41 +++++++++++++++++++++------------- 1 file changed, 26 insertions(+), 15 deletions(-) diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index 53242c0332a8c..b7280a9f7db3c 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -1026,8 +1026,7 @@ def _cython_agg_blocks( if numeric_only: data = data.get_numeric_data(copy=False) - agg_blocks: List[Block] = [] - new_items: List[np.ndarray] = [] + agg_blocks: List["Block"] = [] deleted_items: List[np.ndarray] = [] no_result = object() @@ -1056,11 +1055,12 @@ def cast_result_block(result, block: "Block", how: str) -> "Block": # reshape to be valid for non-Extension Block result = result.reshape(1, -1) - agg_block: Block = block.make_block(result) + agg_block: "Block" = block.make_block(result) return agg_block - for block in data.blocks: - # Avoid inheriting result from earlier in the loop + def blk_func(block: "Block") -> List["Block"]: + new_blocks: List["Block"] = [] + result = no_result locs = block.mgr_locs.as_array try: @@ -1076,8 +1076,7 @@ def cast_result_block(result, block: "Block", how: str) -> "Block": # we cannot perform the operation # in an alternate way, exclude the block assert how == "ohlc" - deleted_items.append(locs) - continue + raise # call our grouper again with only this block obj = self.obj[data.items[locs]] @@ -1096,8 +1095,7 @@ def cast_result_block(result, block: "Block", how: str) -> "Block": except TypeError: # we may have an exception in trying to aggregate # continue and exclude the block - deleted_items.append(locs) - continue + raise else: result = cast(DataFrame, result) # unwrap DataFrame to get array @@ -1108,20 +1106,33 @@ def cast_result_block(result, block: "Block", how: str) -> "Block": # clean, we choose to clean up this mess later on. 
assert len(locs) == result.shape[1] for i, loc in enumerate(locs): - new_items.append(np.array([loc], dtype=locs.dtype)) agg_block = result.iloc[:, [i]]._mgr.blocks[0] - agg_blocks.append(agg_block) + new_blocks.append(agg_block) else: result = result._mgr.blocks[0].values if isinstance(result, np.ndarray) and result.ndim == 1: result = result.reshape(1, -1) agg_block = cast_result_block(result, block, how) - new_items.append(locs) - agg_blocks.append(agg_block) + new_blocks = [agg_block] else: agg_block = cast_result_block(result, block, how) - new_items.append(locs) - agg_blocks.append(agg_block) + new_blocks = [agg_block] + return new_blocks + + skipped: List[int] = [] + new_items: List[np.ndarray] = [] + for i, block in enumerate(data.blocks): + try: + nbs = blk_func(block) + except (NotImplementedError, TypeError): + # TypeError -> we may have an exception in trying to aggregate + # continue and exclude the block + # NotImplementedError -> "ohlc" with wrong dtype + skipped.append(i) + deleted_items.append(block.mgr_locs.as_array) + else: + agg_blocks.extend(nbs) + new_items.append(block.mgr_locs.as_array) if not agg_blocks: raise DataError("No numeric types to aggregate") From a0896e107ac03cd667b47e7be2059cd261b65938 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Torsten=20W=C3=B6rtwein?= Date: Wed, 12 Aug 2020 18:23:25 -0400 Subject: [PATCH 259/632] BUG: to_pickle/read_pickle do not close user-provided file objects (#35686) --- doc/source/whatsnew/v1.2.0.rst | 2 +- pandas/io/pickle.py | 8 ++++++-- pandas/tests/io/test_pickle.py | 9 +++++++++ 3 files changed, 16 insertions(+), 3 deletions(-) diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index 86f47a5826214..deb5697053ea8 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -234,7 +234,7 @@ I/O - Bug in :meth:`to_csv` caused a ``ValueError`` when it was called with a filename in combination with ``mode`` containing a ``b`` (:issue:`35058`) - In :meth:`read_csv` `float_precision='round_trip'` now handles `decimal` and `thousands` parameters (:issue:`35365`) -- +- :meth:`to_pickle` and :meth:`read_pickle` were closing user-provided file objects (:issue:`35679`) Plotting ^^^^^^^^ diff --git a/pandas/io/pickle.py b/pandas/io/pickle.py index 549d55e65546d..eee6ec7c9feca 100644 --- a/pandas/io/pickle.py +++ b/pandas/io/pickle.py @@ -100,7 +100,9 @@ def to_pickle( try: f.write(pickle.dumps(obj, protocol=protocol)) finally: - f.close() + if f != filepath_or_buffer: + # do not close user-provided file objects GH 35679 + f.close() for _f in fh: _f.close() if should_close: @@ -215,7 +217,9 @@ def read_pickle( # e.g. 
can occur for files written in py27; see GH#28645 and GH#31988 return pc.load(f, encoding="latin-1") finally: - f.close() + if f != filepath_or_buffer: + # do not close user-provided file objects GH 35679 + f.close() for _f in fh: _f.close() if should_close: diff --git a/pandas/tests/io/test_pickle.py b/pandas/tests/io/test_pickle.py index e4d43db7834e3..6331113ab8945 100644 --- a/pandas/tests/io/test_pickle.py +++ b/pandas/tests/io/test_pickle.py @@ -183,6 +183,15 @@ def python_unpickler(path): result = python_unpickler(path) compare_element(result, expected, typ) + # and the same for file objects (GH 35679) + with open(path, mode="wb") as handle: + writer(expected, path) + handle.seek(0) # shouldn't close file handle + with open(path, mode="rb") as handle: + result = pd.read_pickle(handle) + handle.seek(0) # shouldn't close file handle + compare_element(result, expected, typ) + def test_pickle_path_pathlib(): df = tm.makeDataFrame() From 40795053aaf82f8f55abe56001b1276b1ef0a916 Mon Sep 17 00:00:00 2001 From: Yutaro Ikeda Date: Thu, 13 Aug 2020 19:15:51 +0900 Subject: [PATCH 260/632] BUG: GH-35558 merge_asof tolerance error (#35654) --- doc/source/whatsnew/v1.1.1.rst | 1 + pandas/core/reshape/merge.py | 2 +- pandas/tests/reshape/merge/test_merge_asof.py | 22 +++++++++++++++++++ 3 files changed, 24 insertions(+), 1 deletion(-) diff --git a/doc/source/whatsnew/v1.1.1.rst b/doc/source/whatsnew/v1.1.1.rst index cdc244ca193b4..b37103910afab 100644 --- a/doc/source/whatsnew/v1.1.1.rst +++ b/doc/source/whatsnew/v1.1.1.rst @@ -22,6 +22,7 @@ Fixed regressions - Fixed regression in :meth:`DataFrame.shift` with ``axis=1`` and heterogeneous dtypes (:issue:`35488`) - Fixed regression in ``.groupby(..).rolling(..)`` where a segfault would occur with ``center=True`` and an odd number of values (:issue:`35552`) - Fixed regression in :meth:`DataFrame.apply` where functions that altered the input in-place only operated on a single row (:issue:`35462`) +- Fixed regression where :func:`merge_asof` would raise an ``UnboundLocalError`` when ``left_index``, ``right_index`` and ``tolerance`` were set (:issue:`35558`) - Fixed regression in ``.groupby(..).rolling(..)`` where a custom ``BaseIndexer`` would be ignored (:issue:`35557`) ..
--------------------------------------------------------------------------- diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py index 27b331babe692..2349cb1dcc0c7 100644 --- a/pandas/core/reshape/merge.py +++ b/pandas/core/reshape/merge.py @@ -1667,7 +1667,7 @@ def _get_merge_keys(self): msg = ( f"incompatible tolerance {self.tolerance}, must be compat " - f"with type {repr(lk.dtype)}" + f"with type {repr(lt.dtype)}" ) if needs_i8_conversion(lt): diff --git a/pandas/tests/reshape/merge/test_merge_asof.py b/pandas/tests/reshape/merge/test_merge_asof.py index 9b09f0033715d..895de2b748c34 100644 --- a/pandas/tests/reshape/merge/test_merge_asof.py +++ b/pandas/tests/reshape/merge/test_merge_asof.py @@ -1339,3 +1339,25 @@ def test_merge_index_column_tz(self): index=pd.Index([0, 1, 2, 3, 4]), ) tm.assert_frame_equal(result, expected) + + def test_left_index_right_index_tolerance(self): + # https://github.com/pandas-dev/pandas/issues/35558 + dr1 = pd.date_range( + start="1/1/2020", end="1/20/2020", freq="2D" + ) + pd.Timedelta(seconds=0.4) + dr2 = pd.date_range(start="1/1/2020", end="2/1/2020") + + df1 = pd.DataFrame({"val1": "foo"}, index=pd.DatetimeIndex(dr1)) + df2 = pd.DataFrame({"val2": "bar"}, index=pd.DatetimeIndex(dr2)) + + expected = pd.DataFrame( + {"val1": "foo", "val2": "bar"}, index=pd.DatetimeIndex(dr1) + ) + result = pd.merge_asof( + df1, + df2, + left_index=True, + right_index=True, + tolerance=pd.Timedelta(seconds=0.5), + ) + tm.assert_frame_equal(result, expected) From 288c8ed85c387ff3fef8458ee3ab9a3330645021 Mon Sep 17 00:00:00 2001 From: Elliot Rampono Date: Thu, 13 Aug 2020 12:21:46 -0400 Subject: [PATCH 261/632] Reorganize imports to be compliant with isort (and conventional) (#35708) --- web/pandas_web.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/web/pandas_web.py b/web/pandas_web.py index e62deaa8cdc7f..7dd63175e69ac 100755 --- a/web/pandas_web.py +++ b/web/pandas_web.py @@ -34,13 +34,12 @@ import time import typing +import feedparser import jinja2 +import markdown import requests import yaml -import feedparser -import markdown - class Preprocessors: """ From 7a675928f23839e9c9ddf3b049da51e5a20ce683 Mon Sep 17 00:00:00 2001 From: Elliot Rampono Date: Thu, 13 Aug 2020 13:52:32 -0400 Subject: [PATCH 262/632] add web/ directory to isort checks (#35709) Co-authored-by: elliot rampono --- ci/code_checks.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ci/code_checks.sh b/ci/code_checks.sh index 816bb23865c04..852f66763683b 100755 --- a/ci/code_checks.sh +++ b/ci/code_checks.sh @@ -121,7 +121,7 @@ if [[ -z "$CHECK" || "$CHECK" == "lint" ]]; then # Imports - Check formatting using isort see setup.cfg for settings MSG='Check import format using isort' ; echo $MSG - ISORT_CMD="isort --quiet --check-only pandas asv_bench scripts" + ISORT_CMD="isort --quiet --check-only pandas asv_bench scripts web" if [[ "$GITHUB_ACTIONS" == "true" ]]; then eval $ISORT_CMD | awk '{print "##[error]" $0}'; RET=$(($RET + ${PIPESTATUS[0]})) else From e530066aa5b0b6fee333a45a33424efb218d6556 Mon Sep 17 00:00:00 2001 From: Terji Petersen Date: Thu, 13 Aug 2020 19:06:35 +0100 Subject: [PATCH 263/632] PERF: make RangeIndex iterate over ._range (#35676) --- asv_bench/benchmarks/index_object.py | 24 +++++++++++++++-------- pandas/core/indexes/range.py | 4 ++++ pandas/tests/indexes/ranges/test_range.py | 4 ++++ 3 files changed, 24 insertions(+), 8 deletions(-) diff --git a/asv_bench/benchmarks/index_object.py 
b/asv_bench/benchmarks/index_object.py index b242de6a17208..9c05019c70396 100644 --- a/asv_bench/benchmarks/index_object.py +++ b/asv_bench/benchmarks/index_object.py @@ -57,8 +57,8 @@ def time_datetime_difference_disjoint(self): class Range: def setup(self): - self.idx_inc = RangeIndex(start=0, stop=10 ** 7, step=3) - self.idx_dec = RangeIndex(start=10 ** 7, stop=-1, step=-3) + self.idx_inc = RangeIndex(start=0, stop=10 ** 6, step=3) + self.idx_dec = RangeIndex(start=10 ** 6, stop=-1, step=-3) def time_max(self): self.idx_inc.max() @@ -73,15 +73,23 @@ def time_min_trivial(self): self.idx_inc.min() def time_get_loc_inc(self): - self.idx_inc.get_loc(900000) + self.idx_inc.get_loc(900_000) def time_get_loc_dec(self): - self.idx_dec.get_loc(100000) + self.idx_dec.get_loc(100_000) + + def time_iter_inc(self): + for _ in self.idx_inc: + pass + + def time_iter_dec(self): + for _ in self.idx_dec: + pass class IndexEquals: def setup(self): - idx_large_fast = RangeIndex(100000) + idx_large_fast = RangeIndex(100_000) idx_small_slow = date_range(start="1/1/2012", periods=1) self.mi_large_slow = MultiIndex.from_product([idx_large_fast, idx_small_slow]) @@ -94,7 +102,7 @@ def time_non_object_equals_multiindex(self): class IndexAppend: def setup(self): - N = 10000 + N = 10_000 self.range_idx = RangeIndex(0, 100) self.int_idx = self.range_idx.astype(int) self.obj_idx = self.int_idx.astype(str) @@ -168,7 +176,7 @@ def time_get_loc_non_unique_sorted(self, dtype): class Float64IndexMethod: # GH 13166 def setup(self): - N = 100000 + N = 100_000 a = np.arange(N) self.ind = Float64Index(a * 4.8000000418824129e-08) @@ -212,7 +220,7 @@ class GC: params = [1, 2, 5] def create_use_drop(self): - idx = Index(list(range(1000 * 1000))) + idx = Index(list(range(1_000_000))) idx._engine def peakmem_gc_instances(self, N): diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py index 6080c32052266..c65c3d5ff3d9c 100644 --- a/pandas/core/indexes/range.py +++ b/pandas/core/indexes/range.py @@ -373,6 +373,10 @@ def get_indexer(self, target, method=None, limit=None, tolerance=None): def tolist(self): return list(self._range) + @doc(Int64Index.__iter__) + def __iter__(self): + yield from self._range + @doc(Int64Index._shallow_copy) def _shallow_copy(self, values=None, name: Label = no_default): name = self.name if name is no_default else name diff --git a/pandas/tests/indexes/ranges/test_range.py b/pandas/tests/indexes/ranges/test_range.py index ef4bb9a0869b0..c4c242746e92c 100644 --- a/pandas/tests/indexes/ranges/test_range.py +++ b/pandas/tests/indexes/ranges/test_range.py @@ -167,6 +167,10 @@ def test_cache(self): idx.any() assert idx._cache == {} + for _ in idx: + pass + assert idx._cache == {} + df = pd.DataFrame({"a": range(10)}, index=idx) df.loc[50] From 542b20aef5789b3eb90d49d3624c78997761c011 Mon Sep 17 00:00:00 2001 From: SylvainLan Date: Thu, 13 Aug 2020 20:17:39 +0200 Subject: [PATCH 264/632] Refactor tables latex (#35649) --- pandas/io/formats/latex.py | 103 +++++++---------------- pandas/tests/io/formats/test_to_latex.py | 3 +- 2 files changed, 33 insertions(+), 73 deletions(-) diff --git a/pandas/io/formats/latex.py b/pandas/io/formats/latex.py index 5d6f0a08ef2b5..715b8bbdf5672 100644 --- a/pandas/io/formats/latex.py +++ b/pandas/io/formats/latex.py @@ -121,10 +121,7 @@ def pad_empties(x): else: column_format = self.column_format - if self.longtable: - self._write_longtable_begin(buf, column_format) - else: - self._write_tabular_begin(buf, column_format) + self._write_tabular_begin(buf, 
column_format)
 buf.write("\\toprule\n")
@@ -190,10 +187,7 @@ def pad_empties(x):
 if self.multirow and i < len(strrows) - 1:
 self._print_cline(buf, i, len(strcols))
- if self.longtable:
- self._write_longtable_end(buf)
- else:
- self._write_tabular_end(buf)
+ self._write_tabular_end(buf)
 def _format_multicolumn(self, row: List[str], ilevels: int) -> List[str]:
 r"""
@@ -288,7 +282,7 @@ def _write_tabular_begin(self, buf, column_format: str):
 for 3 columns
 """
 if self._table_float:
- # then write output in a nested table/tabular environment
+ # then write output in a nested table/tabular or longtable environment
 if self.caption is None:
 caption_ = ""
 else:
@@ -304,12 +298,27 @@ def _write_tabular_begin(self, buf, column_format: str):
 else:
 position_ = f"[{self.position}]"
- buf.write(f"\\begin{{table}}{position_}\n\\centering{caption_}{label_}\n")
+ if self.longtable:
+ table_ = f"\\begin{{longtable}}{position_}{{{column_format}}}"
+ tabular_ = "\n"
+ else:
+ table_ = f"\\begin{{table}}{position_}\n\\centering"
+ tabular_ = f"\n\\begin{{tabular}}{{{column_format}}}\n"
+
+ if self.longtable and (self.caption is not None or self.label is not None):
+ # a double-backslash is required at the end of the line
+ # as discussed here:
+ # https://tex.stackexchange.com/questions/219138
+ backslash_ = "\\\\"
+ else:
+ backslash_ = ""
+ buf.write(f"{table_}{caption_}{label_}{backslash_}{tabular_}")
 else:
- # then write output only in a tabular environment
- pass
-
- buf.write(f"\\begin{{tabular}}{{{column_format}}}\n")
+ if self.longtable:
+ tabletype_ = "longtable"
+ else:
+ tabletype_ = "tabular"
+ buf.write(f"\\begin{{{tabletype_}}}{{{column_format}}}\n")
 def _write_tabular_end(self, buf):
 """
@@ -323,62 +332,12 @@ def _write_tabular_end(self, buf):
 a string.
- - """ - buf.write("\\end{longtable}\n") + buf.write("\\bottomrule\n") + buf.write("\\end{tabular}\n") + if self._table_float: + buf.write("\\end{table}\n") + else: + pass diff --git a/pandas/tests/io/formats/test_to_latex.py b/pandas/tests/io/formats/test_to_latex.py index 93ad3739e59c7..96a9ed2b86cf4 100644 --- a/pandas/tests/io/formats/test_to_latex.py +++ b/pandas/tests/io/formats/test_to_latex.py @@ -555,7 +555,8 @@ def test_to_latex_longtable_caption_label(self): result_cl = df.to_latex(longtable=True, caption=the_caption, label=the_label) expected_cl = r"""\begin{longtable}{lrl} -\caption{a table in a \texttt{longtable} environment}\label{tab:longtable}\\ +\caption{a table in a \texttt{longtable} environment} +\label{tab:longtable}\\ \toprule {} & a & b \\ \midrule From ed23eb894d2ce1c4e1ae2f530cb0394db0c4d8bc Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Thu, 13 Aug 2020 12:18:47 -0700 Subject: [PATCH 265/632] CI: avoid file leaks in sas_xport tests (#35693) --- pandas/io/sas/sasreader.py | 10 ++++++++-- pandas/tests/io/sas/test_xport.py | 13 ++++++++++-- pandas/util/_test_decorators.py | 33 +++++++++++++++++++++---------- 3 files changed, 42 insertions(+), 14 deletions(-) diff --git a/pandas/io/sas/sasreader.py b/pandas/io/sas/sasreader.py index 291c9d1ee7f0c..fffdebda8c87a 100644 --- a/pandas/io/sas/sasreader.py +++ b/pandas/io/sas/sasreader.py @@ -6,7 +6,7 @@ from pandas._typing import FilePathOrBuffer, Label -from pandas.io.common import stringify_path +from pandas.io.common import get_filepath_or_buffer, stringify_path if TYPE_CHECKING: from pandas import DataFrame # noqa: F401 @@ -109,6 +109,10 @@ def read_sas( else: raise ValueError("unable to infer format of SAS file") + filepath_or_buffer, _, _, should_close = get_filepath_or_buffer( + filepath_or_buffer, encoding + ) + reader: ReaderBase if format.lower() == "xport": from pandas.io.sas.sas_xport import XportReader @@ -129,5 +133,7 @@ def read_sas( return reader data = reader.read() - reader.close() + + if should_close: + reader.close() return data diff --git a/pandas/tests/io/sas/test_xport.py b/pandas/tests/io/sas/test_xport.py index 2682bafedb8f1..939edb3d8e0b4 100644 --- a/pandas/tests/io/sas/test_xport.py +++ b/pandas/tests/io/sas/test_xport.py @@ -3,6 +3,8 @@ import numpy as np import pytest +import pandas.util._test_decorators as td + import pandas as pd import pandas._testing as tm @@ -26,10 +28,12 @@ def setup_method(self, datapath): self.dirpath = datapath("io", "sas", "data") self.file01 = os.path.join(self.dirpath, "DEMO_G.xpt") self.file02 = os.path.join(self.dirpath, "SSHSV1_A.xpt") - self.file02b = open(os.path.join(self.dirpath, "SSHSV1_A.xpt"), "rb") self.file03 = os.path.join(self.dirpath, "DRXFCD_G.xpt") self.file04 = os.path.join(self.dirpath, "paxraw_d_short.xpt") + with td.file_leak_context(): + yield + def test1_basic(self): # Tests with DEMO_G.xpt (all numeric file) @@ -127,7 +131,12 @@ def test2_binary(self): data_csv = pd.read_csv(self.file02.replace(".xpt", ".csv")) numeric_as_float(data_csv) - data = read_sas(self.file02b, format="xport") + with open(self.file02, "rb") as fd: + with td.file_leak_context(): + # GH#35693 ensure that if we pass an open file, we + # dont incorrectly close it in read_sas + data = read_sas(fd, format="xport") + tm.assert_frame_equal(data, data_csv) def test_multiple_types(self): diff --git a/pandas/util/_test_decorators.py b/pandas/util/_test_decorators.py index bdf633839b2cd..0dad8c7397e37 100644 --- a/pandas/util/_test_decorators.py +++ 
b/pandas/util/_test_decorators.py
@@ -23,8 +23,8 @@ def test_foo():
 For more information, refer to the ``pytest`` documentation on ``skipif``.
 """
+from contextlib import contextmanager
 from distutils.version import LooseVersion
-from functools import wraps
 import locale
 from typing import Callable, Optional
@@ -237,23 +237,36 @@ def documented_fixture(fixture):
 def check_file_leaks(func) -> Callable:
 """
- Decorate a test function tot check that we are not leaking file descriptors.
+ Decorate a test function to check that we are not leaking file descriptors.
 """
- psutil = safe_import("psutil")
- if not psutil:
+ with file_leak_context():
 return func
- @wraps(func)
- def new_func(*args, **kwargs):
+
+@contextmanager
+def file_leak_context():
+ """
+ ContextManager analogue to check_file_leaks.
+ """
+ psutil = safe_import("psutil")
+ if not psutil:
+ yield
+ else:
 proc = psutil.Process()
 flist = proc.open_files()
+ conns = proc.connections()
- func(*args, **kwargs)
+ yield
 flist2 = proc.open_files()
- assert flist2 == flist
-
- return new_func
+ # on some builds open_files includes file position, which we _don't_
+ # expect to remain unchanged, so we need to compare excluding that
+ flist_ex = [(x.path, x.fd) for x in flist]
+ flist2_ex = [(x.path, x.fd) for x in flist2]
+ assert flist2_ex == flist_ex, (flist2, flist)
+
+ conns2 = proc.connections()
+ assert conns2 == conns, (conns2, conns)
 def async_mark():

From 59febbd2554eee524ccf9938eac9491bbedb1e58 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Torsten=20W=C3=B6rtwein?=
Date: Thu, 13 Aug 2020 18:04:49 -0400
Subject: [PATCH 266/632] BUG/ENH: consistent gzip compression arguments (#35645)

---
 doc/source/user_guide/io.rst | 11 +++++---
 doc/source/whatsnew/v1.2.0.rst | 1 +
 pandas/_typing.py | 5 ++++
 pandas/core/generic.py | 13 +++++++--
 pandas/io/common.py | 31 +++++++++++----------
 pandas/io/formats/csvs.py | 6 ++--
 pandas/io/json/_json.py | 31 +++++++++++++++------
 pandas/io/pickle.py | 8 +++---
 pandas/io/stata.py | 19 ++++---------
 pandas/tests/io/test_compression.py | 43 +++++++++++++++++++++++++++++
 10 files changed, 118 insertions(+), 50 deletions(-)

diff --git a/doc/source/user_guide/io.rst b/doc/source/user_guide/io.rst
index 35403b5c8b66f..43030d76d945a 100644
--- a/doc/source/user_guide/io.rst
+++ b/doc/source/user_guide/io.rst
@@ -287,16 +287,19 @@ Quoting, compression, and file format
 compression : {``'infer'``, ``'gzip'``, ``'bz2'``, ``'zip'``, ``'xz'``, ``None``, ``dict``}, default ``'infer'``
 For on-the-fly decompression of on-disk data. If 'infer', then use gzip,
- bz2, zip, or xz if filepath_or_buffer is a string ending in '.gz', '.bz2',
+ bz2, zip, or xz if ``filepath_or_buffer`` is path-like ending in '.gz', '.bz2',
 '.zip', or '.xz', respectively, and no decompression otherwise. If using 'zip',
 the ZIP file must contain only one data file to be read in. Set to
 ``None`` for no decompression. Can also be a dict with key ``'method'``
- set to one of {``'zip'``, ``'gzip'``, ``'bz2'``}, and other keys set to
- compression settings. As an example, the following could be passed for
- faster compression: ``compression={'method': 'gzip', 'compresslevel': 1}``.
+ set to one of {``'zip'``, ``'gzip'``, ``'bz2'``} and other key-value pairs are
+ forwarded to ``zipfile.ZipFile``, ``gzip.GzipFile``, or ``bz2.BZ2File``.
+ As an example, the following could be passed for faster compression and to
+ create a reproducible gzip archive:
+ ``compression={'method': 'gzip', 'compresslevel': 1, 'mtime': 1}``.
 ..
versionchanged:: 0.24.0 'infer' option added and set to default.
 .. versionchanged:: 1.1.0 dict option extended to support ``gzip`` and ``bz2``.
 .. versionchanged:: 1.2.0 Previous versions forwarded dict entries for 'gzip' to `gzip.open`.
 thousands : str, default ``None``
 Thousands separator.
 decimal : str, default ``'.'``
diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index deb5697053ea8..6612f741d925d 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -235,6 +235,7 @@ I/O
 - Bug in :meth:`to_csv` caused a ``ValueError`` when it was called with a filename in combination with ``mode`` containing a ``b`` (:issue:`35058`)
 - In :meth:`read_csv` `float_precision='round_trip'` now handles `decimal` and `thousands` parameters (:issue:`35365`)
 - :meth:`to_pickle` and :meth:`read_pickle` were closing user-provided file objects (:issue:`35679`)
+- :meth:`to_csv` now always passes compression arguments for `'gzip'` to `gzip.GzipFile` (:issue:`28103`)

 Plotting
 ^^^^^^^^
diff --git a/pandas/_typing.py b/pandas/_typing.py
index 47a102ddc70e0..1b972030ef5a5 100644
--- a/pandas/_typing.py
+++ b/pandas/_typing.py
@@ -109,3 +109,8 @@

 # for arbitrary kwargs passed during reading/writing files
 StorageOptions = Optional[Dict[str, Any]]
+
+
+# compression keywords and compression
+CompressionDict = Mapping[str, Optional[Union[str, int, bool]]]
+CompressionOptions = Optional[Union[str, CompressionDict]]
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 11147bffa32c3..2219d54477d9e 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -35,6 +35,7 @@
 from pandas._libs.tslibs import Tick, Timestamp, to_offset
 from pandas._typing import (
 Axis,
+ CompressionOptions,
 FilePathOrBuffer,
 FrameOrSeries,
 JSONSerializable,
@@ -2058,7 +2059,7 @@ def to_json(
 date_unit: str = "ms",
 default_handler: Optional[Callable[[Any], JSONSerializable]] = None,
 lines: bool_t = False,
- compression: Optional[str] = "infer",
+ compression: CompressionOptions = "infer",
 index: bool_t = True,
 indent: Optional[int] = None,
 storage_options: StorageOptions = None,
@@ -2646,7 +2647,7 @@ def to_sql(
 def to_pickle(
 self,
 path,
- compression: Optional[str] = "infer",
+ compression: CompressionOptions = "infer",
 protocol: int = pickle.HIGHEST_PROTOCOL,
 storage_options: StorageOptions = None,
 ) -> None:
@@ -3053,7 +3054,7 @@ def to_csv(
 index_label: Optional[Union[bool_t, str, Sequence[Label]]] = None,
 mode: str = "w",
 encoding: Optional[str] = None,
- compression: Optional[Union[str, Mapping[str, str]]] = "infer",
+ compression: CompressionOptions = "infer",
 quoting: Optional[int] = None,
 quotechar: str = '"',
 line_terminator: Optional[str] = None,
@@ -3144,6 +3145,12 @@ def to_csv(
 Compression is supported for binary file objects.
+
+ .. versionchanged:: 1.2.0
+
+ Previous versions forwarded dict entries for 'gzip' to
+ `gzip.open` instead of `gzip.GzipFile` which prevented
+ setting `mtime`.
+
 quoting : optional constant from csv module
 Defaults to csv.QUOTE_MINIMAL.
If you have set a `float_format` then floats are converted to strings and thus csv.QUOTE_NONNUMERIC diff --git a/pandas/io/common.py b/pandas/io/common.py index 9ac642e58b544..54f35e689aac8 100644 --- a/pandas/io/common.py +++ b/pandas/io/common.py @@ -18,7 +18,6 @@ Optional, Tuple, Type, - Union, ) from urllib.parse import ( urljoin, @@ -29,7 +28,12 @@ ) import zipfile -from pandas._typing import FilePathOrBuffer, StorageOptions +from pandas._typing import ( + CompressionDict, + CompressionOptions, + FilePathOrBuffer, + StorageOptions, +) from pandas.compat import _get_lzma_file, _import_lzma from pandas.compat._optional import import_optional_dependency @@ -160,7 +164,7 @@ def is_fsspec_url(url: FilePathOrBuffer) -> bool: def get_filepath_or_buffer( filepath_or_buffer: FilePathOrBuffer, encoding: Optional[str] = None, - compression: Optional[str] = None, + compression: CompressionOptions = None, mode: Optional[str] = None, storage_options: StorageOptions = None, ): @@ -188,7 +192,7 @@ def get_filepath_or_buffer( Returns ------- - Tuple[FilePathOrBuffer, str, str, bool] + Tuple[FilePathOrBuffer, str, CompressionOptions, bool] Tuple containing the filepath or buffer, the encoding, the compression and should_close. """ @@ -291,8 +295,8 @@ def file_path_to_url(path: str) -> str: def get_compression_method( - compression: Optional[Union[str, Mapping[str, Any]]] -) -> Tuple[Optional[str], Dict[str, Any]]: + compression: CompressionOptions, +) -> Tuple[Optional[str], CompressionDict]: """ Simplifies a compression argument to a compression method string and a mapping containing additional arguments. @@ -316,7 +320,7 @@ def get_compression_method( if isinstance(compression, Mapping): compression_args = dict(compression) try: - compression_method = compression_args.pop("method") + compression_method = compression_args.pop("method") # type: ignore except KeyError as err: raise ValueError("If mapping, compression must have key 'method'") from err else: @@ -383,7 +387,7 @@ def get_handle( path_or_buf, mode: str, encoding=None, - compression: Optional[Union[str, Mapping[str, Any]]] = None, + compression: CompressionOptions = None, memory_map: bool = False, is_text: bool = True, errors=None, @@ -464,16 +468,13 @@ def get_handle( # GZ Compression if compression == "gzip": if is_path: - f = gzip.open(path_or_buf, mode, **compression_args) + f = gzip.GzipFile(filename=path_or_buf, mode=mode, **compression_args) else: f = gzip.GzipFile(fileobj=path_or_buf, mode=mode, **compression_args) # BZ Compression elif compression == "bz2": - if is_path: - f = bz2.BZ2File(path_or_buf, mode, **compression_args) - else: - f = bz2.BZ2File(path_or_buf, mode=mode, **compression_args) + f = bz2.BZ2File(path_or_buf, mode=mode, **compression_args) # ZIP Compression elif compression == "zip": @@ -577,7 +578,9 @@ def __init__( if mode in ["wb", "rb"]: mode = mode.replace("b", "") self.archive_name = archive_name - super().__init__(file, mode, zipfile.ZIP_DEFLATED, **kwargs) + kwargs_zip: Dict[str, Any] = {"compression": zipfile.ZIP_DEFLATED} + kwargs_zip.update(kwargs) + super().__init__(file, mode, **kwargs_zip) def write(self, data): archive_name = self.filename diff --git a/pandas/io/formats/csvs.py b/pandas/io/formats/csvs.py index 6eceb94387171..c462a96da7133 100644 --- a/pandas/io/formats/csvs.py +++ b/pandas/io/formats/csvs.py @@ -5,13 +5,13 @@ import csv as csvlib from io import StringIO, TextIOWrapper import os -from typing import Hashable, List, Mapping, Optional, Sequence, Union +from typing import Hashable, List, 
Optional, Sequence, Union import warnings import numpy as np from pandas._libs import writers as libwriters -from pandas._typing import FilePathOrBuffer, StorageOptions +from pandas._typing import CompressionOptions, FilePathOrBuffer, StorageOptions from pandas.core.dtypes.generic import ( ABCDatetimeIndex, @@ -44,7 +44,7 @@ def __init__( mode: str = "w", encoding: Optional[str] = None, errors: str = "strict", - compression: Union[str, Mapping[str, str], None] = "infer", + compression: CompressionOptions = "infer", quoting: Optional[int] = None, line_terminator="\n", chunksize: Optional[int] = None, diff --git a/pandas/io/json/_json.py b/pandas/io/json/_json.py index 0d2b351926343..c2bd6302940bb 100644 --- a/pandas/io/json/_json.py +++ b/pandas/io/json/_json.py @@ -3,13 +3,13 @@ from io import BytesIO, StringIO from itertools import islice import os -from typing import Any, Callable, Optional, Type +from typing import IO, Any, Callable, List, Optional, Type import numpy as np import pandas._libs.json as json from pandas._libs.tslibs import iNaT -from pandas._typing import JSONSerializable, StorageOptions +from pandas._typing import CompressionOptions, JSONSerializable, StorageOptions from pandas.errors import AbstractMethodError from pandas.util._decorators import deprecate_kwarg, deprecate_nonkeyword_arguments @@ -19,7 +19,12 @@ from pandas.core.construction import create_series_with_explicit_dtype from pandas.core.reshape.concat import concat -from pandas.io.common import get_filepath_or_buffer, get_handle, infer_compression +from pandas.io.common import ( + get_compression_method, + get_filepath_or_buffer, + get_handle, + infer_compression, +) from pandas.io.json._normalize import convert_to_line_delimits from pandas.io.json._table_schema import build_table_schema, parse_table_schema from pandas.io.parsers import _validate_integer @@ -41,7 +46,7 @@ def to_json( date_unit: str = "ms", default_handler: Optional[Callable[[Any], JSONSerializable]] = None, lines: bool = False, - compression: Optional[str] = "infer", + compression: CompressionOptions = "infer", index: bool = True, indent: int = 0, storage_options: StorageOptions = None, @@ -369,7 +374,7 @@ def read_json( encoding=None, lines: bool = False, chunksize: Optional[int] = None, - compression="infer", + compression: CompressionOptions = "infer", nrows: Optional[int] = None, storage_options: StorageOptions = None, ): @@ -607,7 +612,9 @@ def read_json( if encoding is None: encoding = "utf-8" - compression = infer_compression(path_or_buf, compression) + compression_method, compression = get_compression_method(compression) + compression_method = infer_compression(path_or_buf, compression_method) + compression = dict(compression, method=compression_method) filepath_or_buffer, _, compression, should_close = get_filepath_or_buffer( path_or_buf, encoding=encoding, @@ -667,10 +674,13 @@ def __init__( encoding, lines: bool, chunksize: Optional[int], - compression, + compression: CompressionOptions, nrows: Optional[int], ): + compression_method, compression = get_compression_method(compression) + compression = dict(compression, method=compression_method) + self.orient = orient self.typ = typ self.dtype = dtype @@ -687,6 +697,7 @@ def __init__( self.nrows_seen = 0 self.should_close = False self.nrows = nrows + self.file_handles: List[IO] = [] if self.chunksize is not None: self.chunksize = _validate_integer("chunksize", self.chunksize, 1) @@ -735,8 +746,8 @@ def _get_data_from_filepath(self, filepath_or_buffer): except (TypeError, ValueError): 
pass - if exists or self.compression is not None: - data, _ = get_handle( + if exists or self.compression["method"] is not None: + data, self.file_handles = get_handle( filepath_or_buffer, "r", encoding=self.encoding, @@ -816,6 +827,8 @@ def close(self): self.open_stream.close() except (IOError, AttributeError): pass + for file_handle in self.file_handles: + file_handle.close() def __next__(self): if self.nrows: diff --git a/pandas/io/pickle.py b/pandas/io/pickle.py index eee6ec7c9feca..fc1d2e385cf72 100644 --- a/pandas/io/pickle.py +++ b/pandas/io/pickle.py @@ -1,9 +1,9 @@ """ pickle compat """ import pickle -from typing import Any, Optional +from typing import Any import warnings -from pandas._typing import FilePathOrBuffer, StorageOptions +from pandas._typing import CompressionOptions, FilePathOrBuffer, StorageOptions from pandas.compat import pickle_compat as pc from pandas.io.common import get_filepath_or_buffer, get_handle @@ -12,7 +12,7 @@ def to_pickle( obj: Any, filepath_or_buffer: FilePathOrBuffer, - compression: Optional[str] = "infer", + compression: CompressionOptions = "infer", protocol: int = pickle.HIGHEST_PROTOCOL, storage_options: StorageOptions = None, ): @@ -114,7 +114,7 @@ def to_pickle( def read_pickle( filepath_or_buffer: FilePathOrBuffer, - compression: Optional[str] = "infer", + compression: CompressionOptions = "infer", storage_options: StorageOptions = None, ): """ diff --git a/pandas/io/stata.py b/pandas/io/stata.py index 7a25617885839..ec3819f1673a8 100644 --- a/pandas/io/stata.py +++ b/pandas/io/stata.py @@ -35,7 +35,7 @@ from pandas._libs.lib import infer_dtype from pandas._libs.writers import max_len_string_array -from pandas._typing import FilePathOrBuffer, Label, StorageOptions +from pandas._typing import CompressionOptions, FilePathOrBuffer, Label, StorageOptions from pandas.util._decorators import Appender from pandas.core.dtypes.common import ( @@ -1938,9 +1938,9 @@ def read_stata( def _open_file_binary_write( fname: FilePathOrBuffer, - compression: Union[str, Mapping[str, str], None], + compression: CompressionOptions, storage_options: StorageOptions = None, -) -> Tuple[BinaryIO, bool, Optional[Union[str, Mapping[str, str]]]]: +) -> Tuple[BinaryIO, bool, CompressionOptions]: """ Open a binary file or no-op if file-like. 
@@ -1978,17 +1978,10 @@ def _open_file_binary_write( # Extract compression mode as given, if dict compression_typ, compression_args = get_compression_method(compression) compression_typ = infer_compression(fname, compression_typ) - path_or_buf, _, compression_typ, _ = get_filepath_or_buffer( - fname, - mode="wb", - compression=compression_typ, - storage_options=storage_options, + compression = dict(compression_args, method=compression_typ) + path_or_buf, _, compression, _ = get_filepath_or_buffer( + fname, mode="wb", compression=compression, storage_options=storage_options, ) - if compression_typ is not None: - compression = compression_args - compression["method"] = compression_typ - else: - compression = None f, _ = get_handle(path_or_buf, "wb", compression=compression, is_text=False) return f, True, compression else: diff --git a/pandas/tests/io/test_compression.py b/pandas/tests/io/test_compression.py index 902a3d5d2a397..bc14b485f75e5 100644 --- a/pandas/tests/io/test_compression.py +++ b/pandas/tests/io/test_compression.py @@ -1,7 +1,10 @@ +import io import os +from pathlib import Path import subprocess import sys import textwrap +import time import pytest @@ -130,6 +133,46 @@ def test_compression_binary(compression_only): ) +def test_gzip_reproducibility_file_name(): + """ + Gzip should create reproducible archives with mtime. + + Note: Archives created with different filenames will still be different! + + GH 28103 + """ + df = tm.makeDataFrame() + compression_options = {"method": "gzip", "mtime": 1} + + # test for filename + with tm.ensure_clean() as path: + path = Path(path) + df.to_csv(path, compression=compression_options) + time.sleep(2) + output = path.read_bytes() + df.to_csv(path, compression=compression_options) + assert output == path.read_bytes() + + +def test_gzip_reproducibility_file_object(): + """ + Gzip should create reproducible archives with mtime. 
+
+ GH 28103
+ """
+ df = tm.makeDataFrame()
+ compression_options = {"method": "gzip", "mtime": 1}
+
+ # test for file object
+ buffer = io.BytesIO()
+ df.to_csv(buffer, compression=compression_options, mode="wb")
+ output = buffer.getvalue()
+ time.sleep(2)
+ buffer = io.BytesIO()
+ df.to_csv(buffer, compression=compression_options, mode="wb")
+ assert output == buffer.getvalue()
+
+
 def test_with_missing_lzma():
 """Tests if import pandas works when lzma is not present."""
 # https://github.com/pandas-dev/pandas/issues/27575

From 6ea8474f502a14a97fd1407e9a073228abe36c7e Mon Sep 17 00:00:00 2001
From: Simon Hawkins
Date: Fri, 14 Aug 2020 02:50:51 +0100
Subject: [PATCH 267/632] REGR: DataFrame.reset_index() on empty DataFrame with MI and datetime level (#35673)

---
 doc/source/whatsnew/v1.1.1.rst | 1 +
 pandas/core/frame.py | 2 +-
 .../tests/frame/methods/test_reset_index.py | 30 +++++++++++++++++++
 3 files changed, 32 insertions(+), 1 deletion(-)

diff --git a/doc/source/whatsnew/v1.1.1.rst b/doc/source/whatsnew/v1.1.1.rst
index b37103910afab..98d67e930ccc0 100644
--- a/doc/source/whatsnew/v1.1.1.rst
+++ b/doc/source/whatsnew/v1.1.1.rst
@@ -22,6 +22,7 @@ Fixed regressions
 - Fixed regression in :meth:`DataFrame.shift` with ``axis=1`` and heterogeneous dtypes (:issue:`35488`)
 - Fixed regression in ``.groupby(..).rolling(..)`` where a segfault would occur with ``center=True`` and an odd number of values (:issue:`35552`)
 - Fixed regression in :meth:`DataFrame.apply` where functions that altered the input in-place only operated on a single row (:issue:`35462`)
+- Fixed regression where :meth:`DataFrame.reset_index` would raise a ``ValueError`` on empty :class:`DataFrame` with a :class:`MultiIndex` with a ``datetime64`` dtype level (:issue:`35606`, :issue:`35657`)
 - Fixed regression where :meth:`DataFrame.merge_asof` would raise an ``UnboundLocalError`` when ``left_index``, ``right_index`` and ``tolerance`` were set (:issue:`35558`)
 - Fixed regression in ``.groupby(..).rolling(..)`` where a custom ``BaseIndexer`` would be ignored (:issue:`35557`)

diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 547d86f221b5f..1587dd8798ec3 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -4816,7 +4816,7 @@ def _maybe_casted_values(index, labels=None):
 # we can have situations where the whole mask is -1,
 # meaning there is nothing found in labels, so make all nan's
- if mask.all():
+ if mask.size > 0 and mask.all():
 dtype = index.dtype
 fill_value = na_value_for_dtype(dtype)
 values = construct_1d_arraylike_from_scalar(
diff --git a/pandas/tests/frame/methods/test_reset_index.py b/pandas/tests/frame/methods/test_reset_index.py
index da4bfa9be4881..b88ef0e6691cb 100644
--- a/pandas/tests/frame/methods/test_reset_index.py
+++ b/pandas/tests/frame/methods/test_reset_index.py
@@ -318,3 +318,33 @@ def test_reset_index_dtypes_on_empty_frame_with_multiindex(array, dtype):
 result = DataFrame(index=idx)[:0].reset_index().dtypes
 expected = Series({"level_0": np.int64, "level_1": np.float64, "level_2": dtype})
 tm.assert_series_equal(result, expected)
+
+
+def test_reset_index_empty_frame_with_datetime64_multiindex():
+ # https://github.com/pandas-dev/pandas/issues/35606
+ idx = MultiIndex(
+ levels=[[pd.Timestamp("2020-07-20 00:00:00")], [3, 4]],
+ codes=[[], []],
+ names=["a", "b"],
+ )
+ df = DataFrame(index=idx, columns=["c", "d"])
+ result = df.reset_index()
+ expected = DataFrame(
+ columns=list("abcd"), index=RangeIndex(start=0, stop=0, step=1)
+ )
+ expected["a"] =
expected["a"].astype("datetime64[ns]") + expected["b"] = expected["b"].astype("int64") + tm.assert_frame_equal(result, expected) + + +def test_reset_index_empty_frame_with_datetime64_multiindex_from_groupby(): + # https://github.com/pandas-dev/pandas/issues/35657 + df = DataFrame(dict(c1=[10.0], c2=["a"], c3=pd.to_datetime("2020-01-01"))) + df = df.head(0).groupby(["c2", "c3"])[["c1"]].sum() + result = df.reset_index() + expected = DataFrame( + columns=["c2", "c3", "c1"], index=RangeIndex(start=0, stop=0, step=1) + ) + expected["c3"] = expected["c3"].astype("datetime64[ns]") + expected["c1"] = expected["c1"].astype("float64") + tm.assert_frame_equal(result, expected) From faa6e36e482ec5f00e6502c1e5d4fdf4f2f08009 Mon Sep 17 00:00:00 2001 From: Simon Hawkins Date: Fri, 14 Aug 2020 13:32:00 +0100 Subject: [PATCH 268/632] CLN: remove extant uses of built-in filter function (#35717) --- pandas/_config/localization.py | 14 ++++++++------ pandas/core/computation/expr.py | 7 +++---- pandas/core/reshape/merge.py | 7 +++++-- pandas/io/json/_json.py | 5 +++-- pandas/io/parsers.py | 4 +--- pandas/io/pytables.py | 20 +++++++++----------- pandas/tests/computation/test_eval.py | 26 +++++++++++++++----------- 7 files changed, 44 insertions(+), 39 deletions(-) diff --git a/pandas/_config/localization.py b/pandas/_config/localization.py index 66865e1afb952..3933c8f3d519c 100644 --- a/pandas/_config/localization.py +++ b/pandas/_config/localization.py @@ -88,12 +88,14 @@ def _valid_locales(locales, normalize): valid_locales : list A list of valid locales. """ - if normalize: - normalizer = lambda x: locale.normalize(x.strip()) - else: - normalizer = lambda x: x.strip() - - return list(filter(can_set_locale, map(normalizer, locales))) + return [ + loc + for loc in ( + locale.normalize(loc.strip()) if normalize else loc.strip() + for loc in locales + ) + if can_set_locale(loc) + ] def _default_locale_getter(): diff --git a/pandas/core/computation/expr.py b/pandas/core/computation/expr.py index fcccc24ed7615..125ecb0d88036 100644 --- a/pandas/core/computation/expr.py +++ b/pandas/core/computation/expr.py @@ -167,10 +167,9 @@ def _is_type(t): # partition all AST nodes _all_nodes = frozenset( - filter( - lambda x: isinstance(x, type) and issubclass(x, ast.AST), - (getattr(ast, node) for node in dir(ast)), - ) + node + for node in (getattr(ast, name) for name in dir(ast)) + if isinstance(node, type) and issubclass(node, ast.AST) ) diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py index 2349cb1dcc0c7..01e20f49917ac 100644 --- a/pandas/core/reshape/merge.py +++ b/pandas/core/reshape/merge.py @@ -2012,8 +2012,11 @@ def _sort_labels(uniques: np.ndarray, left, right): def _get_join_keys(llab, rlab, shape, sort: bool): # how many levels can be done without overflow - pred = lambda i: not is_int64_overflow_possible(shape[:i]) - nlev = next(filter(pred, range(len(shape), 0, -1))) + nlev = next( + lev + for lev in range(len(shape), 0, -1) + if not is_int64_overflow_possible(shape[:lev]) + ) # get keys for the first `nlev` levels stride = np.prod(shape[1:nlev], dtype="i8") diff --git a/pandas/io/json/_json.py b/pandas/io/json/_json.py index c2bd6302940bb..fe5e172655ae1 100644 --- a/pandas/io/json/_json.py +++ b/pandas/io/json/_json.py @@ -765,8 +765,9 @@ def _combine_lines(self, lines) -> str: """ Combines a list of JSON objects into one JSON object. 
""" - lines = filter(None, map(lambda x: x.strip(), lines)) - return "[" + ",".join(lines) + "]" + return ( + f'[{",".join((line for line in (line.strip() for line in lines) if line))}]' + ) def read(self): """ diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py index 9dc0e1f71d13b..5d49757ce7d58 100644 --- a/pandas/io/parsers.py +++ b/pandas/io/parsers.py @@ -2161,9 +2161,7 @@ def read(self, nrows=None): if self.usecols is not None: columns = self._filter_usecols(columns) - col_dict = dict( - filter(lambda item: item[0] in columns, col_dict.items()) - ) + col_dict = {k: v for k, v in col_dict.items() if k in columns} return index, columns, col_dict diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index 2abc570a04de3..f08e0514a68e1 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -99,22 +99,20 @@ def _ensure_str(name): def _ensure_term(where, scope_level: int): """ - ensure that the where is a Term or a list of Term - this makes sure that we are capturing the scope of variables - that are passed - create the terms here with a frame_level=2 (we are 2 levels down) + Ensure that the where is a Term or a list of Term. + + This makes sure that we are capturing the scope of variables that are + passed create the terms here with a frame_level=2 (we are 2 levels down) """ # only consider list/tuple here as an ndarray is automatically a coordinate # list level = scope_level + 1 if isinstance(where, (list, tuple)): - wlist = [] - for w in filter(lambda x: x is not None, where): - if not maybe_expression(w): - wlist.append(w) - else: - wlist.append(Term(w, scope_level=level)) - where = wlist + where = [ + Term(term, scope_level=level + 1) if maybe_expression(term) else term + for term in where + if term is not None + ] elif maybe_expression(where): where = Term(where, scope_level=level) return where if where is None or len(where) else None diff --git a/pandas/tests/computation/test_eval.py b/pandas/tests/computation/test_eval.py index 08d8d5ca342b7..853ab00853d1b 100644 --- a/pandas/tests/computation/test_eval.py +++ b/pandas/tests/computation/test_eval.py @@ -168,7 +168,7 @@ def setup_ops(self): def setup_method(self, method): self.setup_ops() self.setup_data() - self.current_engines = filter(lambda x: x != self.engine, _engines) + self.current_engines = (engine for engine in _engines if engine != self.engine) def teardown_method(self, method): del self.lhses, self.rhses, self.scalar_rhses, self.scalar_lhses @@ -774,11 +774,9 @@ def setup_class(cls): cls.parser = "python" def setup_ops(self): - self.cmp_ops = list( - filter(lambda x: x not in ("in", "not in"), expr._cmp_ops_syms) - ) + self.cmp_ops = [op for op in expr._cmp_ops_syms if op not in ("in", "not in")] self.cmp2_ops = self.cmp_ops[::-1] - self.bin_ops = [s for s in expr._bool_ops_syms if s not in ("and", "or")] + self.bin_ops = [op for op in expr._bool_ops_syms if op not in ("and", "or")] self.special_case_ops = _special_case_arith_ops_syms self.arith_ops = _good_arith_ops self.unary_ops = "+", "-", "~" @@ -1150,9 +1148,9 @@ def eval(self, *args, **kwargs): return pd.eval(*args, **kwargs) def test_simple_arith_ops(self): - ops = self.arith_ops + ops = (op for op in self.arith_ops if op != "//") - for op in filter(lambda x: x != "//", ops): + for op in ops: ex = f"1 {op} 1" ex2 = f"x {op} 1" ex3 = f"1 {op} (x + 1)" @@ -1637,8 +1635,11 @@ def setup_class(cls): super().setup_class() cls.engine = "numexpr" cls.parser = "python" - cls.arith_ops = expr._arith_ops_syms + expr._cmp_ops_syms - cls.arith_ops = 
filter(lambda x: x not in ("in", "not in"), cls.arith_ops) + cls.arith_ops = [ + op + for op in expr._arith_ops_syms + expr._cmp_ops_syms + if op not in ("in", "not in") + ] def test_check_many_exprs(self): a = 1 # noqa @@ -1726,8 +1727,11 @@ class TestOperationsPythonPython(TestOperationsNumExprPython): def setup_class(cls): super().setup_class() cls.engine = cls.parser = "python" - cls.arith_ops = expr._arith_ops_syms + expr._cmp_ops_syms - cls.arith_ops = filter(lambda x: x not in ("in", "not in"), cls.arith_ops) + cls.arith_ops = [ + op + for op in expr._arith_ops_syms + expr._cmp_ops_syms + if op not in ("in", "not in") + ] class TestOperationsPythonPandas(TestOperationsNumExprPandas): From ba3400d591fd00c41ff7e7f0f91032e72498316c Mon Sep 17 00:00:00 2001 From: attack68 <24256554+attack68@users.noreply.github.com> Date: Fri, 14 Aug 2020 14:36:09 +0200 Subject: [PATCH 269/632] BUG: Styler cell_ids fails on multiple renders (#35664) --- doc/source/whatsnew/v1.1.1.rst | 2 +- pandas/io/formats/style.py | 14 +++++++------- pandas/tests/io/formats/test_style.py | 5 ++++- 3 files changed, 12 insertions(+), 9 deletions(-) diff --git a/doc/source/whatsnew/v1.1.1.rst b/doc/source/whatsnew/v1.1.1.rst index 98d67e930ccc0..3f177b29d52b8 100644 --- a/doc/source/whatsnew/v1.1.1.rst +++ b/doc/source/whatsnew/v1.1.1.rst @@ -33,7 +33,7 @@ Fixed regressions Bug fixes ~~~~~~~~~ -- Bug in ``Styler`` whereby `cell_ids` argument had no effect due to other recent changes (:issue:`35588`). +- Bug in ``Styler`` whereby `cell_ids` argument had no effect due to other recent changes (:issue:`35588`) (:issue:`35663`). Categorical ^^^^^^^^^^^ diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py index 584f42a6cab12..3bbb5271bce61 100644 --- a/pandas/io/formats/style.py +++ b/pandas/io/formats/style.py @@ -390,16 +390,16 @@ def format_attr(pair): "is_visible": (c not in hidden_columns), } # only add an id if the cell has a style + props = [] if self.cell_ids or (r, c) in ctx: row_dict["id"] = "_".join(cs[1:]) + for x in ctx[r, c]: + # have to handle empty styles like [''] + if x.count(":"): + props.append(tuple(x.split(":"))) + else: + props.append(("", "")) row_es.append(row_dict) - props = [] - for x in ctx[r, c]: - # have to handle empty styles like [''] - if x.count(":"): - props.append(tuple(x.split(":"))) - else: - props.append(("", "")) cellstyle_map[tuple(props)].append(f"row{r}_col{c}") body.append(row_es) diff --git a/pandas/tests/io/formats/test_style.py b/pandas/tests/io/formats/test_style.py index 3ef5157655e78..6025649e9dbec 100644 --- a/pandas/tests/io/formats/test_style.py +++ b/pandas/tests/io/formats/test_style.py @@ -1684,8 +1684,11 @@ def f(a, b, styler): def test_no_cell_ids(self): # GH 35588 + # GH 35663 df = pd.DataFrame(data=[[0]]) - s = Styler(df, uuid="_", cell_ids=False).render() + styler = Styler(df, uuid="_", cell_ids=False) + styler.render() + s = styler.render() # render twice to ensure ctx is not updated assert s.find('') != -1 From 21eb4e6febd6ce0408ae2ac5e8c02056d4fb9f93 Mon Sep 17 00:00:00 2001 From: Joris Van den Bossche Date: Fri, 14 Aug 2020 16:35:21 +0200 Subject: [PATCH 270/632] REGR: fix DataFrame.diff with read-only data (#35707) Co-authored-by: Jeff Reback --- doc/source/whatsnew/v1.1.1.rst | 3 ++- pandas/_libs/algos.pyx | 7 ++++--- pandas/tests/frame/methods/test_diff.py | 9 +++++++++ setup.py | 3 +++ 4 files changed, 18 insertions(+), 4 deletions(-) diff --git a/doc/source/whatsnew/v1.1.1.rst b/doc/source/whatsnew/v1.1.1.rst index 
3f177b29d52b8..85e2a335c55c6 100644 --- a/doc/source/whatsnew/v1.1.1.rst +++ b/doc/source/whatsnew/v1.1.1.rst @@ -16,10 +16,11 @@ Fixed regressions ~~~~~~~~~~~~~~~~~ - Fixed regression where :meth:`DataFrame.to_numpy` would raise a ``RuntimeError`` for mixed dtypes when converting to ``str`` (:issue:`35455`) -- Fixed regression where :func:`read_csv` would raise a ``ValueError`` when ``pandas.options.mode.use_inf_as_na`` was set to ``True`` (:issue:`35493`). +- Fixed regression where :func:`read_csv` would raise a ``ValueError`` when ``pandas.options.mode.use_inf_as_na`` was set to ``True`` (:issue:`35493`) - Fixed regression where :func:`pandas.testing.assert_series_equal` would raise an error when non-numeric dtypes were passed with ``check_exact=True`` (:issue:`35446`) - Fixed regression in :class:`pandas.core.groupby.RollingGroupby` where column selection was ignored (:issue:`35486`) - Fixed regression in :meth:`DataFrame.shift` with ``axis=1`` and heterogeneous dtypes (:issue:`35488`) +- Fixed regression in :meth:`DataFrame.diff` with read-only data (:issue:`35559`) - Fixed regression in ``.groupby(..).rolling(..)`` where a segfault would occur with ``center=True`` and an odd number of values (:issue:`35552`) - Fixed regression in :meth:`DataFrame.apply` where functions that altered the input in-place only operated on a single row (:issue:`35462`) - Fixed regression in :meth:`DataFrame.reset_index` would raise a ``ValueError`` on empty :class:`DataFrame` with a :class:`MultiIndex` with a ``datetime64`` dtype level (:issue:`35606`, :issue:`35657`) diff --git a/pandas/_libs/algos.pyx b/pandas/_libs/algos.pyx index 7e90a8cc681ef..0a70afda893cf 100644 --- a/pandas/_libs/algos.pyx +++ b/pandas/_libs/algos.pyx @@ -1200,14 +1200,15 @@ ctypedef fused out_t: @cython.boundscheck(False) @cython.wraparound(False) def diff_2d( - diff_t[:, :] arr, - out_t[:, :] out, + ndarray[diff_t, ndim=2] arr, # TODO(cython 3) update to "const diff_t[:, :] arr" + ndarray[out_t, ndim=2] out, Py_ssize_t periods, int axis, ): cdef: Py_ssize_t i, j, sx, sy, start, stop - bint f_contig = arr.is_f_contig() + bint f_contig = arr.flags.f_contiguous + # bint f_contig = arr.is_f_contig() # TODO(cython 3) # Disable for unsupported dtype combinations, # see https://github.com/cython/cython/issues/2646 diff --git a/pandas/tests/frame/methods/test_diff.py b/pandas/tests/frame/methods/test_diff.py index 45f134a93a23a..0486fb2d588b6 100644 --- a/pandas/tests/frame/methods/test_diff.py +++ b/pandas/tests/frame/methods/test_diff.py @@ -214,3 +214,12 @@ def test_diff_integer_na(self, axis, expected): # Test case for default behaviour of diff result = df.diff(axis=axis) tm.assert_frame_equal(result, expected) + + def test_diff_readonly(self): + # https://github.com/pandas-dev/pandas/issues/35559 + arr = np.random.randn(5, 2) + arr.flags.writeable = False + df = pd.DataFrame(arr) + result = df.diff() + expected = pd.DataFrame(np.array(df)).diff() + tm.assert_frame_equal(result, expected) diff --git a/setup.py b/setup.py index 43d19d525876b..f6f0cd9aabc0e 100755 --- a/setup.py +++ b/setup.py @@ -456,6 +456,9 @@ def run(self): if sys.version_info[:2] == (3, 8): # GH 33239 extra_compile_args.append("-Wno-error=deprecated-declarations") + # https://github.com/pandas-dev/pandas/issues/35559 + extra_compile_args.append("-Wno-error=unreachable-code") + # enable coverage by building cython files by setting the environment variable # "PANDAS_CYTHON_COVERAGE" (with a Truthy value) or by running build_ext # with 
`--with-cython-coverage` enabled

From 39894938dc6d20487f59c3706e0b46b0fb081296 Mon Sep 17 00:00:00 2001
From: Jiaxiang
Date: Sat, 15 Aug 2020 00:17:37 +0800
Subject: [PATCH 271/632] [BUG] fixed DateOffset pickle bug when months >= 12 (#35258)

---
 doc/source/whatsnew/v1.2.0.rst | 1 +
 pandas/_libs/tslibs/offsets.pyx | 7 -
 .../1.1.0/1.1.0_x86_64_darwin_3.8.5.pickle | Bin 0 -> 127216 bytes
 .../tests/io/generate_legacy_storage_files.py | 8 +-
 .../offsets/data/dateoffset_0_15_2.pickle | 183 ------------------
 pandas/tests/tseries/offsets/test_offsets.py | 25 +--
 6 files changed, 14 insertions(+), 210 deletions(-)
 create mode 100644 pandas/tests/io/data/legacy_pickle/1.1.0/1.1.0_x86_64_darwin_3.8.5.pickle
 delete mode 100644 pandas/tests/tseries/offsets/data/dateoffset_0_15_2.pickle

diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index 6612f741d925d..a3bb6dfd86bd2 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -172,6 +172,7 @@ Datetimelike
 ^^^^^^^^^^^^
 - Bug in :attr:`DatetimeArray.date` where a ``ValueError`` would be raised with a read-only backing array (:issue:`33530`)
 - Bug in ``NaT`` comparisons failing to raise ``TypeError`` on invalid inequality comparisons (:issue:`35046`)
+- Bug in :class:`DateOffset` where attributes reconstructed from pickle files differ from original objects when input values exceed normal ranges (e.g. months=12) (:issue:`34511`)
 - Timedelta
diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx
index ac2725fc58aee..7f0314d737619 100644
--- a/pandas/_libs/tslibs/offsets.pyx
+++ b/pandas/_libs/tslibs/offsets.pyx
@@ -989,13 +989,6 @@ cdef class RelativeDeltaOffset(BaseOffset):
 state["_offset"] = state.pop("offset")
 state["kwds"]["offset"] = state["_offset"]
- if "_offset" in state and not isinstance(state["_offset"], timedelta):
- # relativedelta, we need to populate using its kwds
- offset = state["_offset"]
- odict = offset.__dict__
- kwds = {key: odict[key] for key in odict if odict[key]}
- state.update(kwds)
-
 self.n = state.pop("n")
 self.normalize = state.pop("normalize")
 self._cache = state.pop("_cache", {})
diff --git a/pandas/tests/io/data/legacy_pickle/1.1.0/1.1.0_x86_64_darwin_3.8.5.pickle b/pandas/tests/io/data/legacy_pickle/1.1.0/1.1.0_x86_64_darwin_3.8.5.pickle
new file mode 100644
index 0000000000000000000000000000000000000000..f8df9afff65658d0f58a5b2afb30997cea53e4c8
GIT binary patch
literal 127216
[127,216 bytes of base85-encoded pickle data omitted]
zigVSu=3IAfI5(YJ&TZ$8bJw}&+;<*051mKOW9NzU)OqGScV0Lzomb9l=Z*8$dFQ-$ zJ~$tpPtIrOi}Tg_=6rX4I6s|V&Tr?B^Vj+J-_viqj_bOfD_rR+*LSsR+`tXzhIb>l z5#2~`WH*W%)s5yxcVoCQ-B@mHH;x@Nv8@LVKMs8!b ziQCj|<~Da*xGmifx0M^}wszaNZQXWmd$)tz(e31RcDuM;-EMAow};!)?dA4%`?!7G zer|tvfIH9~ox9%M;BIs`xtrZB z?pAl3yWQR4?sRv#yWKtRUU#3n-#y?SbPu_Q-6QT%_n3R!J>i~oPr0YvGwxaUoO|BA z;9hhuxtHB5?p61id)>X^-gIxdx7|DLUH6`Q-+kadbRW5o-6!r-_nG_Lec`@zU%9W{ zH||^ao%`PX;C^&Jxu4xH?pODl``!KF{&au2zuiCXU-#dC;kP}wEHAbf$BXO5^Wu96yo6pNFR_=zOX?-_l6xt< zlwK+?wU@?A>!tJ3dl|fpUM4TIm&MEKW%IIoIlP=+E-$y2$II*G^YVKIynVihCuzl3ppVv{%L}>y`7$dlkHjUL~)xSH-L9Rr9KQHN2W$Ew8p$$E)kr^XhvI zyoO#Qud&y}Yw9)gntLt0mR^Y0$_w>cdu_b7UOTV7*TL)Pb@DoUUA(SdH?O5cM6dt?#=LKdb7OQ-W+ePH_w~zE$|k4i@e3&5^t%u%v_h^Tkmb~ zHhP=9&E6JotGCVD?(Oh)db_;c-X3qSx6j+}9q`GML{HI6^cHq#Dz=I3Vu#o% zc8T3$kJu~riT&b$I4BN@!{UfIDvpWc;)FOUPKndvj5sUKiSy!uxF{}(%i@Z-Dz1s^ z;)b{>Zi(CCj<_rCiTmP#cqkr;$Kr{2DxQhw;)QrAUWwP@jd&~GiTC1z_$WS!&*F>t zD!z&D;)nPteu>}WkN7M82}|12k*@TlkWwn?OD&BIWH=dKMvxI@BpF#okx^wd8C}MZ zF=Z?nTgH)bWjq;QCXfkbBAHkwkx6ATnOvrjDP<~|TBea{WjdK&W{??WCYf1gky&Ln znO)|PIb|-HTjr5@Wj>i-7LWyHAz4@!kws-OSzMNoC1ojDT9%PzWjR@1R*)5CC0SWk zkyT|iSzXqUHDxVXTh@_vWj$G6HjoWvBiUFskxgYY*<7}eEoF#oB|~Lv*+#aN?PPn| zL3Wg#WM|n$c9q>^ciBVsl)Yqc*+=%3{bYYRKn|3Ho7^sU$enVR+%5OWy>g%2FAvCr@{l|%kI19)m^>~| z$dmGvJT1@2v+|rgFE7Z8@{+tPugI(Nn!GM=$eZ$(ye;p@yYimAFCWN<@{xQjpU9{3 znS3r^$d~e!d@bL|xAL8QFF(kS@{{~5zsRrhoBS?+$e;3;{4M{;zw)27l&u`)Do+U| zl~TUa%BVnvQ{hzv6;VY}kyR8GRYg1u|Wsb;C! zYL1$#=BfE=fm)~*sl{rETB??*Z})~WSsgW9Mzsm*GO+N!px?P`bG zsdlN|YLD8h_No2qfI6rSsl)1sI;xJTWn(8&Z+b2g1V?Ksmtn$x~i_J z>*|KOscxy;>W;dr?y39gfqJMOsmJPxda9nO=jw%esa~nq>WzA<-l_NMgZijGsn6<* z`l`OE@9Ky8seY;7>W})X{wd41eaClw&lkS*mGAr7H-6xU^TYcQ{D^)eKe8XikLpMB zqx&)Zn0_ojwjalj>&Nrs`w9Gnej-1ypTtk&xoPI7px1Yz)>*w?H`vv@hej&fGU&Jr!7xRnzCH#_pDZjK|#xLua z^UM1c{EB`hzp`J&uj*IxtNS(lntm<6wqM7u>(}$^`wjetej~rJ-^6d~H}jkOE&P^# zh~LT&^;`RG{I-5OzrEkV@91~(JNsSyu6{SayWhj_>G$$``+fYrem}pzKfoX85Ap~5 zL;RusFn_o|!XN36@<;n){IUKxf4o1zpXg8WC;MUk6o0Be&7bbi@Mrq7{Mr5-f382z zpYJd57y66*#r_h1slUu$?yvAy`m6la{u+O+zs_IpZ}2zzoBYlG7JsY1&EM|t@OS#V z{N4T@f3LsK-|rvr5Bi7v!~PNfsDI2q?w{~a`ltNU{u%$Qf6hPeU+^#bm;B5A75}P# z&A;y7@NfFJ{M-H=|E_<}zwbZrANr5{$Nm%lssGG>?!WL~`mg-g{u}?T|IUB!fABy0 zpZw4M7yqmO&HwKI@PGQh{NMf`|F8egx3sMt?P^a8Ew$3V*4pSmhtuJ81RYUF(vfu( z9aTru(RBw(`Q|HpTbsn8p=hOLh0bNiR(uH*qT~rs-#dQf?QkT-Dbs1e&m(%5S z1zk~B(v@`;T~$}p)pZSBQ`ge9bsb$-*VFZN1Km(J(v5W!-BdTz&2FIigo~dW)*?NwitLN$YdVyZ37wN@%iC(Ig z>E(KbUa42<)q0IytJmrEdV}7mH|foKi{7fY>Fs)l-l=!#-FlDStM}>s`hY&D59!1D zh(4;1>Err@KB-UX)B21)tIz54`hvcwFX_wrioU9^>FfH2zNv5N+xm{atMBRi`hk9^ zAL+;XiGHe|>F4@|eyLyS*ZPfqtKaGO`h)(cKk3iF@f7{;7ZI-};aKtN&@s z*v2ug@r*FiDB~M#j0sFQ6W&BH5lti$*+el>O*9kT#4s^UEEC(rF>y^i6W=5-2~8rC z*d#GYO)`_*q%bK>DwEozF=+UO*WI=9)G#$oEmPanF?CHn zQ{OZ&4NW7{*fcRsO*7Nnv@k7Ah-qa)O>5J}v^DKad(**mG@VRm)5UZ(-As4W!}K)0 zOmEZ2^fmoVe>1=gG=t1wGsFxv!_06q!i+Sd%xE*lj5XuTcr(FFG?UC^6K1BEsb-p) zZf2O7W|o<4=9sx=o|$hJn1yDMS!|Y=rDmB~ZdRC;W|diO)|jY&X}|2oH=hUn2Y9; zxoobOtLB=yZf=;H=9al_?wGsgp1E%xn1|+(d2F7Tr{==2XTV9LA)S-kRV7HBnlD-NrI$7vLJboB1jpe3Q`AYg0w-p zAbpS_$QWb_G6z|LtU(0SGzuCAO@gLD zv!HpK|8gvV~2R(wGL9d{9&?o2{^b7h2 z1A>9UpkQz?Bp4bD3x)?Hf|0?fV017h7#oZW#s?FEiNT~`au60w38n_qg6YAGU}i8Y zm>tXs<_7bE`N4uw@*chG1i`DcBrr3AP5? 
zg6+YMU}vx^*d6Q%_6GZc{lS6YU~nin92^Ob2FHTq!HM8xa4I+*oC(eb=YsRWh2Uav zDYzV539bg$g6qMJ;AU_uxEFmtT z%B;H-cXzkq?xjGXEzkmOad#Bp?F?s6YccFn|dxU;_uZ zzym%A0hvH%kOgD~*+6!X1LOp`KyHu+VUeS9;go*fQFzEXbhTwrl1*U4#GeS z5Dr>`R-iSA0Bt~95DB6{G-wCfgAO1D#Db2X6X*=$Ks-nQT|igR4Ri-RKu^#M^ag!E zU(gTq2Lr%BFbE6=L%>il3=9V&z(|k?MuE{_3>XW>f$?AhNCFeVBrq9F0aL*=FdfVQ zGr=q{8_WT7!8|Y@EC36^BCr@N0ZYL$upF!aE5RzT8ms|p!8))WYycamilQKeD4N0)L$MS` z@f4v1N~9!8rW8u0G)kuo%A_pHrX0$pJj$m+s7zF5DhrjB%0^|Ua!@&`TvTo<50#h7 zN9CsqPz9+%RAH(JRg@}56{kv2C8<(WX{roWmMTY;rz%htsY+C3stOfKRi&y?)u|d( zO{x}Eo2oPPjb22ca3LDXPs2sM-% zMh&M%P$Q{CY7{k^8bghx#!=&`2~-j_k(xwJrlwF+scF=7Y6dlvnnlf~=1_B~dDMJr z0kx1?L@lP4P)n(0)N*PCwUSyzt)|vcYpHeAdTIl;k=jIUrnXR9scqDDY6rEG+C}ZA z_E3ANebj#H0CkW$L>;D%P)Dg_)N$$rb&@(oouIQX_x<%cl?ofBBd(?gE0ril2L_MaSP*166)N|?u^^$r;y{6t!Z>e|Gd+G!Ak@`e^ zroK>Lsc+PG>Ie0c`bGVw0$?hb8m57PFf9y%>0o-80cM235I_n-h#(CyWFQMU$U_1J zC_)L!P=PAcpbiaaLJQi^fiCo*4?|!km>Fh)Sz$Jq9p->JVJ?^(=7D)(KA0aCfCXV8 zSQr+8MPV^m9F~A3VJTP|mVsqqIanT6fE8gSSQ%DRaf!S=8NjDfMRBkTk_!#EfZ z6JQtE6?TK&VGr07_JX}(AJ`Z6gZ<$EI1mnkgW(W36b^&K;RrYqCc;s0G#mrR!f|js zoB)&HL^ugfhEw2FI1NsRGvG`(3(kgf;9NKl&W8)&LbwPnhD+d5xC}0bE8t4F3a*B0 z;99s2u7?}oMz{%XhFjoPxD9THJK#>Z3+{${;9j^7?uQ59L3jurhDYF0cnltgC*VnV z3Z8~%;8}PMo`)CUMR*BbhF9QKcnw~MH{eZp3*Lrz;9YnR-iHt1L-+_jhEL#A_zXUW zFW^h~3ciMK;9K|(zK0*+NB9YThF{=U_zixCKj2UJ3;u=yC>2VL(x5<;76qYnC_T!6 zGNNDv5QQK@h(;JOh(#RY5kUeHk%VNVAQfpyM+P#Hg>2*?7kS7>At)2djIyAtC>zR- za-f_j7s`$Dpu8v_%8v@5f~XKGjEbP5s2D1aN}!Uc6e^9%pt7hODvv6lil`E*jH;ke zR25Z2)lm&p6V*bsQ5{ql)kF1B1Jn>TLXA-q)D$&C%~2R?fx=Ns)C#pm5vUDniy~1J zibm~Fd(;8NpjgxqbwZs{9EwK?s0-?fx}ol<2kMD>q28zu>WliJ{%8Oihz6m-Xb2jL zhN0nT1R9AF(I_+;jX`74I5ZwjKuKsKnuI2!DQGI1hNhz#XeOG4W}`W1E}DntqXlRo zT7(v(C1@#HhL)ohXeC;OR--j&En0`xqYY>y+JrWvEodv+hPI;}XeZi*cB4ILFWQIp zqXXz5I)o0RBj_kPhK{2X=p;IYPNOsEEINnIqYLOFx`ZyHE9fe^hOVO<=q9>_ZlgQs zF1m;AqX+0AdW0UMC+I19hMuDr=p}lEUZXeYEqaIEqYvmK`h-5CFX$`!hQ6a8=qLJx zexm?76`h(+LkH4n=^#2Cou1A>XQYE^KvOiN5lz#WW@wh?Xr3mtK#R0Q%d|qPv_|W+ zL7TKi+q6Twv`71N2%U+}OlP69(%I8f-!x;kBhu1VLTYtwb;x^z9dKHY$B zNH?Mz(@p56bThg+9Y(jH!|9fEE4nouLARmX(vfr&9Zk2R+tVHB7&?~jNOz(;({Xe> zoj`Y?yVBk0?sN~jC*6zgP4}Vu(*5ZE^Zuf z5&9^7j6P1Epik1L=+pEW`Ye5pK2KkuFVdIj%k&lcDt(QAKs zJJ`h@_HhW#gfrtTI4jPEv*R2%C(ea)<2*Po&WH2k0=OV9gbU*$xF{}$i{lcwBrb(Z z<1)A`E{DtG3b-P!ge&7JI22dK)o^uO1J}g0aBW-%*Twa4ecS*y#Eo!c+ypnp&2V!Z zhFjop+!D9Kt#JfygWKXr9EGECJKP?3z%e)$cf_4=XB>y)aRTmwyW(!RJMMvd;$FBn z?t}Z{ez-p#fCu71crYGY)wn8r*KrYX~mY0iW(EtqhoCDV#&%|tM5n6^wL6U9U` z?U?pV2PTGzWjZpQn9fWb6VD_tU6`&+H>Nw&gXziiVtO-un7&Lurav=)8ORJ`1~WsL zq0BI5I5UD7$s{tPn9EhW;Qd2naj*$ z<}(YJh0G#mF|&kO$}D4+Gb@;t%qnIzvxZsAtYg+Q8<>sECT26Uh1tq%W41Fpn4Qcn zW;e5k*~{!>_A>{VgUli3Fmr@C${b^kGbfmn%qiwHbA~y~oMX;27nqC8CFU}7g}KUH zW3Dqdn48Qk<~DPOxy#&R?lTXVhs-19G4q6Z$~SW4<##n4ioq<~I|-reagGY1lwEEgQt9W7D%4*o*&*yu zb{IRH9l?%d6WLMhXm$)cmL12AXD6^p>_m1FJDHurPGzUD)7cs9Om-GKo1MeXW#_T; z*#+!Eb`iUnUBWJ9m$A#)73@lO6}y^U!>(o5vFq6l>_&DIyP4g>Ze_Qz+u0rLPIec& zo880iW%sfB*#qoB_7HoRJ;EMkkFm$u6YNR$6nmOI!=7c&vFF(f>_zqxdzrn$US+Sb z*V!BFP4*Uho4v!{W$&@~*$3=H_7VG-eZoFvpRv!`7wk*+75kcf!@gzTvG3Ur>__$! 
z`A4JCMlP5G9K}HnaWscHhGRL7 z<2k|!oXAO>%qg78X`Id(oXJ_7%{iRQd7RIMaGAKwTox`XmyOHL<=}F1xwzb19xgAJ zkIT;$;0khuxWZf!t|(WGE6$bRN^+&R(p(v?ELV;z&sE?ma+SEsToo>qtIAd5s&h5C znp`ceHdlwM%hluRa}BtLTqCYA*Mw`zHRGCdVO$F?oNLLo;#zYNTpO+}7s*9&(Of&O zJ=cMY;bOUtTqmwG7sthO30xPhE7y(d&h_AWa=p0TTpzA4*N^MZ4d4cHgSf%m5N;?p zj2q64;6`$Z+$e4|H-;O_jpN316SyR9A~%Vf%uV5@a?`l!+zf6eH;bFi&Ee*9^SJrk z0&XF z%zfd$a^JY`+z;+2_lx_@1@Ni()O;E~kWb48@#*;Vd<*V`4`5Jsp zz7}7bufx~n>+$vZ27E)l5#N|^!Z+oc@y+=#z6Br7x8z&#t@#MP4d0fJ@KALftnNBLv?asC8`P7Goai`c{=F7b#@LP#c(nPeeZNj8$5r0A!SK9Ql3;G6-gyhnN%U6q$;UKs*@U|CaFbglRBg>sYmLQ z2BaZrL>iMOq$z1env*cnf`pTnq!npRB1jw3mPC>$5>48X_M`)eA+e+*=|nn{I1*11 zNEgzTbR*qK57LwLBE3l;(wFoj{mB3_kPIS&$q+J>3?swI2r`l+l2K$d8AHaBab!H1 zK$6HrGKowkQ^-^@jZ7yq$V@Ve%qDZlTr!W$Ckx0zvWP4uOUP2Pj4UTB$V#$`tR`#7 zTC$F;CmYB{vWaXaTgX?V82Ub2tuCkMzua)=xzN61lfj2tH?$VqaF zoF-?;S#pk?Cl|;?a*13fSIAXzja(-;$W3yK+$ML(U2>1yClAO&@`yYpPsmg9j65eV z$V>8yye4nRTk?*)Cm+a1@`-#VU&vSTjeI9R$WQW%{3ZcHDj~IyMhFzr3PD0TA-#}6 z$S4F0K%fLDAb}RJzzD3s3A{iAK@bH=kOf6h1x?TeLofwPumwkO1yAsW5FwM0S;!(} z6|xE0g&aaoA(xO_$Rp$x@(KBc0zyHdkWg4CA`}&h3B`pHLP?>NP+BM>loiSe<%J4D zMWK>VS*Rj}3RQ(_LUo~rP*bQS)E4Rpb%lCDeW8KSP-rAH7Mci6g=Ru?Axvl?gbOW& zRzhnbLTDqj6(WTwAzElBv==%EF+!}+QRpOe7UG0>AwlRObQQV@-Gv@PPobC4Tj(S7 z75WMNg#p4qVURFb7$OW6h6%%k5yD6zQ5YqR7RCr;g>k}oVS)v6lMvtg*n1pVV*EwSRgDE772@mCBjl+nXp_~A*>Wu39E%Q!dhXSuwK|8Y!o&L zn}sdHR$-g4UDzS)6m|)_g+0PvVV|&HI3OGp4he^aBf?SPm~dP;A)FLW38#fK!dc;* za9+3|Tof(|mxU|BRpFX&UAQ6K6mAK(g*(Ds;hu0`cpy9!9tn?yC&E+VnebeAA-oh` z39p4W!du~;@Lu>Jd=x$jpM@{NSK*uRUHBpV6n+W6g#a;?m|9FD28wCLATgboUd$k7 z6oW+|QX&+QNQ+oxL{{WPUL>L*ilQXSq9UrIChDRgnxZAzq9eMZC;DQDm`ThmW)ZWB z*~IK(4l$>gOUy0i5%Y@q#Qb6bv7lH;EG!lgi;Bg>;$jK0q*zKUEtV0>isi)eVg<3H zSV^obRuMzRs$w;!T3Db^Bei*>}hVm-0G*g$M3HWC|)O~j^RGqJfCCbkg6#g<|# zv9%Z>wh`Nkkz$k>Ew&Teiyg!mF;?s-b`m>_abmofAa)VEirvKSVh^#W*h}m!_7VGv z{lxy_0CAu=NE|E<5r>My#Npxyaio|ijuJd?`fABxRPeNLi(9 zQg$halvBzj<(BeDd8K?(eyM;|P%0!9mWoJ4rD9TXsf1KgDkYVc%1C9Ua#DGzf>cqe zBvqEGNTE_yshU(>sv*^sYDu-FI#OM!o>X6IAT^X4NsXl@Qd6m!)LaUaT1eqiOR1IA zT8fa`NNuG^DN2f#+DYxD4pNL1D|M7QNu8xQDPBsDx=3B6Zc=xthtyN*CH0p2NPVS# zQh#ZHG*B8O4VH#TL#1KTaA|}zQc9FYNu#AP(pYJnG+vq@B}o&dNz!C#iZoT4CQX-S zNHe8b(rjstG*_A@&6gHP3#CQUVrhxAR9Yr2msUtCrB%{uX^pg2S|_cSHb@(#P10s* zi?mhRCT*8?NIRun(r#&wv{%|E?UxQn2c<*OVd;o;R5~Udmrh70rBl*r>5Oz%Iwzf% zE=U)pOVVZOigZ=FCS8|qNH?Wh(rxLEbXU43-IpFn52Z)aW9fomtIIOrB~8x z>5cSOdMCY?K1d&>Pts@Ui}Y3cCViKFNI#`t(r+n1P9>+7)5w8xS~*BgC#RP)$Qk8e z8OW3jWhB!wmKm9qIhmJ&oANFBwtPpvE8mmv%Mav-@+0}N{6u~#Ka-!!FXWf5li$l9K(G*=V6jQMjTX7Uu@f2SPQ8Fo+l`KkDC7Y66 z$)V&_aw)l$JW5_ApORlGpcGUJDTS3HN>QblQd}valvGM7rIj*DS*4s(Ua6o|R4OTz zl`2Z8QdOy@R99*!HI-UQZKaMUCqr@s5l}<`$B~FP~5|l1VSEZZMUFo6pRC+1Bl|D*erJvGY z8K4YQ1}TG;A<9r?m@-@$p^Q`#l~KxQWsEXb8K;a_CMZeDL}ijPS(&0tRi-J^l^M!R zWtK8qnWM~A<|*@)1qAXRGDa(}=%1ULGvRYZAtX0-2>y-`4MrD(-S=pj& zRkkVHl^x1XWtXyB*`w@L_9^?71Ij_=kaAc#q8wF@DaVx)%1Pyva#}f~oK?;#=amb} zMdgxmS-GNIRjw)5l^e=U<(6_=xue`w?kV?`2g*a`k@8r1qC8ceDbJM`%1h;y@>+SL zyj9*Q@0AbAN9B|9S^1)TRlX_Tl^@Da<(KkX2~bn1sns-Upqf?G@N4b+BeBek*GL~W`zQ=6+{Y6~@7ZK<|WTdNUj z8?~((sYa>MYCE;P+Chy`W7UpoC$+O0r^c%ZY8SPu+D+}Q_E3AOz0}@nAGNRAPwlS` zPzS1m)WPZyb*MT_9j=a0N2-bHD0Q?tMjfk;Q^%_l)FgGHI!T?ZPEn_-)70te40WbD zOP#IGQRk}j)cNWHb)mXQU92uqm#WLuIQYAx=G!vZc(?Y z+tlsq4t1xxOWm#RQTM9*)cxuK^`Lr4J**y4kE+MiIL

    Ie0s`bqt)eo?=w-_-Bw5A~<|OZ}||XsNW+S{f}-OREKG>9q7(1}&o&tO1SE zpoTPB!y2Qp8mI9Z(F9G@Bu&;7P1Q6_*9^_nEX~#&&DA{3*Fv;RT4pVamQ~B9W!G|O zIkj9`ZY__NSIej6*9vF_wL)59t%z1sE2b6KN@yjuQd()Pj8;}FrvzzHPxDF&9yMCg%+;0)LLn+wFs?^ z)>ezuqO@qOoz`CKpv7phT1Tyu)>(_w;rGN_16Yy z1GPcgU~PytR2!xZ*G6a~wM1={Hd-5_jn&3!y z+IDS+wo}`s?bh~ad$oPqe(ivEP&=d@){baLwPV_G?SytxJEfi0&S+<~bJ}_Bf_726 zq+QmoXjips+I8)Qc2m2h-PZ1CceQ)keeHqvP33hflldA zM>?%zozYpH(|Miff-dTkF6)Y}>YA?WhHmPXZtITj>YncFA$le~vz|rIs%O))>pAqC zdM-V;o=4BC=hO4+1@wY?A-%9(L@%lr(~IjR^pbihy|i9NFRPc+%j*^Nih3ozvR*|G z)vM~&^y+#Iy{2AEudUb7>+1FN`g#Msq25SutT)k{>do}#dYIlq57%4jt@PGjU(G z`XGIi>f`kB`UE{mpQumLC+k!6srod1x;{gnsn619 z>vQzE`aFHUzCd57FVYw5OZ27sGJUzeLSLz`(pT$i^tJjreZ9Ux->7fWH|tyUt@<{7 zyS_u;sqfNv>wEOQ`aXTXen3B{AJPx&NA#omG5xrHLO-dW(ogGW^t1Xo{k(obzo=i* zFY8zItNJzlx_(2yso&CX>v#0K`aS)={y=}IKhhuTPxPnyGyS>#LVu~h(qHRu^tbvu z{k{G{|EPb`KkHxgulhIryZ%G}ssGY{>j6e8Bejvn2sF|fK}I?wy^+DlXapO;pbTgr zgEp|i7_7k=yg>}X5Dm$Y4aHCm&Cm_QFb&JF4aaZ|&+v^9Ba@NY$YNwQvKiTp97awf zmyz4ZW8^jR8TpL@MnR*HQP?PA6g7$&#f=h1Nu!ie+9+d`HOd*~jS5CZqmohCsA7Z~ zRgG#!b)$w+)2LJlG&UKVjV;DjW1F$v z*kSB6b{V^kJ;q*RpRwOKU>r0K8HbG{#!=&#aojjzoHR}ur;RhlS>v2>-nd{~G%gvJ zjVs1gZW0ure)fuW4fki`eulk$;@nKF|(T4%qz-(wXG8>ys%%)~Dv$+{&wlKrZmS!unwHaZyG25Dv zW|SFiwlmwC9n2Ur*6e6@GCP}bX1tkTb}_q}-OTQ053{G)%j|9TG5ebR%>L#8bD%lM z9Bd9ThnmC8;pPZ)q?u@rGDn+Z%(3P;bG$jhOfn~$lg!EH6mzOM&75w|FlU;x%-QA~ zbFMkhoNq2L7n+OA#pV)oskzKtZmuv_nybv!<{ERYxz1c~ZZJ2Ro6ODT7IUk)&D?J8 zFn5}}%-!Z5bFaD2+;1K*51NO}!{!n5sCmphZk{ktny1Xu<{9&>dCoj8$it1}mc#Yype1poJ{j z!WLt(7H9Dmu>?!BBulmwOSLphw+zd)EX%eW%e6eqw?eE;R%R=UmDS2-Ww&xzIjvk) zZYz(K*UD$*w+dJVtwL5|tB6(9DrOb8N?0YWQdViJj8)buXO*`qSQV{GR%NS-6>3$r zs#(>o8dgoKmQ~xTW7W0lS@o?3Rzs_i)!1rcHMN>q&8;x2g%xhKv|3rMtq7}))z*r% zqO54Eoz>pzV8vLmR!6Io)!B-(;;jU$i`CWYW_7oESUs&?R&T41)z|80^|uCC1Fb>U zU~7mq)EZ_Dw?%P zbFF#Sd~1QV&{||Iww72+t!377YlXGaT4k-a)>vz;b=G=ogSFAxWNo&#SX-@a)^=-$ zwbR;V?Y8zx6aEI%S=<&RA!ybJlt5f_2flWL>td zSXZrU)^+QKbxK2wdS$(~-dJy~ch-CB zgZ0t+WPP^2SYNGg)_3cN_0#%gCI86GPGzUI)7XJ_T06*2XQ#I_*ct6$8`zW$ZDi9n zwi%nXIh(hME!d(h*|M$Js;$|&ZP=!5*|zQ2uI<^r9b#v)Guv70tadg#yPd<%Y3H(Y z+j;D~c0N15UBE7A7qSc6MeL$>F}t{3!Y*l-vP;`#?6P(_yS!b&u4q@XE8A7~OoK-O6rlN7!xbwsxc) zWk=iX?DlpCJI0Q+JKCM>&UTy~ZztGY?5=hFSD21E9{l_DtooP#$Ic$v)9`j?2Yy&d$Ya8-fC~Nx7$1H zo%Sw!x4p;SYwxr7+Xw7}_96SQeZ)R$AG43!C+w5;<-z@Z%IAcuCa z!#J$NIlMz0!4VzFksZZR9nH}l!!aGpu^q>89nbNd5GRw9*~#K$b+S3xog7Y1Czq4k z$>Zd8@;UjP0!~4vkW<(x;uLj?ImMk4PD!VfQ`#xxly%BE<(&#nMW>Qe*{R}$I#r!& zPIae-Q`4#C)OPAPb)9-neW!ub&}rl}cA7X%on}sRC(LQ#ggY&rR!(at!fE5Qbt0W8 zC)#P}w0AlP;m7CDQZCC*Z3nX}wk;jDC4IjfyD&RS=kv)j*Ip7?04mpRNBhFFhm~-4Y;hc0%Ij5a7&ROT2bKbe&Ty!oumz^ul zRp**>-MQi1bZ$AfojcB5=bm%ldEh*B9yyPlC(cvnne*Iv;kKQeGrxvAYWZlIgi4RX`D>D>%&MmN|6F6BZOxwMO2 z#${d3?*G6YOd}YuIXB??K-aOdamz=xS8C{ZWcGIo6XJc=5TYmx!l}t z9yhO>&&}@^a0|MH+`?`Vx2RjpE$)_ZOS+}p(ry{ItXs}4?^bXtx|Q6@ZWTAwt?E{D ztGhManr(+DYyA9liZX>s`+r(|^HglW1VQvdI+->Q$a$CC*ZX36)8|g;5 z(QZ4pz1zW!abw+%ZYQ_18|TKm32qm+tJ}@(?)Gqdy1m@qZXdU=+t2Oq4sZv$gWSRH z5O=6M%pLBIa7Vg{?kIP(JH{RBj&sMm6Wk`rl~y3^e0?hJRPJIkHz&T;3u z^W6FF0(YUi$X)C%ahJNw+~w{Hccr_^UG1)M*ShQ6_3j3Dqr1u7>~3+ly4&3C?hbdS zyUX3}?s50J``rER0r#ML$UW>HagVyk+~e*E_oREuJ?)-x&${Q_^X>)rqI=1`>|SxN zy4T$6?hW^*d&|A;-f{1`_uTvL1NWi($bIZSai6-++~@8K_oe&FeeJ$+-@5PI_wEPx zqx;GI?0#{-y5HRI?hp5;`^)|926(Bw)Lt4d&`awDdFj0LUIs6t7wiF#@}P%2+QS~> zu^#8~9`OWE^dwLA6i@XuPxlPZ^eoTz9MAPU&-X&SOkQR$iDtHyWN?v8JiWllt^{RQ* zy&7Ikua;NatK-%6>Us6O23|w2k=NL3;x+Y}dCk2ruZ0)xwe(tft-T1Zjn~$T^rF0I zubtQ4>)^$Bv0g{7lh@gc^Wwb(uZ!2!>*jU$dU!p(US4mnkJs1h=k@mncmusb-e7Nt zH`E*E4fjTPBfUg#lsDQNnaUXnM_o8(RQrg&4mY2I{ihBwoj<<0iycyqmZ 
z-h6L?x6oVUE%ugpOTA^@a&LvV(p%-N_SSf7y>;GtZ-ckd+vIKbws>2;ZQgcohqu$) zs4q?}B&HyX0N=u6S3y zYuPrYZ}bMJ-s(tG8-_TG4Jy?5Sw?}PWz z`{aH0zIb1~Z{BzBhxgO_<^A>o{8WBwKaC&gr}cyUbbfk2gP+k4_JL3N&__P)W1sO^ zpYwU2_<}F`k}vy;ulky=`-X4&mT&ux@A{ta`yqZNKeM03&+2FMv->&xoPI7px1Yz) z>*w?H`vv@hej&fGU&Jr!7xRnzCH#_pDZjK|#xLua^UM1c{EB`hzp`J&5B011)%@yy z4Zo&e%dhR%@$35a{Q7!{GNUJRgW`y>33exg6hAMKCv$NJ;^@%{uq$)D&?@+bRK{Hgvlf4V=zpXtx?XZv&fx&AzV zzQ4d<=r8gY`%C<#{xW~LzrtVXuku&>Yy7qTI)A;t!Qbd_@;Cci{H^{rf4jfK-|6r2 zcl&$%z5YIbzkk3#=pXVA`$znv{xSc!f5JcMpYl)pXZ*ANIsd$W!N2HV@-O>W{Hy*o z|GIy}zv8I=$p)jmh(i1y*}QC%Wh zN3@R*PfDczd;OoSX}d>+caBR6O_dnbAvPvHGFc5s3~U|V{g0BqeMET2e->#YV-r%K z0g354M8zb;NBq^}B3i}9wEm-m4pFT-$NtL@n8Kb^Ju&Sc?qnrRGFh?+Orc7aL4S#o zsz*Ob9S~4GG5B8u$?kMrBO==U-H{tvoz$@CkmRZ) zr;;irC7}#|uSryoh@|9Ego)^X3m6s|7892gttX@ZRxsL1R-@hjUH>5ph|ZM47Z9EK zugLOOWc@3${T12&iX4AM&SX(NI@h0P?my2wDUP7%ynhydvW(96N6-J~Ss=xoDtS_3 z!DMAv^j~yha7=j2f9hCm4~Z`H2Nqp8S)}9=TqYqdDkdT>u3U0Cqwgg5Me@~(zMmok zLNkO0q+HNH{Q&*}R!S*&Z#K1qrosd*ED8rwDj!7v^kvyld za&pYFK?tT8|FQi~q~vHRQBqnW5tmJfi;wN_PrQ_aC5QX}SW{xA8lIT0T5OD2p>tGH z=IZ|({NHr2PDF>OWJr8unTWPgF)3XCw`oH1OCBDY;cuS*ftQbI{U^`=GW>;#f1nxv zoAuwzKd_W8PE7v?l*0Az#!UpZ62d#jM|A$rnf}}GH%ywpVg3jDhvRGVS0v@Ie_d(* z6z88WMq-9W$xZSf#(%B;(xv)Cmo5eLKe_!!{|f+r0D%pXdnG0Fzseui-;Ys-6i-;) zgyepW9-4BGQV$RPOB$TwsnO<7t|?ppYyI~B*#2FBpbCxh z=gFTph4&vdg)%vKP-wcu!1l>>e{W7u`D8WX&+Yt2{loL;MFjqHasMj+>j Date: Fri, 14 Aug 2020 12:50:20 -0700 Subject: [PATCH 272/632] CLN: remove unused variable (#35726) --- pandas/core/window/rolling.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py index 0306d4de2fc73..966773b7c6982 100644 --- a/pandas/core/window/rolling.py +++ b/pandas/core/window/rolling.py @@ -318,7 +318,7 @@ def __repr__(self) -> str: def __iter__(self): window = self._get_window(win_type=None) - blocks, obj = self._create_blocks(self._selected_obj) + _, obj = self._create_blocks(self._selected_obj) index = self._get_window_indexer(window=window) start, end = index.get_window_bounds( From 0658ce38754ffb3ef44121c2fe7a810a809b268c Mon Sep 17 00:00:00 2001 From: Richard Shadrach <45562402+rhshadrach@users.noreply.github.com> Date: Fri, 14 Aug 2020 16:59:09 -0400 Subject: [PATCH 273/632] agg with list of non-aggregating functions (#35723) --- doc/source/whatsnew/v1.1.1.rst | 1 + pandas/core/groupby/generic.py | 25 +++++++++++-------- pandas/core/groupby/groupby.py | 10 +++++--- .../tests/groupby/aggregate/test_aggregate.py | 13 ++++++++++ 4 files changed, 35 insertions(+), 14 deletions(-) diff --git a/doc/source/whatsnew/v1.1.1.rst b/doc/source/whatsnew/v1.1.1.rst index 85e2a335c55c6..565b4a014bd0c 100644 --- a/doc/source/whatsnew/v1.1.1.rst +++ b/doc/source/whatsnew/v1.1.1.rst @@ -26,6 +26,7 @@ Fixed regressions - Fixed regression in :meth:`DataFrame.reset_index` would raise a ``ValueError`` on empty :class:`DataFrame` with a :class:`MultiIndex` with a ``datetime64`` dtype level (:issue:`35606`, :issue:`35657`) - Fixed regression where :meth:`DataFrame.merge_asof` would raise a ``UnboundLocalError`` when ``left_index`` , ``right_index`` and ``tolerance`` were set (:issue:`35558`) - Fixed regression in ``.groupby(..).rolling(..)`` where a custom ``BaseIndexer`` would be ignored (:issue:`35557`) +- Fixed regression in :meth:`~pandas.core.groupby.DataFrameGroupBy.agg` where a list of functions would produce the wrong results if at least one of the functions did not aggregate. (:issue:`35490`) .. 
--------------------------------------------------------------------------- diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index b7280a9f7db3c..b806d9856d20f 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -322,11 +322,14 @@ def _aggregate_multiple_funcs(self, arg): # let higher level handle return results - output = self._wrap_aggregated_output(results) + output = self._wrap_aggregated_output(results, index=None) return self.obj._constructor_expanddim(output, columns=columns) + # TODO: index should not be Optional - see GH 35490 def _wrap_series_output( - self, output: Mapping[base.OutputKey, Union[Series, np.ndarray]], index: Index, + self, + output: Mapping[base.OutputKey, Union[Series, np.ndarray]], + index: Optional[Index], ) -> Union[Series, DataFrame]: """ Wraps the output of a SeriesGroupBy operation into the expected result. @@ -335,7 +338,7 @@ def _wrap_series_output( ---------- output : Mapping[base.OutputKey, Union[Series, np.ndarray]] Data to wrap. - index : pd.Index + index : pd.Index or None Index to apply to the output. Returns @@ -363,8 +366,11 @@ def _wrap_series_output( return result + # TODO: Remove index argument, use self.grouper.result_index, see GH 35490 def _wrap_aggregated_output( - self, output: Mapping[base.OutputKey, Union[Series, np.ndarray]] + self, + output: Mapping[base.OutputKey, Union[Series, np.ndarray]], + index: Optional[Index], ) -> Union[Series, DataFrame]: """ Wraps the output of a SeriesGroupBy aggregation into the expected result. @@ -383,9 +389,7 @@ def _wrap_aggregated_output( In the vast majority of cases output will only contain one element. The exception is operations that expand dimensions, like ohlc. """ - result = self._wrap_series_output( - output=output, index=self.grouper.result_index - ) + result = self._wrap_series_output(output=output, index=index) return self._reindex_output(result) def _wrap_transformed_output( @@ -1720,7 +1724,9 @@ def _insert_inaxis_grouper_inplace(self, result): result.insert(0, name, lev) def _wrap_aggregated_output( - self, output: Mapping[base.OutputKey, Union[Series, np.ndarray]] + self, + output: Mapping[base.OutputKey, Union[Series, np.ndarray]], + index: Optional[Index], ) -> DataFrame: """ Wraps the output of DataFrameGroupBy aggregations into the expected result. 
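For orientation between the two hunks above and below, the user-facing shape of the regression this patch fixes (GH 35490): a non-aggregating function passed inside a list should give the same values as the same function passed on its own. The sketch below mirrors the test added later in this patch; before the fix, the list form produced wrong results.

    import pandas as pd

    df = pd.DataFrame({"a": [1, 1, 2, 2], "b": [1, 2, 2, 1]})
    g = df.groupby("a")

    single = g.agg("cumsum")      # non-aggregating function on its own
    listed = g.agg(["cumsum"])    # same function wrapped in a list
    # the list form returns MultiIndex columns; drop the function level
    listed.columns = listed.columns.droplevel(-1)

    assert listed.equals(single)  # holds once this patch is applied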
@@ -1745,8 +1751,7 @@ def _wrap_aggregated_output( self._insert_inaxis_grouper_inplace(result) result = result._consolidate() else: - index = self.grouper.result_index - result.index = index + result.index = self.grouper.result_index if self.axis == 1: result = result.T diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index 4597afeeaddbf..0047877ef78ee 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -974,7 +974,9 @@ def _cython_transform(self, how: str, numeric_only: bool = True, **kwargs): return self._wrap_transformed_output(output) - def _wrap_aggregated_output(self, output: Mapping[base.OutputKey, np.ndarray]): + def _wrap_aggregated_output( + self, output: Mapping[base.OutputKey, np.ndarray], index: Optional[Index] + ): raise AbstractMethodError(self) def _wrap_transformed_output(self, output: Mapping[base.OutputKey, np.ndarray]): @@ -1049,7 +1051,7 @@ def _cython_agg_general( if len(output) == 0: raise DataError("No numeric types to aggregate") - return self._wrap_aggregated_output(output) + return self._wrap_aggregated_output(output, index=self.grouper.result_index) def _python_agg_general( self, func, *args, engine="cython", engine_kwargs=None, **kwargs @@ -1102,7 +1104,7 @@ def _python_agg_general( output[key] = maybe_cast_result(values[mask], result) - return self._wrap_aggregated_output(output) + return self._wrap_aggregated_output(output, index=self.grouper.result_index) def _concat_objects(self, keys, values, not_indexed_same: bool = False): from pandas.core.reshape.concat import concat @@ -2534,7 +2536,7 @@ def _get_cythonized_result( raise TypeError(error_msg) if aggregate: - return self._wrap_aggregated_output(output) + return self._wrap_aggregated_output(output, index=self.grouper.result_index) else: return self._wrap_transformed_output(output) diff --git a/pandas/tests/groupby/aggregate/test_aggregate.py b/pandas/tests/groupby/aggregate/test_aggregate.py index 40a20c8210052..ce9d4b892d775 100644 --- a/pandas/tests/groupby/aggregate/test_aggregate.py +++ b/pandas/tests/groupby/aggregate/test_aggregate.py @@ -1061,3 +1061,16 @@ def test_groupby_get_by_index(): res = df.groupby("A").agg({"B": lambda x: x.get(x.index[-1])}) expected = pd.DataFrame(dict(A=["S", "W"], B=[1.0, 2.0])).set_index("A") pd.testing.assert_frame_equal(res, expected) + + +def test_nonagg_agg(): + # GH 35490 - Single/Multiple agg of non-agg function give same results + # TODO: agg should raise for functions that don't aggregate + df = pd.DataFrame({"a": [1, 1, 2, 2], "b": [1, 2, 2, 1]}) + g = df.groupby("a") + + result = g.agg(["cumsum"]) + result.columns = result.columns.droplevel(-1) + expected = g.agg("cumsum") + + tm.assert_frame_equal(result, expected) From 6325a331b2c3a06609b7689c55fcc186a3cace7f Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Fri, 14 Aug 2020 14:01:02 -0700 Subject: [PATCH 274/632] BLD: bump xlrd min version to 1.2.0 (#35728) --- ci/deps/azure-37-locale_slow.yaml | 2 +- ci/deps/azure-37-minimum_versions.yaml | 2 +- doc/source/getting_started/install.rst | 2 +- doc/source/whatsnew/v1.2.0.rst | 2 +- pandas/compat/_optional.py | 2 +- pandas/tests/io/excel/test_readers.py | 43 +++++++------------------- 6 files changed, 16 insertions(+), 37 deletions(-) diff --git a/ci/deps/azure-37-locale_slow.yaml b/ci/deps/azure-37-locale_slow.yaml index 3ccb66e09fe7e..8000f3e6b9a9c 100644 --- a/ci/deps/azure-37-locale_slow.yaml +++ b/ci/deps/azure-37-locale_slow.yaml @@ -24,7 +24,7 @@ dependencies: - pytz=2017.3 - scipy - 
sqlalchemy=1.2.8 - - xlrd=1.1.0 + - xlrd=1.2.0 - xlsxwriter=1.0.2 - xlwt=1.3.0 - html5lib=1.0.1 diff --git a/ci/deps/azure-37-minimum_versions.yaml b/ci/deps/azure-37-minimum_versions.yaml index 94cc5812bcc10..05b1957198bc4 100644 --- a/ci/deps/azure-37-minimum_versions.yaml +++ b/ci/deps/azure-37-minimum_versions.yaml @@ -25,7 +25,7 @@ dependencies: - pytz=2017.3 - pyarrow=0.15 - scipy=1.2 - - xlrd=1.1.0 + - xlrd=1.2.0 - xlsxwriter=1.0.2 - xlwt=1.3.0 - html5lib=1.0.1 diff --git a/doc/source/getting_started/install.rst b/doc/source/getting_started/install.rst index 7ab150394bf51..4c270117e079e 100644 --- a/doc/source/getting_started/install.rst +++ b/doc/source/getting_started/install.rst @@ -287,7 +287,7 @@ s3fs 0.4.0 Amazon S3 access tabulate 0.8.3 Printing in Markdown-friendly format (see `tabulate`_) xarray 0.12.0 pandas-like API for N-dimensional data xclip Clipboard I/O on linux -xlrd 1.1.0 Excel reading +xlrd 1.2.0 Excel reading xlwt 1.3.0 Excel writing xsel Clipboard I/O on linux zlib Compression for HDF5 diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index a3bb6dfd86bd2..42f95d88d74ac 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -122,7 +122,7 @@ Optional libraries below the lowest tested version may still work, but are not c +-----------------+-----------------+---------+ | xarray | 0.12.0 | X | +-----------------+-----------------+---------+ -| xlrd | 1.1.0 | | +| xlrd | 1.2.0 | X | +-----------------+-----------------+---------+ | xlsxwriter | 1.0.2 | X | +-----------------+-----------------+---------+ diff --git a/pandas/compat/_optional.py b/pandas/compat/_optional.py index 6423064732def..81eac490fe5b9 100644 --- a/pandas/compat/_optional.py +++ b/pandas/compat/_optional.py @@ -27,7 +27,7 @@ "tables": "3.4.3", "tabulate": "0.8.3", "xarray": "0.8.2", - "xlrd": "1.1.0", + "xlrd": "1.2.0", "xlwt": "1.2.0", "xlsxwriter": "0.9.8", "numba": "0.46.0", diff --git a/pandas/tests/io/excel/test_readers.py b/pandas/tests/io/excel/test_readers.py index b610c5ec3a838..51fbbf836a03f 100644 --- a/pandas/tests/io/excel/test_readers.py +++ b/pandas/tests/io/excel/test_readers.py @@ -1,9 +1,7 @@ -import contextlib from datetime import datetime, time from functools import partial import os from urllib.error import URLError -import warnings import numpy as np import pytest @@ -14,22 +12,6 @@ from pandas import DataFrame, Index, MultiIndex, Series import pandas._testing as tm - -@contextlib.contextmanager -def ignore_xlrd_time_clock_warning(): - """ - Context manager to ignore warnings raised by the xlrd library, - regarding the deprecation of `time.clock` in Python 3.7. 
- """ - with warnings.catch_warnings(): - warnings.filterwarnings( - action="ignore", - message="time.clock has been deprecated", - category=DeprecationWarning, - ) - yield - - read_ext_params = [".xls", ".xlsx", ".xlsm", ".xlsb", ".ods"] engine_params = [ # Add any engines to test here @@ -134,21 +116,19 @@ def test_usecols_int(self, read_ext, df_ref): # usecols as int msg = "Passing an integer for `usecols`" with pytest.raises(ValueError, match=msg): - with ignore_xlrd_time_clock_warning(): - pd.read_excel( - "test1" + read_ext, sheet_name="Sheet1", index_col=0, usecols=3 - ) + pd.read_excel( + "test1" + read_ext, sheet_name="Sheet1", index_col=0, usecols=3 + ) # usecols as int with pytest.raises(ValueError, match=msg): - with ignore_xlrd_time_clock_warning(): - pd.read_excel( - "test1" + read_ext, - sheet_name="Sheet2", - skiprows=[1], - index_col=0, - usecols=3, - ) + pd.read_excel( + "test1" + read_ext, + sheet_name="Sheet2", + skiprows=[1], + index_col=0, + usecols=3, + ) def test_usecols_list(self, read_ext, df_ref): if pd.read_excel.keywords["engine"] == "pyxlsb": @@ -597,8 +577,7 @@ def test_sheet_name(self, read_ext, df_ref): df1 = pd.read_excel( filename + read_ext, sheet_name=sheet_name, index_col=0 ) # doc - with ignore_xlrd_time_clock_warning(): - df2 = pd.read_excel(filename + read_ext, index_col=0, sheet_name=sheet_name) + df2 = pd.read_excel(filename + read_ext, index_col=0, sheet_name=sheet_name) tm.assert_frame_equal(df1, df_ref, check_names=False) tm.assert_frame_equal(df2, df_ref, check_names=False) From a3f5c6a5a3f05edc1c56ab8051f28c4cf322c45c Mon Sep 17 00:00:00 2001 From: estasney Date: Fri, 14 Aug 2020 17:01:49 -0400 Subject: [PATCH 275/632] Fix broken link in cookbook.rst (#35729) --- doc/source/user_guide/cookbook.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/user_guide/cookbook.rst b/doc/source/user_guide/cookbook.rst index 49487ac327e73..7542e1dc7df6f 100644 --- a/doc/source/user_guide/cookbook.rst +++ b/doc/source/user_guide/cookbook.rst @@ -765,7 +765,7 @@ Timeseries `__ `Aggregation and plotting time series -`__ +`__ Turn a matrix with hours in columns and days in rows into a continuous row sequence in the form of a time series. `How to rearrange a Python pandas DataFrame? 
From ed116557544a622ef295db45a5a00d2fa9f2cbc7 Mon Sep 17 00:00:00 2001 From: Ali McMaster Date: Mon, 17 Aug 2020 09:50:00 +0100 Subject: [PATCH 276/632] CI: Min Pytest Cov Version/Restrict xdist version (#35754) --- ci/deps/azure-windows-37.yaml | 2 +- ci/deps/azure-windows-38.yaml | 2 +- ci/deps/travis-37-cov.yaml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/ci/deps/azure-windows-37.yaml b/ci/deps/azure-windows-37.yaml index 4d745454afcab..f4c238ab8b173 100644 --- a/ci/deps/azure-windows-37.yaml +++ b/ci/deps/azure-windows-37.yaml @@ -8,7 +8,7 @@ dependencies: # tools - cython>=0.29.16 - pytest>=5.0.1 - - pytest-xdist>=1.21 + - pytest-xdist>=1.21,<2.0.0 # GH 35737 - hypothesis>=3.58.0 - pytest-azurepipelines diff --git a/ci/deps/azure-windows-38.yaml b/ci/deps/azure-windows-38.yaml index f428a6dadfaa2..1f383164b5328 100644 --- a/ci/deps/azure-windows-38.yaml +++ b/ci/deps/azure-windows-38.yaml @@ -8,7 +8,7 @@ dependencies: # tools - cython>=0.29.16 - pytest>=5.0.1 - - pytest-xdist>=1.21 + - pytest-xdist>=1.21,<2.0.0 # GH 35737 - hypothesis>=3.58.0 - pytest-azurepipelines diff --git a/ci/deps/travis-37-cov.yaml b/ci/deps/travis-37-cov.yaml index 3a0827a16f97a..edc11bdf4ab35 100644 --- a/ci/deps/travis-37-cov.yaml +++ b/ci/deps/travis-37-cov.yaml @@ -10,7 +10,7 @@ dependencies: - pytest>=5.0.1 - pytest-xdist>=1.21 - hypothesis>=3.58.0 - - pytest-cov # this is only needed in the coverage build + - pytest-cov>=2.10.1 # this is only needed in the coverage build, ref: GH 35737 # pandas dependencies - beautifulsoup4 From 0abfc7e24702266820ae1e7888a8374151ebef04 Mon Sep 17 00:00:00 2001 From: sanderland <48946947+sanderland@users.noreply.github.com> Date: Mon, 17 Aug 2020 12:22:34 +0200 Subject: [PATCH 277/632] REGR: Fix interpolation on empty dataframe (#35543) --- doc/source/whatsnew/v1.1.1.rst | 1 + pandas/core/generic.py | 3 +++ pandas/tests/frame/methods/test_interpolate.py | 7 +++++++ 3 files changed, 11 insertions(+) diff --git a/doc/source/whatsnew/v1.1.1.rst b/doc/source/whatsnew/v1.1.1.rst index 565b4a014bd0c..b1fd76157b9f1 100644 --- a/doc/source/whatsnew/v1.1.1.rst +++ b/doc/source/whatsnew/v1.1.1.rst @@ -19,6 +19,7 @@ Fixed regressions - Fixed regression where :func:`read_csv` would raise a ``ValueError`` when ``pandas.options.mode.use_inf_as_na`` was set to ``True`` (:issue:`35493`) - Fixed regression where :func:`pandas.testing.assert_series_equal` would raise an error when non-numeric dtypes were passed with ``check_exact=True`` (:issue:`35446`) - Fixed regression in :class:`pandas.core.groupby.RollingGroupby` where column selection was ignored (:issue:`35486`) +- Fixed regression where :meth:`DataFrame.interpolate` would raise a ``TypeError`` when the :class:`DataFrame` was empty (:issue:`35598`). 
- Fixed regression in :meth:`DataFrame.shift` with ``axis=1`` and heterogeneous dtypes (:issue:`35488`) - Fixed regression in :meth:`DataFrame.diff` with read-only data (:issue:`35559`) - Fixed regression in ``.groupby(..).rolling(..)`` where a segfault would occur with ``center=True`` and an odd number of values (:issue:`35552`) diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 2219d54477d9e..9cbe2f714fd57 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -6892,6 +6892,9 @@ def interpolate( obj = self.T if should_transpose else self + if obj.empty: + return self + if method not in fillna_methods: axis = self._info_axis_number diff --git a/pandas/tests/frame/methods/test_interpolate.py b/pandas/tests/frame/methods/test_interpolate.py index ddb5723e7bd3e..3c9d79397e4bd 100644 --- a/pandas/tests/frame/methods/test_interpolate.py +++ b/pandas/tests/frame/methods/test_interpolate.py @@ -34,6 +34,13 @@ def test_interp_basic(self): expected.loc[5, "B"] = 9 tm.assert_frame_equal(result, expected) + def test_interp_empty(self): + # https://github.com/pandas-dev/pandas/issues/35598 + df = DataFrame() + result = df.interpolate() + expected = df + tm.assert_frame_equal(result, expected) + def test_interp_bad_method(self): df = DataFrame( { From 934e9f840ebd2e8b5a5181b19a23e033bd3985a5 Mon Sep 17 00:00:00 2001 From: Daniel Saxton <2658661+dsaxton@users.noreply.github.com> Date: Mon, 17 Aug 2020 05:59:08 -0500 Subject: [PATCH 278/632] REGR: Don't ignore compiled patterns in replace (#35697) --- doc/source/whatsnew/v1.1.1.rst | 1 + pandas/core/internals/managers.py | 23 ++++++++++++++++----- pandas/tests/frame/methods/test_replace.py | 8 +++++++ pandas/tests/series/methods/test_replace.py | 10 +++++++++ 4 files changed, 37 insertions(+), 5 deletions(-) diff --git a/doc/source/whatsnew/v1.1.1.rst b/doc/source/whatsnew/v1.1.1.rst index b1fd76157b9f1..4fb98bc7c1217 100644 --- a/doc/source/whatsnew/v1.1.1.rst +++ b/doc/source/whatsnew/v1.1.1.rst @@ -27,6 +27,7 @@ Fixed regressions - Fixed regression in :meth:`DataFrame.reset_index` would raise a ``ValueError`` on empty :class:`DataFrame` with a :class:`MultiIndex` with a ``datetime64`` dtype level (:issue:`35606`, :issue:`35657`) - Fixed regression where :meth:`DataFrame.merge_asof` would raise a ``UnboundLocalError`` when ``left_index`` , ``right_index`` and ``tolerance`` were set (:issue:`35558`) - Fixed regression in ``.groupby(..).rolling(..)`` where a custom ``BaseIndexer`` would be ignored (:issue:`35557`) +- Fixed regression in :meth:`DataFrame.replace` and :meth:`Series.replace` where compiled regular expressions would be ignored during replacement (:issue:`35680`) - Fixed regression in :meth:`~pandas.core.groupby.DataFrameGroupBy.agg` where a list of functions would produce the wrong results if at least one of the functions did not aggregate. (:issue:`35490`) .. 
--------------------------------------------------------------------------- diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py index 371b721f08b27..5a215c4cd5fa3 100644 --- a/pandas/core/internals/managers.py +++ b/pandas/core/internals/managers.py @@ -2,7 +2,17 @@ import itertools import operator import re -from typing import DefaultDict, Dict, List, Optional, Sequence, Tuple, TypeVar, Union +from typing import ( + DefaultDict, + Dict, + List, + Optional, + Pattern, + Sequence, + Tuple, + TypeVar, + Union, +) import warnings import numpy as np @@ -1907,7 +1917,10 @@ def _merge_blocks( def _compare_or_regex_search( - a: ArrayLike, b: Scalar, regex: bool = False, mask: Optional[ArrayLike] = None + a: ArrayLike, + b: Union[Scalar, Pattern], + regex: bool = False, + mask: Optional[ArrayLike] = None, ) -> Union[ArrayLike, bool]: """ Compare two array_like inputs of the same shape or two scalar values @@ -1918,7 +1931,7 @@ def _compare_or_regex_search( Parameters ---------- a : array_like - b : scalar + b : scalar or regex pattern regex : bool, default False mask : array_like or None (default) @@ -1928,7 +1941,7 @@ def _compare_or_regex_search( """ def _check_comparison_types( - result: Union[ArrayLike, bool], a: ArrayLike, b: Scalar, + result: Union[ArrayLike, bool], a: ArrayLike, b: Union[Scalar, Pattern], ): """ Raises an error if the two arrays (a,b) cannot be compared. @@ -1949,7 +1962,7 @@ def _check_comparison_types( else: op = np.vectorize( lambda x: bool(re.search(b, x)) - if isinstance(x, str) and isinstance(b, str) + if isinstance(x, str) and isinstance(b, (str, Pattern)) else False ) diff --git a/pandas/tests/frame/methods/test_replace.py b/pandas/tests/frame/methods/test_replace.py index a3f056dbf9648..8603bff0587b6 100644 --- a/pandas/tests/frame/methods/test_replace.py +++ b/pandas/tests/frame/methods/test_replace.py @@ -1573,3 +1573,11 @@ def test_replace_dict_category_type(self, input_category_df, expected_category_d result = input_df.replace({"a": "z", "obj1": "obj9", "cat1": "catX"}) tm.assert_frame_equal(result, expected) + + def test_replace_with_compiled_regex(self): + # https://github.com/pandas-dev/pandas/issues/35680 + df = pd.DataFrame(["a", "b", "c"]) + regex = re.compile("^a$") + result = df.replace({regex: "z"}, regex=True) + expected = pd.DataFrame(["z", "b", "c"]) + tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/series/methods/test_replace.py b/pandas/tests/series/methods/test_replace.py index 11802c59a29da..f78a28c66e946 100644 --- a/pandas/tests/series/methods/test_replace.py +++ b/pandas/tests/series/methods/test_replace.py @@ -1,3 +1,5 @@ +import re + import numpy as np import pytest @@ -415,3 +417,11 @@ def test_replace_extension_other(self): # https://github.com/pandas-dev/pandas/issues/34530 ser = pd.Series(pd.array([1, 2, 3], dtype="Int64")) ser.replace("", "") # no exception + + def test_replace_with_compiled_regex(self): + # https://github.com/pandas-dev/pandas/issues/35680 + s = pd.Series(["a", "b", "c"]) + regex = re.compile("^a$") + result = s.replace({regex: "z"}, regex=True) + expected = pd.Series(["z", "b", "c"]) + tm.assert_series_equal(result, expected) From 39d1217643ab67b1ef3931fc5a21d32d663fb5a0 Mon Sep 17 00:00:00 2001 From: Fangchen Li Date: Mon, 17 Aug 2020 08:11:53 -0500 Subject: [PATCH 279/632] BLD: update min versions #35732 (#35733) --- pandas/compat/_optional.py | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git 
a/pandas/compat/_optional.py b/pandas/compat/_optional.py index 81eac490fe5b9..689c7c889ef66 100644 --- a/pandas/compat/_optional.py +++ b/pandas/compat/_optional.py @@ -11,25 +11,25 @@ "fsspec": "0.7.4", "fastparquet": "0.3.2", "gcsfs": "0.6.0", - "lxml.etree": "3.8.0", - "matplotlib": "2.2.2", - "numexpr": "2.6.2", + "lxml.etree": "4.3.0", + "matplotlib": "2.2.3", + "numexpr": "2.6.8", "odfpy": "1.3.0", "openpyxl": "2.5.7", "pandas_gbq": "0.12.0", - "pyarrow": "0.13.0", - "pytables": "3.4.3", + "pyarrow": "0.15.0", + "pytables": "3.4.4", "pytest": "5.0.1", "pyxlsb": "1.0.6", "s3fs": "0.4.0", "scipy": "1.2.0", - "sqlalchemy": "1.1.4", - "tables": "3.4.3", + "sqlalchemy": "1.2.8", + "tables": "3.4.4", "tabulate": "0.8.3", - "xarray": "0.8.2", + "xarray": "0.12.0", "xlrd": "1.2.0", - "xlwt": "1.2.0", - "xlsxwriter": "0.9.8", + "xlwt": "1.3.0", + "xlsxwriter": "1.0.2", "numba": "0.46.0", } From 3b0f23ad6825590c30212f0da593752b65db0bd6 Mon Sep 17 00:00:00 2001 From: Terji Petersen Date: Mon, 17 Aug 2020 15:34:59 +0100 Subject: [PATCH 280/632] REF: StringArray._from_sequence, use less memory (#35519) --- asv_bench/benchmarks/strings.py | 15 +++++++ doc/source/whatsnew/v1.1.1.rst | 5 +++ pandas/_libs/lib.pyx | 51 ++++++++++++++-------- pandas/core/arrays/string_.py | 25 +++-------- pandas/core/dtypes/cast.py | 16 ++----- pandas/tests/arrays/string_/test_string.py | 14 +++--- 6 files changed, 73 insertions(+), 53 deletions(-) diff --git a/asv_bench/benchmarks/strings.py b/asv_bench/benchmarks/strings.py index d7fb2775376c0..2023858181baa 100644 --- a/asv_bench/benchmarks/strings.py +++ b/asv_bench/benchmarks/strings.py @@ -7,6 +7,21 @@ from .pandas_vb_common import tm +class Construction: + + params = ["str", "string"] + param_names = ["dtype"] + + def setup(self, dtype): + self.data = tm.rands_array(nchars=10 ** 5, size=10) + + def time_construction(self, dtype): + Series(self.data, dtype=dtype) + + def peakmem_construction(self, dtype): + Series(self.data, dtype=dtype) + + class Methods: def setup(self): self.s = Series(tm.makeStringIndex(10 ** 5)) diff --git a/doc/source/whatsnew/v1.1.1.rst b/doc/source/whatsnew/v1.1.1.rst index 4fb98bc7c1217..c028fe6bea719 100644 --- a/doc/source/whatsnew/v1.1.1.rst +++ b/doc/source/whatsnew/v1.1.1.rst @@ -76,6 +76,11 @@ Categorical - Bug in :class:`DataFrame` constructor failing to raise ``ValueError`` in some cases when data and index have mismatched lengths (:issue:`33437`) - +**Strings** + +- fix memory usage issue when instantiating large :class:`pandas.arrays.StringArray` (:issue:`35499`) + + .. --------------------------------------------------------------------------- .. _whatsnew_111.contributors: diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx index 5fa91ffee8ea8..eadfcefaac73d 100644 --- a/pandas/_libs/lib.pyx +++ b/pandas/_libs/lib.pyx @@ -618,35 +618,52 @@ def astype_intsafe(ndarray[object] arr, new_dtype): @cython.wraparound(False) @cython.boundscheck(False) -def astype_str(arr: ndarray, skipna: bool=False) -> ndarray[object]: - """ - Convert all elements in an array to string. +cpdef ndarray[object] ensure_string_array( + arr, + object na_value=np.nan, + bint convert_na_value=True, + bint copy=True, + bint skipna=True, +): + """Returns a new numpy array with object dtype and only strings and na values. Parameters ---------- - arr : ndarray - The array whose elements we are casting. - skipna : bool, default False + arr : array-like + The values to be converted to str, if needed. + na_value : Any + The value to use for na. 
For example, np.nan or pd.NA. + convert_na_value : bool, default True + If False, existing na values will be used unchanged in the new array. + copy : bool, default True + Whether to ensure that a new array is returned. + skipna : bool, default True Whether or not to coerce nulls to their stringified form - (e.g. NaN becomes 'nan'). + (e.g. if False, NaN becomes 'nan'). Returns ------- ndarray - A new array with the input array's elements casted. + An array with the input array's elements casted to str or nan-like. """ cdef: - object arr_i - Py_ssize_t i, n = arr.size - ndarray[object] result = np.empty(n, dtype=object) - - for i in range(n): - arr_i = arr[i] + Py_ssize_t i = 0, n = len(arr) - if not (skipna and checknull(arr_i)): - arr_i = str(arr_i) + result = np.asarray(arr, dtype="object") + if copy and result is arr: + result = result.copy() - result[i] = arr_i + for i in range(n): + val = result[i] + if not checknull(val): + result[i] = str(val) + else: + if convert_na_value: + val = na_value + if skipna: + result[i] = val + else: + result[i] = str(val) return result diff --git a/pandas/core/arrays/string_.py b/pandas/core/arrays/string_.py index bb55c3cdea45c..381968f9724b6 100644 --- a/pandas/core/arrays/string_.py +++ b/pandas/core/arrays/string_.py @@ -177,11 +177,10 @@ class StringArray(PandasArray): def __init__(self, values, copy=False): values = extract_array(values) - skip_validation = isinstance(values, type(self)) super().__init__(values, copy=copy) self._dtype = StringDtype() - if not skip_validation: + if not isinstance(values, type(self)): self._validate() def _validate(self): @@ -200,23 +199,11 @@ def _from_sequence(cls, scalars, dtype=None, copy=False): assert dtype == "string" result = np.asarray(scalars, dtype="object") - if copy and result is scalars: - result = result.copy() - - # Standardize all missing-like values to NA - # TODO: it would be nice to do this in _validate / lib.is_string_array - # We are already doing a scan over the values there. - na_values = isna(result) - has_nans = na_values.any() - if has_nans and result is scalars: - # force a copy now, if we haven't already - result = result.copy() - - # convert to str, then to object to avoid dtype like '>> construct_1d_ndarray_preserving_na([1.0, 2.0, None], dtype=np.dtype('str')) array(['1.0', '2.0', None], dtype=object) """ - subarr = np.array(values, dtype=dtype, copy=copy) if dtype is not None and dtype.kind == "U": - # GH-21083 - # We can't just return np.array(subarr, dtype='str') since - # NumPy will convert the non-string objects into strings - # Including NA values. Se we have to go - # string -> object -> update NA, which requires an - # additional pass over the data. 
- na_values = isna(values) - subarr2 = subarr.astype(object) - subarr2[na_values] = np.asarray(values, dtype=object)[na_values] - subarr = subarr2 + subarr = lib.ensure_string_array(values, convert_na_value=False, copy=copy) + else: + subarr = np.array(values, dtype=dtype, copy=copy) return subarr diff --git a/pandas/tests/arrays/string_/test_string.py b/pandas/tests/arrays/string_/test_string.py index 6f9a1a5be4c43..efd5d29ae0717 100644 --- a/pandas/tests/arrays/string_/test_string.py +++ b/pandas/tests/arrays/string_/test_string.py @@ -206,12 +206,16 @@ def test_constructor_raises(): @pytest.mark.parametrize("copy", [True, False]) def test_from_sequence_no_mutate(copy): - a = np.array(["a", np.nan], dtype=object) - original = a.copy() - result = pd.arrays.StringArray._from_sequence(a, copy=copy) - expected = pd.arrays.StringArray(np.array(["a", pd.NA], dtype=object)) + nan_arr = np.array(["a", np.nan], dtype=object) + na_arr = np.array(["a", pd.NA], dtype=object) + + result = pd.arrays.StringArray._from_sequence(nan_arr, copy=copy) + expected = pd.arrays.StringArray(na_arr) + tm.assert_extension_array_equal(result, expected) - tm.assert_numpy_array_equal(a, original) + + expected = nan_arr if copy else na_arr + tm.assert_numpy_array_equal(nan_arr, expected) def test_astype_int(): From 9b54f9d89bb8d7543d96029c23f58026ae116375 Mon Sep 17 00:00:00 2001 From: Daniel Saxton <2658661+dsaxton@users.noreply.github.com> Date: Mon, 17 Aug 2020 13:20:19 -0500 Subject: [PATCH 281/632] Pass check_dtype to assert_extension_array_equal (#35750) --- doc/source/whatsnew/v1.1.1.rst | 1 + pandas/_testing.py | 10 ++++++++-- pandas/tests/util/test_assert_extension_array_equal.py | 9 +++++++++ pandas/tests/util/test_assert_frame_equal.py | 8 ++++++++ pandas/tests/util/test_assert_series_equal.py | 8 ++++++++ 5 files changed, 34 insertions(+), 2 deletions(-) diff --git a/doc/source/whatsnew/v1.1.1.rst b/doc/source/whatsnew/v1.1.1.rst index c028fe6bea719..ff5bbccf63ffe 100644 --- a/doc/source/whatsnew/v1.1.1.rst +++ b/doc/source/whatsnew/v1.1.1.rst @@ -38,6 +38,7 @@ Bug fixes ~~~~~~~~~ - Bug in ``Styler`` whereby `cell_ids` argument had no effect due to other recent changes (:issue:`35588`) (:issue:`35663`). +- Bug in :func:`pandas.testing.assert_series_equal` and :func:`pandas.testing.assert_frame_equal` where extension dtypes were not ignored when ``check_dtypes`` was set to ``False`` (:issue:`35715`). 
Categorical ^^^^^^^^^^^ diff --git a/pandas/_testing.py b/pandas/_testing.py index 713f29466f097..ef6232fa6d575 100644 --- a/pandas/_testing.py +++ b/pandas/_testing.py @@ -1377,12 +1377,18 @@ def assert_series_equal( ) elif is_extension_array_dtype(left.dtype) and is_extension_array_dtype(right.dtype): assert_extension_array_equal( - left._values, right._values, index_values=np.asarray(left.index) + left._values, + right._values, + check_dtype=check_dtype, + index_values=np.asarray(left.index), ) elif needs_i8_conversion(left.dtype) or needs_i8_conversion(right.dtype): # DatetimeArray or TimedeltaArray assert_extension_array_equal( - left._values, right._values, index_values=np.asarray(left.index) + left._values, + right._values, + check_dtype=check_dtype, + index_values=np.asarray(left.index), ) else: _testing.assert_almost_equal( diff --git a/pandas/tests/util/test_assert_extension_array_equal.py b/pandas/tests/util/test_assert_extension_array_equal.py index d9fdf1491c328..f9259beab5d13 100644 --- a/pandas/tests/util/test_assert_extension_array_equal.py +++ b/pandas/tests/util/test_assert_extension_array_equal.py @@ -1,6 +1,7 @@ import numpy as np import pytest +from pandas import array import pandas._testing as tm from pandas.core.arrays.sparse import SparseArray @@ -102,3 +103,11 @@ def test_assert_extension_array_equal_non_extension_array(side): with pytest.raises(AssertionError, match=msg): tm.assert_extension_array_equal(*args) + + +@pytest.mark.parametrize("right_dtype", ["Int32", "int64"]) +def test_assert_extension_array_equal_ignore_dtype_mismatch(right_dtype): + # https://github.com/pandas-dev/pandas/issues/35715 + left = array([1, 2, 3], dtype="Int64") + right = array([1, 2, 3], dtype=right_dtype) + tm.assert_extension_array_equal(left, right, check_dtype=False) diff --git a/pandas/tests/util/test_assert_frame_equal.py b/pandas/tests/util/test_assert_frame_equal.py index fe3e1ff906919..3aa3c64923b14 100644 --- a/pandas/tests/util/test_assert_frame_equal.py +++ b/pandas/tests/util/test_assert_frame_equal.py @@ -260,3 +260,11 @@ def test_assert_frame_equal_interval_dtype_mismatch(): with pytest.raises(AssertionError, match=msg): tm.assert_frame_equal(left, right, check_dtype=True) + + +@pytest.mark.parametrize("right_dtype", ["Int32", "int64"]) +def test_assert_frame_equal_ignore_extension_dtype_mismatch(right_dtype): + # https://github.com/pandas-dev/pandas/issues/35715 + left = pd.DataFrame({"a": [1, 2, 3]}, dtype="Int64") + right = pd.DataFrame({"a": [1, 2, 3]}, dtype=right_dtype) + tm.assert_frame_equal(left, right, check_dtype=False) diff --git a/pandas/tests/util/test_assert_series_equal.py b/pandas/tests/util/test_assert_series_equal.py index a7b5aeac560e4..f3c66052b1904 100644 --- a/pandas/tests/util/test_assert_series_equal.py +++ b/pandas/tests/util/test_assert_series_equal.py @@ -296,3 +296,11 @@ def test_series_equal_exact_for_nonnumeric(): tm.assert_series_equal(s1, s3, check_exact=True) with pytest.raises(AssertionError): tm.assert_series_equal(s3, s1, check_exact=True) + + +@pytest.mark.parametrize("right_dtype", ["Int32", "int64"]) +def test_assert_series_equal_ignore_extension_dtype_mismatch(right_dtype): + # https://github.com/pandas-dev/pandas/issues/35715 + left = pd.Series([1, 2, 3], dtype="Int64") + right = pd.Series([1, 2, 3], dtype=right_dtype) + tm.assert_series_equal(left, right, check_dtype=False) From 61e3fd0ad3f2a675c53a347eb34937d703419ffc Mon Sep 17 00:00:00 2001 From: Kevin Sheppard Date: Mon, 17 Aug 2020 19:27:45 
+0100 Subject: [PATCH 282/632] MAINT: Initialize year to silence warning (#35763) Initialize year to silence warning due to subtracting from value that compiler cannot reason must be either initialized or never reached closes #35622 --- pandas/_libs/tslibs/parsing.pyx | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pandas/_libs/tslibs/parsing.pyx b/pandas/_libs/tslibs/parsing.pyx index 8429aebbd85b8..7478179df3b75 100644 --- a/pandas/_libs/tslibs/parsing.pyx +++ b/pandas/_libs/tslibs/parsing.pyx @@ -381,7 +381,8 @@ cdef inline object _parse_dateabbr_string(object date_string, datetime default, object freq): cdef: object ret - int year, quarter = -1, month, mnum, date_len + # year initialized to prevent compiler warnings + int year = -1, quarter = -1, month, mnum, date_len # special handling for possibilities eg, 2Q2005, 2Q05, 2005Q1, 05Q1 assert isinstance(date_string, str) From 8f0d0380beafdb64e9952099a820bc813498c88d Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Mon, 17 Aug 2020 11:28:27 -0700 Subject: [PATCH 283/632] REF: implement reset_dropped_locs (#35696) --- pandas/core/groupby/generic.py | 24 ++--------------------- pandas/core/internals/managers.py | 32 +++++++++++++++++++++++++++++++ 2 files changed, 34 insertions(+), 22 deletions(-) diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index b806d9856d20f..1f0cdbd07560f 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -1111,6 +1111,7 @@ def blk_func(block: "Block") -> List["Block"]: assert len(locs) == result.shape[1] for i, loc in enumerate(locs): agg_block = result.iloc[:, [i]]._mgr.blocks[0] + agg_block.mgr_locs = [loc] new_blocks.append(agg_block) else: result = result._mgr.blocks[0].values @@ -1124,7 +1125,6 @@ def blk_func(block: "Block") -> List["Block"]: return new_blocks skipped: List[int] = [] - new_items: List[np.ndarray] = [] for i, block in enumerate(data.blocks): try: nbs = blk_func(block) @@ -1136,33 +1136,13 @@ def blk_func(block: "Block") -> List["Block"]: deleted_items.append(block.mgr_locs.as_array) else: agg_blocks.extend(nbs) - new_items.append(block.mgr_locs.as_array) if not agg_blocks: raise DataError("No numeric types to aggregate") # reset the locs in the blocks to correspond to our # current ordering - indexer = np.concatenate(new_items) - agg_items = data.items.take(np.sort(indexer)) - - if deleted_items: - - # we need to adjust the indexer to account for the - # items we have removed - # really should be done in internals :< - - deleted = np.concatenate(deleted_items) - ai = np.arange(len(data)) - mask = np.zeros(len(data)) - mask[deleted] = 1 - indexer = (ai - mask.cumsum())[indexer] - - offset = 0 - for blk in agg_blocks: - loc = len(blk.mgr_locs) - blk.mgr_locs = indexer[offset : (offset + loc)] - offset += loc + agg_items = data.reset_dropped_locs(agg_blocks, skipped) return agg_blocks, agg_items diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py index 5a215c4cd5fa3..f05d4cf1c4be6 100644 --- a/pandas/core/internals/managers.py +++ b/pandas/core/internals/managers.py @@ -1504,6 +1504,38 @@ def unstack(self, unstacker, fill_value) -> "BlockManager": bm = BlockManager(new_blocks, [new_columns, new_index]) return bm + def reset_dropped_locs(self, blocks: List[Block], skipped: List[int]) -> Index: + """ + Decrement the mgr_locs of the given blocks with `skipped` removed. + + Notes + ----- + Alters each block's mgr_locs inplace. 
+ """ + ncols = len(self) + + new_locs = [blk.mgr_locs.as_array for blk in blocks] + indexer = np.concatenate(new_locs) + + new_items = self.items.take(np.sort(indexer)) + + if skipped: + # we need to adjust the indexer to account for the + # items we have removed + deleted_items = [self.blocks[i].mgr_locs.as_array for i in skipped] + deleted = np.concatenate(deleted_items) + ai = np.arange(ncols) + mask = np.zeros(ncols) + mask[deleted] = 1 + indexer = (ai - mask.cumsum())[indexer] + + offset = 0 + for blk in blocks: + loc = len(blk.mgr_locs) + blk.mgr_locs = indexer[offset : (offset + loc)] + offset += loc + return new_items + class SingleBlockManager(BlockManager): """ manage a single block with """ From f97c2af16131d55c22533b030e08a34e99394cb7 Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Mon, 17 Aug 2020 11:58:31 -0700 Subject: [PATCH 284/632] BUG: close file handles in mmap (#35748) --- pandas/io/common.py | 5 ++++- pandas/tests/io/parser/test_common.py | 1 + 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/pandas/io/common.py b/pandas/io/common.py index 54f35e689aac8..d1305c9cabe0e 100644 --- a/pandas/io/common.py +++ b/pandas/io/common.py @@ -18,6 +18,7 @@ Optional, Tuple, Type, + Union, ) from urllib.parse import ( urljoin, @@ -452,7 +453,7 @@ def get_handle( except ImportError: need_text_wrapping = (BufferedIOBase, RawIOBase) - handles: List[IO] = list() + handles: List[Union[IO, _MMapWrapper]] = list() f = path_or_buf # Convert pathlib.Path/py.path.local or string @@ -535,6 +536,8 @@ def get_handle( try: wrapped = _MMapWrapper(f) f.close() + handles.remove(f) + handles.append(wrapped) f = wrapped except Exception: # we catch any errors that may have occurred diff --git a/pandas/tests/io/parser/test_common.py b/pandas/tests/io/parser/test_common.py index 3d5f6ae3a4af9..1d8d5a29686a4 100644 --- a/pandas/tests/io/parser/test_common.py +++ b/pandas/tests/io/parser/test_common.py @@ -1836,6 +1836,7 @@ def test_raise_on_no_columns(all_parsers, nrows): parser.read_csv(StringIO(data)) +@td.check_file_leaks def test_memory_map(all_parsers, csv_dir_path): mmap_file = os.path.join(csv_dir_path, "test_mmap.csv") parser = all_parsers From 961b8d4b9190238b1bfab8b47550ab1c472b2db0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Torsten=20W=C3=B6rtwein?= Date: Mon, 17 Aug 2020 14:59:42 -0400 Subject: [PATCH 285/632] TST: encoding for URLs in read_csv (#35742) --- pandas/tests/io/parser/test_network.py | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/pandas/tests/io/parser/test_network.py b/pandas/tests/io/parser/test_network.py index 509ae89909699..b30a7b1ef34de 100644 --- a/pandas/tests/io/parser/test_network.py +++ b/pandas/tests/io/parser/test_network.py @@ -46,6 +46,21 @@ def check_compressed_urls(salaries_table, compression, extension, mode, engine): tm.assert_frame_equal(url_table, salaries_table) +@tm.network("https://raw.githubusercontent.com/", check_before_test=True) +def test_url_encoding_csv(): + """ + read_csv should honor the requested encoding for URLs. 
+ + GH 10424 + """ + path = ( + "https://raw.githubusercontent.com/pandas-dev/pandas/master/" + + "pandas/tests/io/parser/data/unicode_series.csv" + ) + df = read_csv(path, encoding="latin-1", header=None) + assert df.loc[15, 1] == "Á köldum klaka (Cold Fever) (1994)" + + @pytest.fixture def tips_df(datapath): """DataFrame with the tips dataset.""" From 13940c7f3c0371d6799bbd88b9c6546392b418a1 Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Mon, 17 Aug 2020 13:50:13 -0700 Subject: [PATCH 286/632] CI: close sockets in SQL tests (#35772) --- pandas/tests/io/test_sql.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/pandas/tests/io/test_sql.py b/pandas/tests/io/test_sql.py index 29b787d39c09d..a7e3162ed7b73 100644 --- a/pandas/tests/io/test_sql.py +++ b/pandas/tests/io/test_sql.py @@ -263,7 +263,8 @@ def _get_all_tables(self): return table_list def _close_conn(self): - pass + # https://docs.sqlalchemy.org/en/13/core/connections.html#engine-disposal + self.conn.dispose() class PandasSQLTest: @@ -1242,7 +1243,7 @@ class _TestSQLAlchemy(SQLAlchemyMixIn, PandasSQLTest): def setup_class(cls): cls.setup_import() cls.setup_driver() - conn = cls.connect() + conn = cls.conn = cls.connect() conn.connect() def load_test_data_and_sql(self): From 66e83ccbb6eb0b24a751ced95def26877e9277ef Mon Sep 17 00:00:00 2001 From: Simon Hawkins Date: Tue, 18 Aug 2020 13:54:55 +0100 Subject: [PATCH 287/632] REGR: follow-up to return copy with df.interpolate on empty DataFrame (#35774) --- pandas/core/generic.py | 2 +- pandas/tests/frame/methods/test_interpolate.py | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 9cbe2f714fd57..fe412bc0ce937 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -6893,7 +6893,7 @@ def interpolate( obj = self.T if should_transpose else self if obj.empty: - return self + return self.copy() if method not in fillna_methods: axis = self._info_axis_number diff --git a/pandas/tests/frame/methods/test_interpolate.py b/pandas/tests/frame/methods/test_interpolate.py index 3c9d79397e4bd..6b86a13fcf1b9 100644 --- a/pandas/tests/frame/methods/test_interpolate.py +++ b/pandas/tests/frame/methods/test_interpolate.py @@ -38,6 +38,7 @@ def test_interp_empty(self): # https://github.com/pandas-dev/pandas/issues/35598 df = DataFrame() result = df.interpolate() + assert result is not df expected = df tm.assert_frame_equal(result, expected) From 97ec062714c13a33985801b0929ff991d7d2fe07 Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Tue, 18 Aug 2020 06:05:30 -0700 Subject: [PATCH 288/632] CLN: remove unused variable (#35783) --- pandas/core/groupby/generic.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index 1f0cdbd07560f..166631e69f523 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -1031,7 +1031,6 @@ def _cython_agg_blocks( data = data.get_numeric_data(copy=False) agg_blocks: List["Block"] = [] - deleted_items: List[np.ndarray] = [] no_result = object() @@ -1133,7 +1132,6 @@ def blk_func(block: "Block") -> List["Block"]: # continue and exclude the block # NotImplementedError -> "ohlc" with wrong dtype skipped.append(i) - deleted_items.append(block.mgr_locs.as_array) else: agg_blocks.extend(nbs) From ddf4f2430dd0a6fd51d7409ad3f524aeb5cbace2 Mon Sep 17 00:00:00 2001 From: Simon Hawkins Date: Wed, 19 Aug 2020 10:48:41 +0100 Subject: [PATCH 289/632] DOC: clean v1.1.1 release notes 
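The empty-DataFrame interpolate regression fixed above is easiest to see as a doctest (behavior with the fix applied):

    >>> import pandas as pd
    >>> df = pd.DataFrame()
    >>> result = df.interpolate()
    >>> result is df  # a defensive copy is returned, not the input itself
    False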
(#35787) Co-authored-by: Joris Van den Bossche --- doc/source/whatsnew/v1.1.1.rst | 60 +++++++--------------------------- 1 file changed, 12 insertions(+), 48 deletions(-) diff --git a/doc/source/whatsnew/v1.1.1.rst b/doc/source/whatsnew/v1.1.1.rst index ff5bbccf63ffe..43ffed273adbc 100644 --- a/doc/source/whatsnew/v1.1.1.rst +++ b/doc/source/whatsnew/v1.1.1.rst @@ -1,7 +1,7 @@ .. _whatsnew_111: -What's new in 1.1.1 (?) ------------------------ +What's new in 1.1.1 (August XX, 2020) +------------------------------------- These are the changes in pandas 1.1.1. See :ref:`release` for a full changelog including other versions of pandas. @@ -15,20 +15,23 @@ including other versions of pandas. Fixed regressions ~~~~~~~~~~~~~~~~~ +- Fixed regression in :meth:`CategoricalIndex.format` where, when stringified scalars had different lengths, the shorter string would be right-filled with spaces, so it had the same length as the longest string (:issue:`35439`) +- Fixed regression in :meth:`Series.truncate` when trying to truncate a single-element series (:issue:`35544`) - Fixed regression where :meth:`DataFrame.to_numpy` would raise a ``RuntimeError`` for mixed dtypes when converting to ``str`` (:issue:`35455`) - Fixed regression where :func:`read_csv` would raise a ``ValueError`` when ``pandas.options.mode.use_inf_as_na`` was set to ``True`` (:issue:`35493`) - Fixed regression where :func:`pandas.testing.assert_series_equal` would raise an error when non-numeric dtypes were passed with ``check_exact=True`` (:issue:`35446`) -- Fixed regression in :class:`pandas.core.groupby.RollingGroupby` where column selection was ignored (:issue:`35486`) -- Fixed regression where :meth:`DataFrame.interpolate` would raise a ``TypeError`` when the :class:`DataFrame` was empty (:issue:`35598`). +- Fixed regression in ``.groupby(..).rolling(..)`` where column selection was ignored (:issue:`35486`) +- Fixed regression where :meth:`DataFrame.interpolate` would raise a ``TypeError`` when the :class:`DataFrame` was empty (:issue:`35598`) - Fixed regression in :meth:`DataFrame.shift` with ``axis=1`` and heterogeneous dtypes (:issue:`35488`) - Fixed regression in :meth:`DataFrame.diff` with read-only data (:issue:`35559`) - Fixed regression in ``.groupby(..).rolling(..)`` where a segfault would occur with ``center=True`` and an odd number of values (:issue:`35552`) - Fixed regression in :meth:`DataFrame.apply` where functions that altered the input in-place only operated on a single row (:issue:`35462`) - Fixed regression in :meth:`DataFrame.reset_index` would raise a ``ValueError`` on empty :class:`DataFrame` with a :class:`MultiIndex` with a ``datetime64`` dtype level (:issue:`35606`, :issue:`35657`) -- Fixed regression where :meth:`DataFrame.merge_asof` would raise a ``UnboundLocalError`` when ``left_index`` , ``right_index`` and ``tolerance`` were set (:issue:`35558`) +- Fixed regression where :func:`pandas.merge_asof` would raise a ``UnboundLocalError`` when ``left_index`` , ``right_index`` and ``tolerance`` were set (:issue:`35558`) - Fixed regression in ``.groupby(..).rolling(..)`` where a custom ``BaseIndexer`` would be ignored (:issue:`35557`) - Fixed regression in :meth:`DataFrame.replace` and :meth:`Series.replace` where compiled regular expressions would be ignored during replacement (:issue:`35680`) -- Fixed regression in :meth:`~pandas.core.groupby.DataFrameGroupBy.agg` where a list of functions would produce the wrong results if at least one of the functions did not aggregate. 
(:issue:`35490`) +- Fixed regression in :meth:`~pandas.core.groupby.DataFrameGroupBy.aggregate` where a list of functions would produce the wrong results if at least one of the functions did not aggregate (:issue:`35490`) +- Fixed memory usage issue when instantiating large :class:`pandas.arrays.StringArray` (:issue:`35499`) .. --------------------------------------------------------------------------- @@ -37,50 +40,11 @@ Fixed regressions Bug fixes ~~~~~~~~~ -- Bug in ``Styler`` whereby `cell_ids` argument had no effect due to other recent changes (:issue:`35588`) (:issue:`35663`). -- Bug in :func:`pandas.testing.assert_series_equal` and :func:`pandas.testing.assert_frame_equal` where extension dtypes were not ignored when ``check_dtypes`` was set to ``False`` (:issue:`35715`). - -Categorical -^^^^^^^^^^^ - -- Bug in :meth:`CategoricalIndex.format` where, when stringified scalars had different lengths, the shorter string would be right-filled with spaces, so it had the same length as the longest string (:issue:`35439`) - - -**Datetimelike** - -- -- - -**Timedelta** - +- Bug in :class:`~pandas.io.formats.style.Styler` whereby `cell_ids` argument had no effect due to other recent changes (:issue:`35588`) (:issue:`35663`) +- Bug in :func:`pandas.testing.assert_series_equal` and :func:`pandas.testing.assert_frame_equal` where extension dtypes were not ignored when ``check_dtypes`` was set to ``False`` (:issue:`35715`) - Bug in :meth:`to_timedelta` fails when arg is a :class:`Series` with `Int64` dtype containing null values (:issue:`35574`) - - -**Numeric** - -- -- - -**Groupby/resample/rolling** - -- Bug in :class:`pandas.core.groupby.RollingGroupby` where passing ``closed`` with column selection would raise a ``ValueError`` (:issue:`35549`) - -**Plotting** - -- - -**Indexing** - -- Bug in :meth:`Series.truncate` when trying to truncate a single-element series (:issue:`35544`) - -**DataFrame** +- Bug in ``.groupby(..).rolling(..)`` where passing ``closed`` with column selection would raise a ``ValueError`` (:issue:`35549`) - Bug in :class:`DataFrame` constructor failing to raise ``ValueError`` in some cases when data and index have mismatched lengths (:issue:`33437`) -- - -**Strings** - -- fix memory usage issue when instantiating large :class:`pandas.arrays.StringArray` (:issue:`35499`) - .. --------------------------------------------------------------------------- From 33593a4aac169c4fe7d339a8ff3c0799cb6b1157 Mon Sep 17 00:00:00 2001 From: tpanza <19810086+tpanza@users.noreply.github.com> Date: Wed, 19 Aug 2020 06:05:40 -0700 Subject: [PATCH 290/632] lreshape and wide_to_long documentation (Closes #33417) (#33418) --- pandas/core/frame.py | 6 +++++ pandas/core/reshape/melt.py | 51 ++++++++++++++++++++++++++++++++----- 2 files changed, 50 insertions(+), 7 deletions(-) diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 1587dd8798ec3..a4408d1f5d23d 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -6609,6 +6609,8 @@ def groupby( duplicate values for one index/column pair. DataFrame.unstack : Pivot based on the index values instead of a column. + wide_to_long : Wide panel to long format. Less flexible but more + user-friendly than melt. Notes ----- @@ -6763,6 +6765,10 @@ def pivot(self, index=None, columns=None, values=None) -> "DataFrame": -------- DataFrame.pivot : Pivot without aggregation that can handle non-numeric data. + DataFrame.melt: Unpivot a DataFrame from wide to long format, + optionally leaving identifiers set. 
+ wide_to_long : Wide panel to long format. Less flexible but more + user-friendly than melt. Examples -------- diff --git a/pandas/core/reshape/melt.py b/pandas/core/reshape/melt.py index 1ba6854a79265..8724f7674f0c8 100644 --- a/pandas/core/reshape/melt.py +++ b/pandas/core/reshape/melt.py @@ -144,14 +144,43 @@ def melt( @deprecate_kwarg(old_arg_name="label", new_arg_name=None) def lreshape(data: "DataFrame", groups, dropna: bool = True, label=None) -> "DataFrame": """ - Reshape long-format data to wide. Generalized inverse of DataFrame.pivot + Reshape wide-format data to long. Generalized inverse of DataFrame.pivot. + + Accepts a dictionary, ``groups``, in which each key is a new column name + and each value is a list of old column names that will be "melted" under + the new column name as part of the reshape. Parameters ---------- data : DataFrame + The wide-format DataFrame. groups : dict - {new_name : list_of_columns} - dropna : boolean, default True + {new_name : list_of_columns}. + dropna : bool, default True + Do not include columns whose entries are all NaN. + label : None + Not used. + + .. deprecated:: 1.0.0 + + Returns + ------- + DataFrame + Reshaped DataFrame. + + See Also + -------- + melt : Unpivot a DataFrame from wide to long format, optionally leaving + identifiers set. + pivot : Create a spreadsheet-style pivot table as a DataFrame. + DataFrame.pivot : Pivot without aggregation that can handle + non-numeric data. + DataFrame.pivot_table : Generalization of pivot that can handle + duplicate values for one index/column pair. + DataFrame.unstack : Pivot based on the index values instead of a + column. + wide_to_long : Wide panel to long format. Less flexible but more + user-friendly than melt. Examples -------- @@ -169,10 +198,6 @@ def lreshape(data: "DataFrame", groups, dropna: bool = True, label=None) -> "Dat 1 Yankees 2007 573 2 Red Sox 2008 545 3 Yankees 2008 526 - - Returns - ------- - reshaped : DataFrame """ if isinstance(groups, dict): keys = list(groups.keys()) @@ -262,6 +287,18 @@ def wide_to_long( A DataFrame that contains each stub name as a variable, with new index (i, j). + See Also + -------- + melt : Unpivot a DataFrame from wide to long format, optionally leaving + identifiers set. + pivot : Create a spreadsheet-style pivot table as a DataFrame. + DataFrame.pivot : Pivot without aggregation that can handle + non-numeric data. + DataFrame.pivot_table : Generalization of pivot that can handle + duplicate values for one index/column pair. + DataFrame.unstack : Pivot based on the index values instead of a + column. + Notes ----- All extra variables are left untouched. This simply uses From 06f17182aaf1d2f5a414542e26c96b609eececb3 Mon Sep 17 00:00:00 2001 From: edwardkong <33737404+edwardkong@users.noreply.github.com> Date: Wed, 19 Aug 2020 09:16:41 -0400 Subject: [PATCH 291/632] Changed 'int' type to 'integer' in to_numeric docstring (#35776) --- pandas/core/arrays/sparse/array.py | 2 +- pandas/core/tools/numeric.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/pandas/core/arrays/sparse/array.py b/pandas/core/arrays/sparse/array.py index d8db196e4b92f..1531f7b292365 100644 --- a/pandas/core/arrays/sparse/array.py +++ b/pandas/core/arrays/sparse/array.py @@ -234,7 +234,7 @@ class SparseArray(PandasObject, ExtensionArray, ExtensionOpsMixin): 3. ``data.dtype.fill_value`` if `fill_value` is None and `dtype` is not a ``SparseDtype`` and `data` is a ``SparseArray``. 
- kind : {'int', 'block'}, default 'int' + kind : {'integer', 'block'}, default 'integer' The type of storage for sparse locations. * 'block': Stores a `block` and `block_length` for each diff --git a/pandas/core/tools/numeric.py b/pandas/core/tools/numeric.py index 41548931f17f8..cff4695603d06 100644 --- a/pandas/core/tools/numeric.py +++ b/pandas/core/tools/numeric.py @@ -40,13 +40,13 @@ def to_numeric(arg, errors="raise", downcast=None): - If 'raise', then invalid parsing will raise an exception. - If 'coerce', then invalid parsing will be set as NaN. - If 'ignore', then invalid parsing will return the input. - downcast : {'int', 'signed', 'unsigned', 'float'}, default None + downcast : {'integer', 'signed', 'unsigned', 'float'}, default None If not None, and if the data has been successfully cast to a numerical dtype (or if the data was numeric to begin with), downcast that resulting data to the smallest numerical dtype possible according to the following rules: - - 'int' or 'signed': smallest signed int dtype (min.: np.int8) + - 'integer' or 'signed': smallest signed int dtype (min.: np.int8) - 'unsigned': smallest unsigned int dtype (min.: np.uint8) - 'float': smallest float dtype (min.: np.float32) From bdc7bb11325905f446a9991105930e138d2abc8d Mon Sep 17 00:00:00 2001 From: Ali McMaster Date: Wed, 19 Aug 2020 18:42:47 +0100 Subject: [PATCH 292/632] CI: Unpin Pytest + Pytest Asyncio Min Version (#35757) --- ci/deps/azure-38-locale.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ci/deps/azure-38-locale.yaml b/ci/deps/azure-38-locale.yaml index c466a5929ea29..c7090d3a46a77 100644 --- a/ci/deps/azure-38-locale.yaml +++ b/ci/deps/azure-38-locale.yaml @@ -6,9 +6,9 @@ dependencies: # tools - cython>=0.29.16 - - pytest>=5.0.1,<6.0.0 # https://github.com/pandas-dev/pandas/issues/35620 + - pytest>=5.0.1 - pytest-xdist>=1.21 - - pytest-asyncio + - pytest-asyncio>=0.12.0 - hypothesis>=3.58.0 - pytest-azurepipelines From a1ac051c8908502f064a4f2f3f8ffdbe7bb41797 Mon Sep 17 00:00:00 2001 From: Tobias Pitters <31857876+CloseChoice@users.noreply.github.com> Date: Wed, 19 Aug 2020 19:59:03 +0200 Subject: [PATCH 293/632] BUG: pd.crosstab fails when passed multiple columns, margins True and normalize True (#35150) --- doc/source/whatsnew/v1.2.0.rst | 1 + pandas/core/reshape/pivot.py | 7 ++--- pandas/tests/reshape/test_crosstab.py | 45 +++++++++++++++++++++++++++ 3 files changed, 49 insertions(+), 4 deletions(-) diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index 42f95d88d74ac..f27c83fafef55 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -258,6 +258,7 @@ Reshaping - Bug in :meth:`DataFrame.pivot_table` with ``aggfunc='count'`` or ``aggfunc='sum'`` returning ``NaN`` for missing categories when pivoted on a ``Categorical``. Now returning ``0`` (:issue:`31422`) - Bug in :func:`union_indexes` where input index names are not preserved in some cases. 
Affects :func:`concat` and :class:`DataFrame` constructor (:issue:`13475`) +- Bug in func :meth:`crosstab` when using multiple columns with ``margins=True`` and ``normalize=True`` (:issue:`35144`) - Sparse diff --git a/pandas/core/reshape/pivot.py b/pandas/core/reshape/pivot.py index ea5916eff3afa..64a9e2dbf6d99 100644 --- a/pandas/core/reshape/pivot.py +++ b/pandas/core/reshape/pivot.py @@ -670,12 +670,11 @@ def _normalize(table, normalize, margins: bool, margins_name="All"): # keep index and column of pivoted table table_index = table.index table_columns = table.columns + last_ind_or_col = table.iloc[-1, :].name - # check if margin name is in (for MI cases) or equal to last + # check if margin name is not in (for MI cases) and not equal to last # index/column and save the column and index margin - if (margins_name not in table.iloc[-1, :].name) | ( - margins_name != table.iloc[:, -1].name - ): + if (margins_name not in last_ind_or_col) & (margins_name != last_ind_or_col): raise ValueError(f"{margins_name} not in pivoted DataFrame") column_margin = table.iloc[:-1, -1] index_margin = table.iloc[-1, :-1] diff --git a/pandas/tests/reshape/test_crosstab.py b/pandas/tests/reshape/test_crosstab.py index 8795af2e11122..6f5550a6f8209 100644 --- a/pandas/tests/reshape/test_crosstab.py +++ b/pandas/tests/reshape/test_crosstab.py @@ -698,3 +698,48 @@ def test_margin_normalize(self): names=["A", "B"], ) tm.assert_frame_equal(result, expected) + + def test_margin_normalize_multiple_columns(self): + # GH 35144 + # use multiple columns with margins and normalization + df = DataFrame( + { + "A": ["foo", "foo", "foo", "foo", "foo", "bar", "bar", "bar", "bar"], + "B": ["one", "one", "one", "two", "two", "one", "one", "two", "two"], + "C": [ + "small", + "large", + "large", + "small", + "small", + "large", + "small", + "small", + "large", + ], + "D": [1, 2, 2, 3, 3, 4, 5, 6, 7], + "E": [2, 4, 5, 5, 6, 6, 8, 9, 9], + } + ) + result = crosstab( + index=df.C, + columns=[df.A, df.B], + margins=True, + margins_name="margin", + normalize=True, + ) + expected = DataFrame( + [ + [0.111111, 0.111111, 0.222222, 0.000000, 0.444444], + [0.111111, 0.111111, 0.111111, 0.222222, 0.555556], + [0.222222, 0.222222, 0.333333, 0.222222, 1.0], + ], + index=["large", "small", "margin"], + ) + expected.columns = MultiIndex( + levels=[["bar", "foo", "margin"], ["", "one", "two"]], + codes=[[0, 0, 1, 1, 2], [1, 2, 1, 2, 0]], + names=["A", "B"], + ) + expected.index.name = "C" + tm.assert_frame_equal(result, expected) From 71dfed7455106f79869641c44f0ab94258616ce2 Mon Sep 17 00:00:00 2001 From: Yutaro Ikeda Date: Thu, 20 Aug 2020 03:02:55 +0900 Subject: [PATCH 294/632] ENH: GH-35611 Tests for top-level Pandas functions serializable (#35692) --- pandas/tests/test_common.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/pandas/tests/test_common.py b/pandas/tests/test_common.py index bcfed2d0d3a10..3d45a1f7389b7 100644 --- a/pandas/tests/test_common.py +++ b/pandas/tests/test_common.py @@ -10,6 +10,7 @@ import pandas as pd from pandas import Series, Timestamp +import pandas._testing as tm from pandas.core import ops import pandas.core.common as com @@ -157,3 +158,12 @@ def test_version_tag(): raise ValueError( "No git tags exist, please sync tags between upstream and your repo" ) + + +@pytest.mark.parametrize( + "obj", [(obj,) for obj in pd.__dict__.values() if callable(obj)] +) +def test_serializable(obj): + # GH 35611 + unpickled = tm.round_trip_pickle(obj) + assert type(obj) == type(unpickled) From 
eec60803a72b9a14afacdacb4e72649684417dfe Mon Sep 17 00:00:00 2001 From: Tobias Pitters <31857876+CloseChoice@users.noreply.github.com> Date: Wed, 19 Aug 2020 22:22:59 +0200 Subject: [PATCH 295/632] =?UTF-8?q?fix=20bug=20when=20combining=20groupby?= =?UTF-8?q?=20with=20resample=20and=20interpolate=20with=20dat=E2=80=A6=20?= =?UTF-8?q?(#35360)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- doc/source/whatsnew/v1.2.0.rst | 3 +- pandas/core/resample.py | 2 +- pandas/tests/resample/test_time_grouper.py | 62 ++++++++++++++++++++++ 3 files changed, 64 insertions(+), 3 deletions(-) diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index f27c83fafef55..8ec75b4846ae2 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -249,8 +249,7 @@ Groupby/resample/rolling - Bug in :meth:`DataFrameGroupBy.count` and :meth:`SeriesGroupBy.sum` returning ``NaN`` for missing categories when grouped on multiple ``Categoricals``. Now returning ``0`` (:issue:`35028`) - Bug in :meth:`DataFrameGroupBy.apply` that would some times throw an erroneous ``ValueError`` if the grouping axis had duplicate entries (:issue:`16646`) -- -- +- Bug when combining methods :meth:`DataFrame.groupby` with :meth:`DataFrame.resample` and :meth:`DataFrame.interpolate` raising an ``TypeError`` (:issue:`35325`) - Bug in :meth:`DataFrameGroupBy.apply` where a non-nuisance grouping column would be dropped from the output columns if another groupby method was called before ``.apply()`` (:issue:`34656`) Reshaping diff --git a/pandas/core/resample.py b/pandas/core/resample.py index e82a1d4d2cda8..fc54128ae5aa6 100644 --- a/pandas/core/resample.py +++ b/pandas/core/resample.py @@ -795,7 +795,7 @@ def interpolate( """ Interpolate values according to different methods. 
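The change below swaps ``self._upsample(None)`` for ``self._upsample("asfreq")`` so that the chained groupby/resample pattern works; a minimal sketch of that pattern, with data as in the regression test:

    >>> import pandas as pd
    >>> df = pd.DataFrame({"price": [10, 11, 9], "volume": [50, 60, 50]})
    >>> df["week_starting"] = pd.date_range("01/01/2018", periods=3, freq="W")
    >>> result = (
    ...     df.set_index("week_starting")
    ...     .groupby("volume")
    ...     .resample("1D")
    ...     .interpolate(method="linear")
    ... )  # previously raised TypeError (GH 35325)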
""" - result = self._upsample(None) + result = self._upsample("asfreq") return result.interpolate( method=method, axis=axis, diff --git a/pandas/tests/resample/test_time_grouper.py b/pandas/tests/resample/test_time_grouper.py index 26e429c47b494..f638706207679 100644 --- a/pandas/tests/resample/test_time_grouper.py +++ b/pandas/tests/resample/test_time_grouper.py @@ -287,3 +287,65 @@ def test_upsample_sum(method, method_args, expected_values): result = methodcaller(method, **method_args)(resampled) expected = pd.Series(expected_values, index=index) tm.assert_series_equal(result, expected) + + +def test_groupby_resample_interpolate(): + # GH 35325 + d = {"price": [10, 11, 9], "volume": [50, 60, 50]} + + df = pd.DataFrame(d) + + df["week_starting"] = pd.date_range("01/01/2018", periods=3, freq="W") + + result = ( + df.set_index("week_starting") + .groupby("volume") + .resample("1D") + .interpolate(method="linear") + ) + expected_ind = pd.MultiIndex.from_tuples( + [ + (50, "2018-01-07"), + (50, pd.Timestamp("2018-01-08")), + (50, pd.Timestamp("2018-01-09")), + (50, pd.Timestamp("2018-01-10")), + (50, pd.Timestamp("2018-01-11")), + (50, pd.Timestamp("2018-01-12")), + (50, pd.Timestamp("2018-01-13")), + (50, pd.Timestamp("2018-01-14")), + (50, pd.Timestamp("2018-01-15")), + (50, pd.Timestamp("2018-01-16")), + (50, pd.Timestamp("2018-01-17")), + (50, pd.Timestamp("2018-01-18")), + (50, pd.Timestamp("2018-01-19")), + (50, pd.Timestamp("2018-01-20")), + (50, pd.Timestamp("2018-01-21")), + (60, pd.Timestamp("2018-01-14")), + ], + names=["volume", "week_starting"], + ) + expected = pd.DataFrame( + data={ + "price": [ + 10.0, + 9.928571428571429, + 9.857142857142858, + 9.785714285714286, + 9.714285714285714, + 9.642857142857142, + 9.571428571428571, + 9.5, + 9.428571428571429, + 9.357142857142858, + 9.285714285714286, + 9.214285714285714, + 9.142857142857142, + 9.071428571428571, + 9.0, + 11.0, + ], + "volume": [50.0] * 15 + [60], + }, + index=expected_ind, + ) + tm.assert_frame_equal(result, expected) From 5d18e13c99ea3346514f518d4bdbdf81baad321f Mon Sep 17 00:00:00 2001 From: Fangchen Li Date: Wed, 19 Aug 2020 15:40:04 -0500 Subject: [PATCH 296/632] CI: unpin numpy and matplotlib #35779 (#35780) --- doc/source/user_guide/visualization.rst | 2 ++ environment.yml | 5 ++--- pandas/core/frame.py | 2 +- pandas/core/series.py | 2 +- pandas/plotting/_matplotlib/boxplot.py | 2 +- requirements-dev.txt | 4 ++-- 6 files changed, 9 insertions(+), 8 deletions(-) diff --git a/doc/source/user_guide/visualization.rst b/doc/source/user_guide/visualization.rst index 5bc87bca87211..8ce4b30c717a4 100644 --- a/doc/source/user_guide/visualization.rst +++ b/doc/source/user_guide/visualization.rst @@ -668,6 +668,7 @@ A ``ValueError`` will be raised if there are any negative values in your data. plt.figure() .. ipython:: python + :okwarning: series = pd.Series(3 * np.random.rand(4), index=['a', 'b', 'c', 'd'], name='series') @@ -742,6 +743,7 @@ If you pass values whose sum total is less than 1.0, matplotlib draws a semicirc plt.figure() .. ipython:: python + :okwarning: series = pd.Series([0.1] * 4, index=['a', 'b', 'c', 'd'], name='series2') diff --git a/environment.yml b/environment.yml index 1e51470d43d36..aaabf09b8f190 100644 --- a/environment.yml +++ b/environment.yml @@ -3,8 +3,7 @@ channels: - conda-forge dependencies: # required - # Pin numpy<1.19 until MPL 3.3.0 is released. 
- - numpy>=1.16.5,<1.19.0 + - numpy>=1.16.5 - python=3 - python-dateutil>=2.7.3 - pytz @@ -73,7 +72,7 @@ dependencies: - ipykernel - ipython>=7.11.1 - jinja2 # pandas.Styler - - matplotlib>=2.2.2,<3.3.0 # pandas.plotting, Series.plot, DataFrame.plot + - matplotlib>=2.2.2 # pandas.plotting, Series.plot, DataFrame.plot - numexpr>=2.6.8 - scipy>=1.2 - numba>=0.46.0 diff --git a/pandas/core/frame.py b/pandas/core/frame.py index a4408d1f5d23d..837bd35414773 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -2600,7 +2600,7 @@ def to_html( 1 column_2 1000000 non-null object 2 column_3 1000000 non-null object dtypes: object(3) - memory usage: 188.8 MB""" + memory usage: 165.9 MB""" ), see_also_sub=( """ diff --git a/pandas/core/series.py b/pandas/core/series.py index e8bf87a39b572..cd2db659fbd0e 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -4637,7 +4637,7 @@ def memory_usage(self, index=True, deep=False): >>> s.memory_usage() 144 >>> s.memory_usage(deep=True) - 260 + 244 """ v = super().memory_usage(deep=deep) if index: diff --git a/pandas/plotting/_matplotlib/boxplot.py b/pandas/plotting/_matplotlib/boxplot.py index 53ef97bbe9a72..b33daf39de37c 100644 --- a/pandas/plotting/_matplotlib/boxplot.py +++ b/pandas/plotting/_matplotlib/boxplot.py @@ -294,7 +294,7 @@ def maybe_color_bp(bp, **kwds): def plot_group(keys, values, ax): keys = [pprint_thing(x) for x in keys] - values = [np.asarray(remove_na_arraylike(v)) for v in values] + values = [np.asarray(remove_na_arraylike(v), dtype=object) for v in values] bp = ax.boxplot(values, **kwds) if fontsize is not None: ax.tick_params(axis="both", labelsize=fontsize) diff --git a/requirements-dev.txt b/requirements-dev.txt index 66e72641cd5bb..3d0778b74ccbd 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -1,7 +1,7 @@ # This file is auto-generated from environment.yml, do not modify. # See that file for comments about the need/usage of each dependency. -numpy>=1.16.5,<1.19.0 +numpy>=1.16.5 python-dateutil>=2.7.3 pytz asv @@ -47,7 +47,7 @@ bottleneck>=1.2.1 ipykernel ipython>=7.11.1 jinja2 -matplotlib>=2.2.2,<3.3.0 +matplotlib>=2.2.2 numexpr>=2.6.8 scipy>=1.2 numba>=0.46.0 From 741a21bb714420775ed5e8d085a23c9f54046332 Mon Sep 17 00:00:00 2001 From: Fangchen Li Date: Thu, 20 Aug 2020 02:24:26 -0500 Subject: [PATCH 297/632] CI/DOC: unpin sphinx, fix autodoc usage (#35815) --- doc/source/reference/frame.rst | 1 + doc/source/reference/series.rst | 1 + environment.yml | 2 +- requirements-dev.txt | 2 +- 4 files changed, 4 insertions(+), 2 deletions(-) diff --git a/doc/source/reference/frame.rst b/doc/source/reference/frame.rst index e3dfb552651a0..4d9d18e3d204e 100644 --- a/doc/source/reference/frame.rst +++ b/doc/source/reference/frame.rst @@ -343,6 +343,7 @@ Sparse-dtype specific methods and attributes are provided under the .. autosummary:: :toctree: api/ + :template: autosummary/accessor_method.rst DataFrame.sparse.from_spmatrix DataFrame.sparse.to_coo diff --git a/doc/source/reference/series.rst b/doc/source/reference/series.rst index 3b595ba5ab206..ae3e121ca8212 100644 --- a/doc/source/reference/series.rst +++ b/doc/source/reference/series.rst @@ -522,6 +522,7 @@ Sparse-dtype specific methods and attributes are provided under the .. 
autosummary:: :toctree: api/ + :template: autosummary/accessor_method.rst Series.sparse.from_coo Series.sparse.to_coo diff --git a/environment.yml b/environment.yml index aaabf09b8f190..806119631d5ee 100644 --- a/environment.yml +++ b/environment.yml @@ -27,7 +27,7 @@ dependencies: # documentation - gitpython # obtain contributors from git for whatsnew - gitdb2=2.0.6 # GH-32060 - - sphinx<=3.1.1 + - sphinx # documentation (jupyter notebooks) - nbconvert>=5.4.1 diff --git a/requirements-dev.txt b/requirements-dev.txt index 3d0778b74ccbd..deaed8ab9d5f1 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -16,7 +16,7 @@ mypy==0.730 pycodestyle gitpython gitdb2==2.0.6 -sphinx<=3.1.1 +sphinx nbconvert>=5.4.1 nbsphinx pandoc From 073048fd510ac52cf325b23af7d9db06f61cdb8c Mon Sep 17 00:00:00 2001 From: Simon Hawkins Date: Thu, 20 Aug 2020 13:03:30 +0100 Subject: [PATCH 298/632] CI: more xfail failing 32-bit tests (#35809) --- pandas/tests/window/test_grouper.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/pandas/tests/window/test_grouper.py b/pandas/tests/window/test_grouper.py index a9590c7e1233a..d0a62374d0888 100644 --- a/pandas/tests/window/test_grouper.py +++ b/pandas/tests/window/test_grouper.py @@ -215,6 +215,7 @@ def foo(x): ) tm.assert_series_equal(result, expected) + @pytest.mark.xfail(not compat.IS64, reason="GH-35294") def test_groupby_rolling_center_center(self): # GH 35552 series = Series(range(1, 6)) @@ -280,6 +281,7 @@ def test_groupby_rolling_center_center(self): ) tm.assert_frame_equal(result, expected) + @pytest.mark.xfail(not compat.IS64, reason="GH-35294") def test_groupby_subselect_rolling(self): # GH 35486 df = DataFrame( @@ -305,6 +307,7 @@ def test_groupby_subselect_rolling(self): ) tm.assert_series_equal(result, expected) + @pytest.mark.xfail(not compat.IS64, reason="GH-35294") def test_groupby_rolling_custom_indexer(self): # GH 35557 class SimpleIndexer(pd.api.indexers.BaseIndexer): @@ -328,6 +331,7 @@ def get_window_bounds( expected = df.groupby(df.index).rolling(window=3, min_periods=1).sum() tm.assert_frame_equal(result, expected) + @pytest.mark.xfail(not compat.IS64, reason="GH-35294") def test_groupby_rolling_subset_with_closed(self): # GH 35549 df = pd.DataFrame( @@ -352,6 +356,7 @@ def test_groupby_rolling_subset_with_closed(self): ) tm.assert_series_equal(result, expected) + @pytest.mark.xfail(not compat.IS64, reason="GH-35294") def test_groupby_subset_rolling_subset_with_closed(self): # GH 35549 df = pd.DataFrame( From 2778c3e26b40e84ac9ec31719b4f427e818c039e Mon Sep 17 00:00:00 2001 From: Joris Van den Bossche Date: Thu, 20 Aug 2020 16:52:52 +0200 Subject: [PATCH 299/632] ROADMAP: add consistent missing values for all dtypes to the roadmap (#35208) --- doc/source/development/roadmap.rst | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/doc/source/development/roadmap.rst b/doc/source/development/roadmap.rst index d331491d02883..efee21b5889ed 100644 --- a/doc/source/development/roadmap.rst +++ b/doc/source/development/roadmap.rst @@ -53,6 +53,32 @@ need to implement certain operations expected by pandas users (for example the algorithm used in, ``Series.str.upper``). That work may be done outside of pandas. +Consistent missing value handling +--------------------------------- + +Currently, pandas handles missing data differently for different data types. 
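A short illustration of the current divergence (behavior as of pandas 1.x; a sketch, not an exhaustive comparison):

    >>> import numpy as np
    >>> import pandas as pd
    >>> np.nan == np.nan      # float sentinel: comparisons return False
    False
    >>> pd.NA == pd.NA        # experimental sentinel: comparisons propagate
    <NA>
    >>> pd.Series([1, None], dtype="Int64")  # nullable integer keeps an integer dtype
    0       1
    1    <NA>
    dtype: Int64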
We +use different types to indicate that a value is missing (``np.nan`` for +floating-point data, ``np.nan`` or ``None`` for object-dtype data -- typically +strings or booleans -- with missing values, and ``pd.NaT`` for datetimelike +data). Integer data cannot store missing data or are cast to float. In addition, +pandas 1.0 introduced a new missing value sentinel, ``pd.NA``, which is being +used for the experimental nullable integer, boolean, and string data types. + +These different missing values have different behaviors in user-facing +operations. Specifically, we introduced different semantics for the nullable +data types for certain operations (e.g. propagating in comparison operations +instead of comparing as False). + +Long term, we want to introduce consistent missing data handling for all data +types. This includes consistent behavior in all operations (indexing, arithmetic +operations, comparisons, etc.). We want to eventually make the new semantics the +default. + +This has been discussed at +`github #28095 `__ (and +linked issues), and described in more detail in this +`design doc `__. + Apache Arrow interoperability ----------------------------- From 6b2438038f71604a361379099e7de8110199e3fd Mon Sep 17 00:00:00 2001 From: Simon Hawkins Date: Thu, 20 Aug 2020 16:05:46 +0100 Subject: [PATCH 300/632] DOC: another pass of v1.1.1 release notes (#35801) --- doc/source/whatsnew/v1.1.1.rst | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/doc/source/whatsnew/v1.1.1.rst b/doc/source/whatsnew/v1.1.1.rst index 43ffed273adbc..721f07c865409 100644 --- a/doc/source/whatsnew/v1.1.1.rst +++ b/doc/source/whatsnew/v1.1.1.rst @@ -1,6 +1,6 @@ .. _whatsnew_111: -What's new in 1.1.1 (August XX, 2020) +What's new in 1.1.1 (August 20, 2020) ------------------------------------- These are the changes in pandas 1.1.1. 
See :ref:`release` for a full changelog @@ -27,7 +27,7 @@ Fixed regressions - Fixed regression in ``.groupby(..).rolling(..)`` where a segfault would occur with ``center=True`` and an odd number of values (:issue:`35552`) - Fixed regression in :meth:`DataFrame.apply` where functions that altered the input in-place only operated on a single row (:issue:`35462`) - Fixed regression in :meth:`DataFrame.reset_index` would raise a ``ValueError`` on empty :class:`DataFrame` with a :class:`MultiIndex` with a ``datetime64`` dtype level (:issue:`35606`, :issue:`35657`) -- Fixed regression where :func:`pandas.merge_asof` would raise a ``UnboundLocalError`` when ``left_index`` , ``right_index`` and ``tolerance`` were set (:issue:`35558`) +- Fixed regression where :func:`pandas.merge_asof` would raise a ``UnboundLocalError`` when ``left_index``, ``right_index`` and ``tolerance`` were set (:issue:`35558`) - Fixed regression in ``.groupby(..).rolling(..)`` where a custom ``BaseIndexer`` would be ignored (:issue:`35557`) - Fixed regression in :meth:`DataFrame.replace` and :meth:`Series.replace` where compiled regular expressions would be ignored during replacement (:issue:`35680`) - Fixed regression in :meth:`~pandas.core.groupby.DataFrameGroupBy.aggregate` where a list of functions would produce the wrong results if at least one of the functions did not aggregate (:issue:`35490`) @@ -40,11 +40,11 @@ Fixed regressions Bug fixes ~~~~~~~~~ -- Bug in :class:`~pandas.io.formats.style.Styler` whereby `cell_ids` argument had no effect due to other recent changes (:issue:`35588`) (:issue:`35663`) +- Bug in :class:`~pandas.io.formats.style.Styler` whereby ``cell_ids`` argument had no effect due to other recent changes (:issue:`35588`) (:issue:`35663`) - Bug in :func:`pandas.testing.assert_series_equal` and :func:`pandas.testing.assert_frame_equal` where extension dtypes were not ignored when ``check_dtypes`` was set to ``False`` (:issue:`35715`) -- Bug in :meth:`to_timedelta` fails when arg is a :class:`Series` with `Int64` dtype containing null values (:issue:`35574`) +- Bug in :meth:`to_timedelta` fails when ``arg`` is a :class:`Series` with ``Int64`` dtype containing null values (:issue:`35574`) - Bug in ``.groupby(..).rolling(..)`` where passing ``closed`` with column selection would raise a ``ValueError`` (:issue:`35549`) -- Bug in :class:`DataFrame` constructor failing to raise ``ValueError`` in some cases when data and index have mismatched lengths (:issue:`33437`) +- Bug in :class:`DataFrame` constructor failing to raise ``ValueError`` in some cases when ``data`` and ``index`` have mismatched lengths (:issue:`33437`) .. 
--------------------------------------------------------------------------- From 8cbf46bf5e0d31982937f510349235e07cd07615 Mon Sep 17 00:00:00 2001 From: Ali McMaster Date: Thu, 20 Aug 2020 17:26:30 +0100 Subject: [PATCH 301/632] Add new core members (#35812) --- web/pandas/config.yml | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/web/pandas/config.yml b/web/pandas/config.yml index 23575cc123050..9a178d26659c3 100644 --- a/web/pandas/config.yml +++ b/web/pandas/config.yml @@ -79,6 +79,13 @@ maintainers: - datapythonista - simonjayhawkins - topper-123 + - alimcmaster1 + - bashtage + - charlesdong1991 + - Dr-Irv + - dsaxton + - MarcoGorelli + - rhshadrach emeritus: - Wouter Overmeire - Skipper Seabold From 98d8065c7705d5aa15ef203e6267cf51bff2de9a Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Thu, 20 Aug 2020 09:59:23 -0700 Subject: [PATCH 302/632] REF: insert self.on column _after_ concat (#35746) --- pandas/core/window/rolling.py | 50 +++++++++++++++-------------------- 1 file changed, 21 insertions(+), 29 deletions(-) diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py index 966773b7c6982..ac96258cbc3c9 100644 --- a/pandas/core/window/rolling.py +++ b/pandas/core/window/rolling.py @@ -38,7 +38,7 @@ from pandas.core.base import DataError, PandasObject, SelectionMixin, ShallowMixin import pandas.core.common as com from pandas.core.construction import extract_array -from pandas.core.indexes.api import Index, MultiIndex, ensure_index +from pandas.core.indexes.api import Index, MultiIndex from pandas.core.util.numba_ import NUMBA_FUNC_CACHE, maybe_use_numba from pandas.core.window.common import ( WindowGroupByMixin, @@ -402,36 +402,27 @@ def _wrap_results(self, results, blocks, obj, exclude=None) -> FrameOrSeries: return result final.append(result) - # if we have an 'on' column - # we want to put it back into the results - # in the same location - columns = self._selected_obj.columns - if self.on is not None and not self._on.equals(obj.index): - - name = self._on.name - final.append(Series(self._on, index=obj.index, name=name)) - - if self._selection is not None: - - selection = ensure_index(self._selection) - - # need to reorder to include original location of - # the on column (if its not already there) - if name not in selection: - columns = self.obj.columns - indexer = columns.get_indexer(selection.tolist() + [name]) - columns = columns.take(sorted(indexer)) - - # exclude nuisance columns so that they are not reindexed - if exclude is not None and exclude: - columns = [c for c in columns if c not in exclude] + exclude = exclude or [] + columns = [c for c in self._selected_obj.columns if c not in exclude] + if not columns and not len(final) and exclude: + raise DataError("No numeric types to aggregate") + elif not len(final): + return obj.astype("float64") - if not columns: - raise DataError("No numeric types to aggregate") + df = concat(final, axis=1).reindex(columns=columns, copy=False) - if not len(final): - return obj.astype("float64") - return concat(final, axis=1).reindex(columns=columns, copy=False) + # if we have an 'on' column we want to put it back into + # the results in the same location + if self.on is not None and not self._on.equals(obj.index): + name = self._on.name + extra_col = Series(self._on, index=obj.index, name=name) + if name not in df.columns and name not in df.index.names: + new_loc = len(df.columns) + df.insert(new_loc, name, extra_col) + elif name in df.columns: + # TODO: sure we want to overwrite results? 
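# Illustrative note (not part of the diff): with an ``on`` column, e.g.
# df.rolling("2s", on="ts").sum(), the "ts" column is excluded from the
# aggregation but is expected to reappear in the result at its original
# location, which is what this branch restores.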
+ df[name] = extra_col + return df def _center_window(self, result, window) -> np.ndarray: """ @@ -2277,6 +2268,7 @@ def _get_window_indexer(self, window: int) -> GroupbyRollingIndexer: if isinstance(self.window, BaseIndexer): rolling_indexer = type(self.window) indexer_kwargs = self.window.__dict__ + assert isinstance(indexer_kwargs, dict) # We'll be using the index of each group later indexer_kwargs.pop("index_array", None) elif self.is_freq_type: From a67e988afac9927e89645059eae3ddcca64df87b Mon Sep 17 00:00:00 2001 From: Ali McMaster Date: Thu, 20 Aug 2020 22:26:04 +0100 Subject: [PATCH 303/632] Pyarrow Min Versoin (#35828) --- ci/deps/travis-37-locale.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/ci/deps/travis-37-locale.yaml b/ci/deps/travis-37-locale.yaml index 4427c1d940bf2..6dc1c2f89cc6f 100644 --- a/ci/deps/travis-37-locale.yaml +++ b/ci/deps/travis-37-locale.yaml @@ -28,6 +28,7 @@ dependencies: - openpyxl - pandas-gbq=0.12.0 - psycopg2=2.7 + - pyarrow>=0.15.0 # GH #35813 - pymysql=0.7.11 - pytables - python-dateutil From db6414fbea66aa59dfc0fcd0e19648fc532f7502 Mon Sep 17 00:00:00 2001 From: Simon Hawkins Date: Thu, 20 Aug 2020 23:19:00 +0100 Subject: [PATCH 304/632] DOC: Start 1.1.2 (#35825) --- doc/source/whatsnew/index.rst | 1 + doc/source/whatsnew/v1.1.1.rst | 2 +- doc/source/whatsnew/v1.1.2.rst | 38 ++++++++++++++++++++++++++++++++++ 3 files changed, 40 insertions(+), 1 deletion(-) create mode 100644 doc/source/whatsnew/v1.1.2.rst diff --git a/doc/source/whatsnew/index.rst b/doc/source/whatsnew/index.rst index a280a981c789b..1827d151579a1 100644 --- a/doc/source/whatsnew/index.rst +++ b/doc/source/whatsnew/index.rst @@ -24,6 +24,7 @@ Version 1.1 .. toctree:: :maxdepth: 2 + v1.1.2 v1.1.1 v1.1.0 diff --git a/doc/source/whatsnew/v1.1.1.rst b/doc/source/whatsnew/v1.1.1.rst index 721f07c865409..77ea67f76f655 100644 --- a/doc/source/whatsnew/v1.1.1.rst +++ b/doc/source/whatsnew/v1.1.1.rst @@ -53,4 +53,4 @@ Bug fixes Contributors ~~~~~~~~~~~~ -.. contributors:: v1.1.0..v1.1.1|HEAD +.. contributors:: v1.1.0..v1.1.1 diff --git a/doc/source/whatsnew/v1.1.2.rst b/doc/source/whatsnew/v1.1.2.rst new file mode 100644 index 0000000000000..81acd567027e5 --- /dev/null +++ b/doc/source/whatsnew/v1.1.2.rst @@ -0,0 +1,38 @@ +.. _whatsnew_112: + +What's new in 1.1.2 (??) +------------------------ + +These are the changes in pandas 1.1.2. See :ref:`release` for a full changelog +including other versions of pandas. + +{{ header }} + +.. --------------------------------------------------------------------------- + +.. _whatsnew_112.regressions: + +Fixed regressions +~~~~~~~~~~~~~~~~~ + +- +- + +.. --------------------------------------------------------------------------- + +.. _whatsnew_112.bug_fixes: + +Bug fixes +~~~~~~~~~ + +- +- + +.. --------------------------------------------------------------------------- + +.. _whatsnew_112.contributors: + +Contributors +~~~~~~~~~~~~ + +.. contributors:: v1.1.1..v1.1.2|HEAD From 8f795432ea83e4e88e254cccfcf90e3cafc5cca8 Mon Sep 17 00:00:00 2001 From: joooeey Date: Fri, 21 Aug 2020 19:16:29 +0200 Subject: [PATCH 305/632] DOC: timezone warning for dates beyond TODAY (#34100) * DOC: timezone warning for dates beyond TODAY introducing a suggestion discussed in PR #33863 : Added a warning in the user guide that timezone conversion on future dates is inherently unreliable. 
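To see why such a warning is warranted, a sketch with pytz; the offset applied below is an extrapolation of today's rules and could be invalidated by a future government decision:

    >>> from datetime import datetime
    >>> import pytz
    >>> tz = pytz.timezone("Europe/London")
    >>> tz.localize(datetime(2035, 7, 1, 12))
    datetime.datetime(2035, 7, 1, 12, 0, tzinfo=<DstTzInfo 'Europe/London' BST+1:00:00 DST>)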
* shorter warning text Co-authored-by: Marco Gorelli --- doc/source/user_guide/timeseries.rst | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/doc/source/user_guide/timeseries.rst b/doc/source/user_guide/timeseries.rst index a03ba6c775e68..0bfe9d9b68cdb 100644 --- a/doc/source/user_guide/timeseries.rst +++ b/doc/source/user_guide/timeseries.rst @@ -2319,13 +2319,18 @@ you can use the ``tz_convert`` method. Instead, the datetime needs to be localized using the ``localize`` method on the ``pytz`` time zone object. +.. warning:: + + Be aware that for times in the future, correct conversion between time zones + (and UTC) cannot be guaranteed by any time zone library because a timezone's + offset from UTC may be changed by the respective government. + .. warning:: If you are using dates beyond 2038-01-18, due to current deficiencies in the underlying libraries caused by the year 2038 problem, daylight saving time (DST) adjustments to timezone aware dates will not be applied. If and when the underlying libraries are fixed, - the DST transitions will be applied. It should be noted though, that time zone data for far future time zones - are likely to be inaccurate, as they are simple extrapolations of the current set of (regularly revised) rules. + the DST transitions will be applied. For example, for two dates that are in British Summer Time (and so would normally be GMT+1), both the following asserts evaluate as true: From 712ffbd2c21e93843ec42807d720e1814b94ae47 Mon Sep 17 00:00:00 2001 From: Martin Durant Date: Fri, 21 Aug 2020 16:14:03 -0400 Subject: [PATCH 306/632] Moto server (#35655) --- ci/deps/azure-37-locale.yaml | 1 + ci/deps/azure-37-slow.yaml | 2 + ci/deps/azure-38-locale.yaml | 2 + ci/deps/azure-windows-37.yaml | 7 +- ci/deps/azure-windows-38.yaml | 4 + ci/deps/travis-37-arm64.yaml | 1 + ci/deps/travis-37-cov.yaml | 3 +- ci/deps/travis-37-locale.yaml | 2 +- ci/deps/travis-37.yaml | 4 +- doc/source/whatsnew/v1.2.0.rst | 3 + environment.yml | 1 + pandas/io/excel/_base.py | 30 ++++- pandas/io/excel/_odfreader.py | 14 +- pandas/io/excel/_openpyxl.py | 12 +- pandas/io/excel/_pyxlsb.py | 14 +- pandas/io/excel/_xlrd.py | 7 +- pandas/io/feather_format.py | 7 +- pandas/io/parsers.py | 4 +- pandas/tests/io/conftest.py | 155 ++++++++++++++++------- pandas/tests/io/excel/test_readers.py | 5 +- pandas/tests/io/json/test_compression.py | 6 +- pandas/tests/io/json/test_pandas.py | 13 +- pandas/tests/io/parser/test_network.py | 85 ++++++++----- pandas/tests/io/test_fsspec.py | 29 +++-- pandas/tests/io/test_parquet.py | 15 ++- requirements-dev.txt | 1 + 26 files changed, 307 insertions(+), 120 deletions(-) diff --git a/ci/deps/azure-37-locale.yaml b/ci/deps/azure-37-locale.yaml index a6552aa096a22..cc996f4077cd9 100644 --- a/ci/deps/azure-37-locale.yaml +++ b/ci/deps/azure-37-locale.yaml @@ -21,6 +21,7 @@ dependencies: - lxml - matplotlib>=3.3.0 - moto + - flask - nomkl - numexpr - numpy=1.16.* diff --git a/ci/deps/azure-37-slow.yaml b/ci/deps/azure-37-slow.yaml index e8ffd3d74ca5e..d17a8a2b0ed9b 100644 --- a/ci/deps/azure-37-slow.yaml +++ b/ci/deps/azure-37-slow.yaml @@ -27,9 +27,11 @@ dependencies: - python-dateutil - pytz - s3fs>=0.4.0 + - moto>=1.3.14 - scipy - sqlalchemy - xlrd - xlsxwriter - xlwt - moto + - flask diff --git a/ci/deps/azure-38-locale.yaml b/ci/deps/azure-38-locale.yaml index c7090d3a46a77..bb40127b672d3 100644 --- a/ci/deps/azure-38-locale.yaml +++ b/ci/deps/azure-38-locale.yaml @@ -14,6 +14,7 @@ dependencies: # pandas dependencies - beautifulsoup4 + - 
flask - html5lib - ipython - jinja2 @@ -32,6 +33,7 @@ dependencies: - xlrd - xlsxwriter - xlwt + - moto - pyarrow>=0.15 - pip - pip: diff --git a/ci/deps/azure-windows-37.yaml b/ci/deps/azure-windows-37.yaml index f4c238ab8b173..4894129915722 100644 --- a/ci/deps/azure-windows-37.yaml +++ b/ci/deps/azure-windows-37.yaml @@ -15,13 +15,14 @@ dependencies: # pandas dependencies - beautifulsoup4 - bottleneck - - fsspec>=0.7.4 + - fsspec>=0.8.0 - gcsfs>=0.6.0 - html5lib - jinja2 - lxml - matplotlib=2.2.* - - moto + - moto>=1.3.14 + - flask - numexpr - numpy=1.16.* - openpyxl @@ -29,7 +30,7 @@ dependencies: - pytables - python-dateutil - pytz - - s3fs>=0.4.0 + - s3fs>=0.4.2 - scipy - sqlalchemy - xlrd diff --git a/ci/deps/azure-windows-38.yaml b/ci/deps/azure-windows-38.yaml index 1f383164b5328..2853e12b28e35 100644 --- a/ci/deps/azure-windows-38.yaml +++ b/ci/deps/azure-windows-38.yaml @@ -16,7 +16,10 @@ dependencies: - blosc - bottleneck - fastparquet>=0.3.2 + - flask + - fsspec>=0.8.0 - matplotlib=3.1.3 + - moto>=1.3.14 - numba - numexpr - numpy=1.18.* @@ -26,6 +29,7 @@ dependencies: - pytables - python-dateutil - pytz + - s3fs>=0.4.0 - scipy - xlrd - xlsxwriter diff --git a/ci/deps/travis-37-arm64.yaml b/ci/deps/travis-37-arm64.yaml index 5cb53489be225..ea29cbef1272b 100644 --- a/ci/deps/travis-37-arm64.yaml +++ b/ci/deps/travis-37-arm64.yaml @@ -17,5 +17,6 @@ dependencies: - python-dateutil - pytz - pip + - flask - pip: - moto diff --git a/ci/deps/travis-37-cov.yaml b/ci/deps/travis-37-cov.yaml index edc11bdf4ab35..33ee6dfffb1a3 100644 --- a/ci/deps/travis-37-cov.yaml +++ b/ci/deps/travis-37-cov.yaml @@ -23,7 +23,8 @@ dependencies: - geopandas - html5lib - matplotlib - - moto + - moto>=1.3.14 + - flask - nomkl - numexpr - numpy=1.16.* diff --git a/ci/deps/travis-37-locale.yaml b/ci/deps/travis-37-locale.yaml index 6dc1c2f89cc6f..306f74a0101e3 100644 --- a/ci/deps/travis-37-locale.yaml +++ b/ci/deps/travis-37-locale.yaml @@ -21,12 +21,12 @@ dependencies: - jinja2 - lxml=4.3.0 - matplotlib=3.0.* - - moto - nomkl - numexpr - numpy - openpyxl - pandas-gbq=0.12.0 + - pyarrow>=0.17 - psycopg2=2.7 - pyarrow>=0.15.0 # GH #35813 - pymysql=0.7.11 diff --git a/ci/deps/travis-37.yaml b/ci/deps/travis-37.yaml index e896233aac63c..26d6c2910a7cc 100644 --- a/ci/deps/travis-37.yaml +++ b/ci/deps/travis-37.yaml @@ -20,8 +20,8 @@ dependencies: - pyarrow - pytz - s3fs>=0.4.0 + - moto>=1.3.14 + - flask - tabulate - pyreadstat - pip - - pip: - - moto diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index 8ec75b4846ae2..3e6ed1cdf8f7e 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -24,6 +24,9 @@ of the individual storage backends (detailed from the fsspec docs for `builtin implementations`_ and linked to `external ones`_). See Section :ref:`io.remote`. +:issue:`35655` added fsspec support (including ``storage_options``) +for reading excel files. + .. _builtin implementations: https://filesystem-spec.readthedocs.io/en/latest/api.html#built-in-implementations .. 
_external ones: https://filesystem-spec.readthedocs.io/en/latest/api.html#other-known-implementations diff --git a/environment.yml b/environment.yml index 806119631d5ee..6afc19c227512 100644 --- a/environment.yml +++ b/environment.yml @@ -51,6 +51,7 @@ dependencies: - botocore>=1.11 - hypothesis>=3.82 - moto # mock S3 + - flask - pytest>=5.0.1 - pytest-cov - pytest-xdist>=1.21 diff --git a/pandas/io/excel/_base.py b/pandas/io/excel/_base.py index b1bbda4a4b7e0..aaef71910c9ab 100644 --- a/pandas/io/excel/_base.py +++ b/pandas/io/excel/_base.py @@ -3,11 +3,12 @@ from io import BufferedIOBase, BytesIO, RawIOBase import os from textwrap import fill -from typing import Union +from typing import Any, Mapping, Union from pandas._config import config from pandas._libs.parsers import STR_NA_VALUES +from pandas._typing import StorageOptions from pandas.errors import EmptyDataError from pandas.util._decorators import Appender, deprecate_nonkeyword_arguments @@ -199,6 +200,15 @@ Duplicate columns will be specified as 'X', 'X.1', ...'X.N', rather than 'X'...'X'. Passing in False will cause data to be overwritten if there are duplicate names in the columns. +storage_options : StorageOptions + Extra options that make sense for a particular storage connection, e.g. + host, port, username, password, etc., if using a URL that will + be parsed by ``fsspec``, e.g., starting "s3://", "gcs://". An error + will be raised if providing this argument with a local path or + a file-like buffer. See the fsspec and backend storage implementation + docs for the set of allowed keys and values + + .. versionadded:: 1.2.0 Returns ------- @@ -298,10 +308,11 @@ def read_excel( skipfooter=0, convert_float=True, mangle_dupe_cols=True, + storage_options: StorageOptions = None, ): if not isinstance(io, ExcelFile): - io = ExcelFile(io, engine=engine) + io = ExcelFile(io, storage_options=storage_options, engine=engine) elif engine and engine != io.engine: raise ValueError( "Engine should not be specified when passing " @@ -336,12 +347,14 @@ def read_excel( class _BaseExcelReader(metaclass=abc.ABCMeta): - def __init__(self, filepath_or_buffer): + def __init__(self, filepath_or_buffer, storage_options: StorageOptions = None): # If filepath_or_buffer is a url, load the data into a BytesIO if is_url(filepath_or_buffer): filepath_or_buffer = BytesIO(urlopen(filepath_or_buffer).read()) elif not isinstance(filepath_or_buffer, (ExcelFile, self._workbook_class)): - filepath_or_buffer, _, _, _ = get_filepath_or_buffer(filepath_or_buffer) + filepath_or_buffer, _, _, _ = get_filepath_or_buffer( + filepath_or_buffer, storage_options=storage_options + ) if isinstance(filepath_or_buffer, self._workbook_class): self.book = filepath_or_buffer @@ -837,14 +850,16 @@ class ExcelFile: from pandas.io.excel._pyxlsb import _PyxlsbReader from pandas.io.excel._xlrd import _XlrdReader - _engines = { + _engines: Mapping[str, Any] = { "xlrd": _XlrdReader, "openpyxl": _OpenpyxlReader, "odf": _ODFReader, "pyxlsb": _PyxlsbReader, } - def __init__(self, path_or_buffer, engine=None): + def __init__( + self, path_or_buffer, engine=None, storage_options: StorageOptions = None + ): if engine is None: engine = "xlrd" if isinstance(path_or_buffer, (BufferedIOBase, RawIOBase)): @@ -858,13 +873,14 @@ def __init__(self, path_or_buffer, engine=None): raise ValueError(f"Unknown engine: {engine}") self.engine = engine + self.storage_options = storage_options # Could be a str, ExcelFile, Book, etc. 
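# Usage sketch for the new plumbing (illustrative; the bucket and key are
# made up):
#   pd.read_excel("s3://my-bucket/data.xlsx", storage_options={"anon": True})
# storage_options is forwarded through the engine readers to fsspec via
# get_filepath_or_buffer.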
self.io = path_or_buffer # Always a string self._io = stringify_path(path_or_buffer) - self._reader = self._engines[engine](self._io) + self._reader = self._engines[engine](self._io, storage_options=storage_options) def __fspath__(self): return self._io diff --git a/pandas/io/excel/_odfreader.py b/pandas/io/excel/_odfreader.py index 44abaf5d3b3c9..a6cd8f524503b 100644 --- a/pandas/io/excel/_odfreader.py +++ b/pandas/io/excel/_odfreader.py @@ -2,7 +2,7 @@ import numpy as np -from pandas._typing import FilePathOrBuffer, Scalar +from pandas._typing import FilePathOrBuffer, Scalar, StorageOptions from pandas.compat._optional import import_optional_dependency import pandas as pd @@ -16,13 +16,19 @@ class _ODFReader(_BaseExcelReader): Parameters ---------- - filepath_or_buffer: string, path to be parsed or + filepath_or_buffer : string, path to be parsed or an open readable stream. + storage_options : StorageOptions + passed to fsspec for appropriate URLs (see ``get_filepath_or_buffer``) """ - def __init__(self, filepath_or_buffer: FilePathOrBuffer): + def __init__( + self, + filepath_or_buffer: FilePathOrBuffer, + storage_options: StorageOptions = None, + ): import_optional_dependency("odf") - super().__init__(filepath_or_buffer) + super().__init__(filepath_or_buffer, storage_options=storage_options) @property def _workbook_class(self): diff --git a/pandas/io/excel/_openpyxl.py b/pandas/io/excel/_openpyxl.py index 03a30cbd62f9a..73239190604db 100644 --- a/pandas/io/excel/_openpyxl.py +++ b/pandas/io/excel/_openpyxl.py @@ -2,7 +2,7 @@ import numpy as np -from pandas._typing import FilePathOrBuffer, Scalar +from pandas._typing import FilePathOrBuffer, Scalar, StorageOptions from pandas.compat._optional import import_optional_dependency from pandas.io.excel._base import ExcelWriter, _BaseExcelReader @@ -467,7 +467,11 @@ def write_cells( class _OpenpyxlReader(_BaseExcelReader): - def __init__(self, filepath_or_buffer: FilePathOrBuffer) -> None: + def __init__( + self, + filepath_or_buffer: FilePathOrBuffer, + storage_options: StorageOptions = None, + ) -> None: """ Reader using openpyxl engine. @@ -475,9 +479,11 @@ def __init__(self, filepath_or_buffer: FilePathOrBuffer) -> None: ---------- filepath_or_buffer : string, path object or Workbook Object to be parsed. + storage_options : StorageOptions + passed to fsspec for appropriate URLs (see ``get_filepath_or_buffer``) """ import_optional_dependency("openpyxl") - super().__init__(filepath_or_buffer) + super().__init__(filepath_or_buffer, storage_options=storage_options) @property def _workbook_class(self): diff --git a/pandas/io/excel/_pyxlsb.py b/pandas/io/excel/_pyxlsb.py index 0d96c8c4acdb8..c0e281ff6c2da 100644 --- a/pandas/io/excel/_pyxlsb.py +++ b/pandas/io/excel/_pyxlsb.py @@ -1,25 +1,31 @@ from typing import List -from pandas._typing import FilePathOrBuffer, Scalar +from pandas._typing import FilePathOrBuffer, Scalar, StorageOptions from pandas.compat._optional import import_optional_dependency from pandas.io.excel._base import _BaseExcelReader class _PyxlsbReader(_BaseExcelReader): - def __init__(self, filepath_or_buffer: FilePathOrBuffer): + def __init__( + self, + filepath_or_buffer: FilePathOrBuffer, + storage_options: StorageOptions = None, + ): """ Reader using pyxlsb engine. Parameters ---------- - filepath_or_buffer: str, path object, or Workbook + filepath_or_buffer : str, path object, or Workbook Object to be parsed. 
+ storage_options : StorageOptions + passed to fsspec for appropriate URLs (see ``get_filepath_or_buffer``) """ import_optional_dependency("pyxlsb") # This will call load_workbook on the filepath or buffer # And set the result to the book-attribute - super().__init__(filepath_or_buffer) + super().__init__(filepath_or_buffer, storage_options=storage_options) @property def _workbook_class(self): diff --git a/pandas/io/excel/_xlrd.py b/pandas/io/excel/_xlrd.py index af82c15fd6b66..ff1b3c8bdb964 100644 --- a/pandas/io/excel/_xlrd.py +++ b/pandas/io/excel/_xlrd.py @@ -2,13 +2,14 @@ import numpy as np +from pandas._typing import StorageOptions from pandas.compat._optional import import_optional_dependency from pandas.io.excel._base import _BaseExcelReader class _XlrdReader(_BaseExcelReader): - def __init__(self, filepath_or_buffer): + def __init__(self, filepath_or_buffer, storage_options: StorageOptions = None): """ Reader using xlrd engine. @@ -16,10 +17,12 @@ def __init__(self, filepath_or_buffer): ---------- filepath_or_buffer : string, path object or Workbook Object to be parsed. + storage_options : StorageOptions + passed to fsspec for appropriate URLs (see ``get_filepath_or_buffer``) """ err_msg = "Install xlrd >= 1.0.0 for Excel support" import_optional_dependency("xlrd", extra=err_msg) - super().__init__(filepath_or_buffer) + super().__init__(filepath_or_buffer, storage_options=storage_options) @property def _workbook_class(self): diff --git a/pandas/io/feather_format.py b/pandas/io/feather_format.py index 2c664e73b9463..2d86fa44f22a4 100644 --- a/pandas/io/feather_format.py +++ b/pandas/io/feather_format.py @@ -1,5 +1,6 @@ """ feather-format compat """ +from pandas._typing import StorageOptions from pandas.compat._optional import import_optional_dependency from pandas import DataFrame, Int64Index, RangeIndex @@ -7,7 +8,7 @@ from pandas.io.common import get_filepath_or_buffer -def to_feather(df: DataFrame, path, storage_options=None, **kwargs): +def to_feather(df: DataFrame, path, storage_options: StorageOptions = None, **kwargs): """ Write a DataFrame to the binary Feather format. @@ -77,7 +78,9 @@ def to_feather(df: DataFrame, path, storage_options=None, **kwargs): feather.write_feather(df, path, **kwargs) -def read_feather(path, columns=None, use_threads: bool = True, storage_options=None): +def read_feather( + path, columns=None, use_threads: bool = True, storage_options: StorageOptions = None +): """ Load a feather-format object from the file path. 
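Taken together, the Excel changes above thread ``storage_options`` from
``read_excel`` through ``ExcelFile`` and each engine reader down to
``get_filepath_or_buffer``. A minimal usage sketch follows; the bucket, file
name and endpoint are placeholders for illustration, not part of the patch:

    >>> import pandas as pd
    >>> opts = {"client_kwargs": {"endpoint_url": "http://127.0.0.1:5555/"}}
    >>> # storage_options is handed to fsspec when the path is a URL
    >>> df = pd.read_excel("s3://pandas-test/test1.xlsx", storage_options=opts)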
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py index 5d49757ce7d58..983aa56324083 100644 --- a/pandas/io/parsers.py +++ b/pandas/io/parsers.py @@ -20,7 +20,7 @@ import pandas._libs.parsers as parsers from pandas._libs.parsers import STR_NA_VALUES from pandas._libs.tslibs import parsing -from pandas._typing import FilePathOrBuffer, Union +from pandas._typing import FilePathOrBuffer, StorageOptions, Union from pandas.errors import ( AbstractMethodError, EmptyDataError, @@ -596,7 +596,7 @@ def read_csv( low_memory=_c_parser_defaults["low_memory"], memory_map=False, float_precision=None, - storage_options=None, + storage_options: StorageOptions = None, ): # gh-23761 # diff --git a/pandas/tests/io/conftest.py b/pandas/tests/io/conftest.py index fcee25c258efa..518f31d73efa9 100644 --- a/pandas/tests/io/conftest.py +++ b/pandas/tests/io/conftest.py @@ -1,4 +1,7 @@ import os +import shlex +import subprocess +import time import pytest @@ -31,10 +34,62 @@ def feather_file(datapath): @pytest.fixture -def s3_resource(tips_file, jsonl_file, feather_file): +def s3so(): + return dict(client_kwargs={"endpoint_url": "http://127.0.0.1:5555/"}) + + +@pytest.fixture(scope="module") +def s3_base(): """ Fixture for mocking S3 interaction. + Sets up moto server in separate process + """ + pytest.importorskip("s3fs") + pytest.importorskip("boto3") + requests = pytest.importorskip("requests") + + with tm.ensure_safe_environment_variables(): + # temporary workaround as moto fails for botocore >= 1.11 otherwise, + # see https://github.com/spulec/moto/issues/1924 & 1952 + os.environ.setdefault("AWS_ACCESS_KEY_ID", "foobar_key") + os.environ.setdefault("AWS_SECRET_ACCESS_KEY", "foobar_secret") + + pytest.importorskip("moto", minversion="1.3.14") + pytest.importorskip("flask") # server mode needs flask too + + # Launching moto in server mode, i.e., as a separate process + # with an S3 endpoint on localhost + + endpoint_uri = "http://127.0.0.1:5555/" + + # pipe to null to avoid logging in terminal + proc = subprocess.Popen( + shlex.split("moto_server s3 -p 5555"), stdout=subprocess.DEVNULL + ) + + timeout = 5 + while timeout > 0: + try: + # OK to go once server is accepting connections + r = requests.get(endpoint_uri) + if r.ok: + break + except Exception: + pass + timeout -= 0.1 + time.sleep(0.1) + yield + + proc.terminate() + proc.wait() + + +@pytest.fixture() +def s3_resource(s3_base, tips_file, jsonl_file, feather_file): + """ + Sets up S3 bucket with contents + The primary bucket name is "pandas-test". The following datasets are loaded. @@ -46,45 +101,59 @@ def s3_resource(tips_file, jsonl_file, feather_file): A private bucket "cant_get_it" is also created. The boto3 s3 resource is yielded by the fixture. 
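
    Tests should reach these keys through the moto server started by the
    ``s3_base`` fixture, e.g. by passing the ``s3so`` fixture as
    ``storage_options`` to the reader under test.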
""" - s3fs = pytest.importorskip("s3fs") - boto3 = pytest.importorskip("boto3") - - with tm.ensure_safe_environment_variables(): - # temporary workaround as moto fails for botocore >= 1.11 otherwise, - # see https://github.com/spulec/moto/issues/1924 & 1952 - os.environ.setdefault("AWS_ACCESS_KEY_ID", "foobar_key") - os.environ.setdefault("AWS_SECRET_ACCESS_KEY", "foobar_secret") - - moto = pytest.importorskip("moto") - - test_s3_files = [ - ("tips#1.csv", tips_file), - ("tips.csv", tips_file), - ("tips.csv.gz", tips_file + ".gz"), - ("tips.csv.bz2", tips_file + ".bz2"), - ("items.jsonl", jsonl_file), - ("simple_dataset.feather", feather_file), - ] - - def add_tips_files(bucket_name): - for s3_key, file_name in test_s3_files: - with open(file_name, "rb") as f: - conn.Bucket(bucket_name).put_object(Key=s3_key, Body=f) - - try: - s3 = moto.mock_s3() - s3.start() - - # see gh-16135 - bucket = "pandas-test" - conn = boto3.resource("s3", region_name="us-east-1") - - conn.create_bucket(Bucket=bucket) - add_tips_files(bucket) - - conn.create_bucket(Bucket="cant_get_it", ACL="private") - add_tips_files("cant_get_it") - s3fs.S3FileSystem.clear_instance_cache() - yield conn - finally: - s3.stop() + import boto3 + import s3fs + + test_s3_files = [ + ("tips#1.csv", tips_file), + ("tips.csv", tips_file), + ("tips.csv.gz", tips_file + ".gz"), + ("tips.csv.bz2", tips_file + ".bz2"), + ("items.jsonl", jsonl_file), + ("simple_dataset.feather", feather_file), + ] + + def add_tips_files(bucket_name): + for s3_key, file_name in test_s3_files: + with open(file_name, "rb") as f: + cli.put_object(Bucket=bucket_name, Key=s3_key, Body=f) + + bucket = "pandas-test" + endpoint_uri = "http://127.0.0.1:5555/" + conn = boto3.resource("s3", endpoint_url=endpoint_uri) + cli = boto3.client("s3", endpoint_url=endpoint_uri) + + try: + cli.create_bucket(Bucket=bucket) + except: # noqa + # OK is bucket already exists + pass + try: + cli.create_bucket(Bucket="cant_get_it", ACL="private") + except: # noqa + # OK is bucket already exists + pass + timeout = 2 + while not cli.list_buckets()["Buckets"] and timeout > 0: + time.sleep(0.1) + timeout -= 0.1 + + add_tips_files(bucket) + add_tips_files("cant_get_it") + s3fs.S3FileSystem.clear_instance_cache() + yield conn + + s3 = s3fs.S3FileSystem(client_kwargs={"endpoint_url": "http://127.0.0.1:5555/"}) + + try: + s3.rm(bucket, recursive=True) + except: # noqa + pass + try: + s3.rm("cant_get_it", recursive=True) + except: # noqa + pass + timeout = 2 + while cli.list_buckets()["Buckets"] and timeout > 0: + time.sleep(0.1) + timeout -= 0.1 diff --git a/pandas/tests/io/excel/test_readers.py b/pandas/tests/io/excel/test_readers.py index 51fbbf836a03f..431a50477fccc 100644 --- a/pandas/tests/io/excel/test_readers.py +++ b/pandas/tests/io/excel/test_readers.py @@ -606,13 +606,14 @@ def test_read_from_http_url(self, read_ext): tm.assert_frame_equal(url_table, local_table) @td.skip_if_not_us_locale - def test_read_from_s3_url(self, read_ext, s3_resource): + def test_read_from_s3_url(self, read_ext, s3_resource, s3so): # Bucket "pandas-test" created in tests/io/conftest.py with open("test1" + read_ext, "rb") as f: s3_resource.Bucket("pandas-test").put_object(Key="test1" + read_ext, Body=f) url = "s3://pandas-test/test1" + read_ext - url_table = pd.read_excel(url) + + url_table = pd.read_excel(url, storage_options=s3so) local_table = pd.read_excel("test1" + read_ext) tm.assert_frame_equal(url_table, local_table) diff --git a/pandas/tests/io/json/test_compression.py 
b/pandas/tests/io/json/test_compression.py index 182c21ed1d416..5bb205842269e 100644 --- a/pandas/tests/io/json/test_compression.py +++ b/pandas/tests/io/json/test_compression.py @@ -44,7 +44,11 @@ def test_with_s3_url(compression, s3_resource): with open(path, "rb") as f: s3_resource.Bucket("pandas-test").put_object(Key="test-1", Body=f) - roundtripped_df = pd.read_json("s3://pandas-test/test-1", compression=compression) + roundtripped_df = pd.read_json( + "s3://pandas-test/test-1", + compression=compression, + storage_options=dict(client_kwargs={"endpoint_url": "http://127.0.0.1:5555/"}), + ) tm.assert_frame_equal(df, roundtripped_df) diff --git a/pandas/tests/io/json/test_pandas.py b/pandas/tests/io/json/test_pandas.py index 1280d0fd434d5..64a666079876f 100644 --- a/pandas/tests/io/json/test_pandas.py +++ b/pandas/tests/io/json/test_pandas.py @@ -1213,10 +1213,12 @@ def test_read_inline_jsonl(self): tm.assert_frame_equal(result, expected) @td.skip_if_not_us_locale - def test_read_s3_jsonl(self, s3_resource): + def test_read_s3_jsonl(self, s3_resource, s3so): # GH17200 - result = read_json("s3n://pandas-test/items.jsonl", lines=True) + result = read_json( + "s3n://pandas-test/items.jsonl", lines=True, storage_options=s3so + ) expected = DataFrame([[1, 2], [1, 2]], columns=["a", "b"]) tm.assert_frame_equal(result, expected) @@ -1706,7 +1708,12 @@ def test_to_s3(self, s3_resource): # GH 28375 mock_bucket_name, target_file = "pandas-test", "test.json" df = DataFrame({"x": [1, 2, 3], "y": [2, 4, 6]}) - df.to_json(f"s3://{mock_bucket_name}/{target_file}") + df.to_json( + f"s3://{mock_bucket_name}/{target_file}", + storage_options=dict( + client_kwargs={"endpoint_url": "http://127.0.0.1:5555/"} + ), + ) timeout = 5 while True: if target_file in ( diff --git a/pandas/tests/io/parser/test_network.py b/pandas/tests/io/parser/test_network.py index b30a7b1ef34de..b8b03cbd14a1d 100644 --- a/pandas/tests/io/parser/test_network.py +++ b/pandas/tests/io/parser/test_network.py @@ -71,50 +71,62 @@ def tips_df(datapath): @td.skip_if_not_us_locale() class TestS3: @td.skip_if_no("s3fs") - def test_parse_public_s3_bucket(self, tips_df): + def test_parse_public_s3_bucket(self, tips_df, s3so): # more of an integration test due to the not-public contents portion # can probably mock this though. 
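        # Each pass below reads the same file plain, gzip- and bz2-compressed,
        # routing every request to the local moto endpoint via
        # storage_options=s3so.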
for ext, comp in [("", None), (".gz", "gzip"), (".bz2", "bz2")]: - df = read_csv("s3://pandas-test/tips.csv" + ext, compression=comp) + df = read_csv( + "s3://pandas-test/tips.csv" + ext, + compression=comp, + storage_options=s3so, + ) assert isinstance(df, DataFrame) assert not df.empty tm.assert_frame_equal(df, tips_df) # Read public file from bucket with not-public contents - df = read_csv("s3://cant_get_it/tips.csv") + df = read_csv("s3://cant_get_it/tips.csv", storage_options=s3so) assert isinstance(df, DataFrame) assert not df.empty tm.assert_frame_equal(df, tips_df) - def test_parse_public_s3n_bucket(self, tips_df): + def test_parse_public_s3n_bucket(self, tips_df, s3so): # Read from AWS s3 as "s3n" URL - df = read_csv("s3n://pandas-test/tips.csv", nrows=10) + df = read_csv("s3n://pandas-test/tips.csv", nrows=10, storage_options=s3so) assert isinstance(df, DataFrame) assert not df.empty tm.assert_frame_equal(tips_df.iloc[:10], df) - def test_parse_public_s3a_bucket(self, tips_df): + def test_parse_public_s3a_bucket(self, tips_df, s3so): # Read from AWS s3 as "s3a" URL - df = read_csv("s3a://pandas-test/tips.csv", nrows=10) + df = read_csv("s3a://pandas-test/tips.csv", nrows=10, storage_options=s3so) assert isinstance(df, DataFrame) assert not df.empty tm.assert_frame_equal(tips_df.iloc[:10], df) - def test_parse_public_s3_bucket_nrows(self, tips_df): + def test_parse_public_s3_bucket_nrows(self, tips_df, s3so): for ext, comp in [("", None), (".gz", "gzip"), (".bz2", "bz2")]: - df = read_csv("s3://pandas-test/tips.csv" + ext, nrows=10, compression=comp) + df = read_csv( + "s3://pandas-test/tips.csv" + ext, + nrows=10, + compression=comp, + storage_options=s3so, + ) assert isinstance(df, DataFrame) assert not df.empty tm.assert_frame_equal(tips_df.iloc[:10], df) - def test_parse_public_s3_bucket_chunked(self, tips_df): + def test_parse_public_s3_bucket_chunked(self, tips_df, s3so): # Read with a chunksize chunksize = 5 for ext, comp in [("", None), (".gz", "gzip"), (".bz2", "bz2")]: df_reader = read_csv( - "s3://pandas-test/tips.csv" + ext, chunksize=chunksize, compression=comp + "s3://pandas-test/tips.csv" + ext, + chunksize=chunksize, + compression=comp, + storage_options=s3so, ) assert df_reader.chunksize == chunksize for i_chunk in [0, 1, 2]: @@ -126,7 +138,7 @@ def test_parse_public_s3_bucket_chunked(self, tips_df): true_df = tips_df.iloc[chunksize * i_chunk : chunksize * (i_chunk + 1)] tm.assert_frame_equal(true_df, df) - def test_parse_public_s3_bucket_chunked_python(self, tips_df): + def test_parse_public_s3_bucket_chunked_python(self, tips_df, s3so): # Read with a chunksize using the Python parser chunksize = 5 for ext, comp in [("", None), (".gz", "gzip"), (".bz2", "bz2")]: @@ -135,6 +147,7 @@ def test_parse_public_s3_bucket_chunked_python(self, tips_df): chunksize=chunksize, compression=comp, engine="python", + storage_options=s3so, ) assert df_reader.chunksize == chunksize for i_chunk in [0, 1, 2]: @@ -145,46 +158,53 @@ def test_parse_public_s3_bucket_chunked_python(self, tips_df): true_df = tips_df.iloc[chunksize * i_chunk : chunksize * (i_chunk + 1)] tm.assert_frame_equal(true_df, df) - def test_parse_public_s3_bucket_python(self, tips_df): + def test_parse_public_s3_bucket_python(self, tips_df, s3so): for ext, comp in [("", None), (".gz", "gzip"), (".bz2", "bz2")]: df = read_csv( - "s3://pandas-test/tips.csv" + ext, engine="python", compression=comp + "s3://pandas-test/tips.csv" + ext, + engine="python", + compression=comp, + storage_options=s3so, ) assert 
isinstance(df, DataFrame)
             assert not df.empty
             tm.assert_frame_equal(df, tips_df)

-    def test_infer_s3_compression(self, tips_df):
+    def test_infer_s3_compression(self, tips_df, s3so):
         for ext in ["", ".gz", ".bz2"]:
             df = read_csv(
-                "s3://pandas-test/tips.csv" + ext, engine="python", compression="infer"
+                "s3://pandas-test/tips.csv" + ext,
+                engine="python",
+                compression="infer",
+                storage_options=s3so,
             )
             assert isinstance(df, DataFrame)
             assert not df.empty
             tm.assert_frame_equal(df, tips_df)

-    def test_parse_public_s3_bucket_nrows_python(self, tips_df):
+    def test_parse_public_s3_bucket_nrows_python(self, tips_df, s3so):
         for ext, comp in [("", None), (".gz", "gzip"), (".bz2", "bz2")]:
             df = read_csv(
                 "s3://pandas-test/tips.csv" + ext,
                 engine="python",
                 nrows=10,
                 compression=comp,
+                storage_options=s3so,
             )
             assert isinstance(df, DataFrame)
             assert not df.empty
             tm.assert_frame_equal(tips_df.iloc[:10], df)

-    def test_read_s3_fails(self):
+    def test_read_s3_fails(self, s3so):
         with pytest.raises(IOError):
-            read_csv("s3://nyqpug/asdf.csv")
+            read_csv("s3://nyqpug/asdf.csv", storage_options=s3so)

         # Receive a permission error when trying to read a private bucket.
         # It's irrelevant here that this isn't actually a table.
         with pytest.raises(IOError):
             read_csv("s3://cant_get_it/file.csv")

-    def test_write_s3_csv_fails(self, tips_df):
+    def test_write_s3_csv_fails(self, tips_df, s3so):
         # GH 32486
         # Attempting to write to an invalid S3 path should raise
         import botocore
@@ -195,10 +215,12 @@ def test_write_s3_csv_fails(self, tips_df):
         error = (FileNotFoundError, botocore.exceptions.ClientError)

         with pytest.raises(error, match="The specified bucket does not exist"):
-            tips_df.to_csv("s3://an_s3_bucket_data_doesnt_exit/not_real.csv")
+            tips_df.to_csv(
+                "s3://an_s3_bucket_data_doesnt_exit/not_real.csv", storage_options=s3so
+            )

     @td.skip_if_no("pyarrow")
-    def test_write_s3_parquet_fails(self, tips_df):
+    def test_write_s3_parquet_fails(self, tips_df, s3so):
         # GH 27679
         # Attempting to write to an invalid S3 path should raise
         import botocore
@@ -209,7 +231,10 @@ def test_write_s3_parquet_fails(self, tips_df):
         error = (FileNotFoundError, botocore.exceptions.ClientError)

         with pytest.raises(error, match="The specified bucket does not exist"):
-            tips_df.to_parquet("s3://an_s3_bucket_data_doesnt_exit/not_real.parquet")
+            tips_df.to_parquet(
+                "s3://an_s3_bucket_data_doesnt_exit/not_real.parquet",
+                storage_options=s3so,
+            )

     def test_read_csv_handles_boto_s3_object(self, s3_resource, tips_file):
         # see gh-16135
@@ -225,7 +250,7 @@ def test_read_csv_handles_boto_s3_object(self, s3_resource, tips_file):
         expected = read_csv(tips_file)
         tm.assert_frame_equal(result, expected)

-    def test_read_csv_chunked_download(self, s3_resource, caplog):
+    def test_read_csv_chunked_download(self, s3_resource, caplog, s3so):
         # 8 MB, S3FS uses 5MB chunks
         import s3fs

@@ -245,18 +270,20 @@ def test_read_csv_chunked_download(self, s3_resource, caplog):
         s3fs.S3FileSystem.clear_instance_cache()

         with caplog.at_level(logging.DEBUG, logger="s3fs"):
-            read_csv("s3://pandas-test/large-file.csv", nrows=5)
+            read_csv("s3://pandas-test/large-file.csv", nrows=5, storage_options=s3so)
             # log of fetch_range (start, stop)
             assert (0, 5505024) in (x.args[-2:] for x in caplog.records)

-    def test_read_s3_with_hash_in_key(self, tips_df):
+    def test_read_s3_with_hash_in_key(self, tips_df, s3so):
         # GH 25945
-        result = read_csv("s3://pandas-test/tips#1.csv")
+        result = read_csv("s3://pandas-test/tips#1.csv", storage_options=s3so)
         tm.assert_frame_equal(tips_df,
result) @td.skip_if_no("pyarrow") - def test_read_feather_s3_file_path(self, feather_file): + def test_read_feather_s3_file_path(self, feather_file, s3so): # GH 29055 expected = read_feather(feather_file) - res = read_feather("s3://pandas-test/simple_dataset.feather") + res = read_feather( + "s3://pandas-test/simple_dataset.feather", storage_options=s3so + ) tm.assert_frame_equal(expected, res) diff --git a/pandas/tests/io/test_fsspec.py b/pandas/tests/io/test_fsspec.py index 3e89f6ca4ae16..666da677d702e 100644 --- a/pandas/tests/io/test_fsspec.py +++ b/pandas/tests/io/test_fsspec.py @@ -131,27 +131,38 @@ def test_fastparquet_options(fsspectest): @td.skip_if_no("s3fs") -def test_from_s3_csv(s3_resource, tips_file): - tm.assert_equal(read_csv("s3://pandas-test/tips.csv"), read_csv(tips_file)) +def test_from_s3_csv(s3_resource, tips_file, s3so): + tm.assert_equal( + read_csv("s3://pandas-test/tips.csv", storage_options=s3so), read_csv(tips_file) + ) # the following are decompressed by pandas, not fsspec - tm.assert_equal(read_csv("s3://pandas-test/tips.csv.gz"), read_csv(tips_file)) - tm.assert_equal(read_csv("s3://pandas-test/tips.csv.bz2"), read_csv(tips_file)) + tm.assert_equal( + read_csv("s3://pandas-test/tips.csv.gz", storage_options=s3so), + read_csv(tips_file), + ) + tm.assert_equal( + read_csv("s3://pandas-test/tips.csv.bz2", storage_options=s3so), + read_csv(tips_file), + ) @pytest.mark.parametrize("protocol", ["s3", "s3a", "s3n"]) @td.skip_if_no("s3fs") -def test_s3_protocols(s3_resource, tips_file, protocol): +def test_s3_protocols(s3_resource, tips_file, protocol, s3so): tm.assert_equal( - read_csv("%s://pandas-test/tips.csv" % protocol), read_csv(tips_file) + read_csv("%s://pandas-test/tips.csv" % protocol, storage_options=s3so), + read_csv(tips_file), ) @td.skip_if_no("s3fs") @td.skip_if_no("fastparquet") -def test_s3_parquet(s3_resource): +def test_s3_parquet(s3_resource, s3so): fn = "s3://pandas-test/test.parquet" - df1.to_parquet(fn, index=False, engine="fastparquet", compression=None) - df2 = read_parquet(fn, engine="fastparquet") + df1.to_parquet( + fn, index=False, engine="fastparquet", compression=None, storage_options=s3so + ) + df2 = read_parquet(fn, engine="fastparquet", storage_options=s3so) tm.assert_equal(df1, df2) diff --git a/pandas/tests/io/test_parquet.py b/pandas/tests/io/test_parquet.py index 82157f3d722a9..3a3ba99484a3a 100644 --- a/pandas/tests/io/test_parquet.py +++ b/pandas/tests/io/test_parquet.py @@ -158,6 +158,10 @@ def check_round_trip( """ write_kwargs = write_kwargs or {"compression": None} read_kwargs = read_kwargs or {} + if isinstance(path, str) and "s3://" in path: + s3so = dict(client_kwargs={"endpoint_url": "http://127.0.0.1:5555/"}) + read_kwargs["storage_options"] = s3so + write_kwargs["storage_options"] = s3so if expected is None: expected = df @@ -537,9 +541,11 @@ def test_categorical(self, pa): expected = df.astype(object) check_round_trip(df, pa, expected=expected) - def test_s3_roundtrip_explicit_fs(self, df_compat, s3_resource, pa): + def test_s3_roundtrip_explicit_fs(self, df_compat, s3_resource, pa, s3so): s3fs = pytest.importorskip("s3fs") - s3 = s3fs.S3FileSystem() + if LooseVersion(pyarrow.__version__) <= LooseVersion("0.17.0"): + pytest.skip() + s3 = s3fs.S3FileSystem(**s3so) kw = dict(filesystem=s3) check_round_trip( df_compat, @@ -550,6 +556,8 @@ def test_s3_roundtrip_explicit_fs(self, df_compat, s3_resource, pa): ) def test_s3_roundtrip(self, df_compat, s3_resource, pa): + if LooseVersion(pyarrow.__version__) <= 
LooseVersion("0.17.0"): + pytest.skip() # GH #19134 check_round_trip(df_compat, pa, path="s3://pandas-test/pyarrow.parquet") @@ -560,10 +568,13 @@ def test_s3_roundtrip_for_dir(self, df_compat, s3_resource, pa, partition_col): # https://github.com/apache/arrow/blob/master/python/pyarrow/tests/test_parquet.py#L2716 # As per pyarrow partitioned columns become 'categorical' dtypes # and are added to back of dataframe on read + if partition_col and pd.compat.is_platform_windows(): + pytest.skip("pyarrow/win incompatibility #35791") expected_df = df_compat.copy() if partition_col: expected_df[partition_col] = expected_df[partition_col].astype("category") + check_round_trip( df_compat, pa, diff --git a/requirements-dev.txt b/requirements-dev.txt index deaed8ab9d5f1..2fbb20ddfd3bf 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -32,6 +32,7 @@ boto3 botocore>=1.11 hypothesis>=3.82 moto +flask pytest>=5.0.1 pytest-cov pytest-xdist>=1.21 From 0e199f3d490045d592715b3e95a0203729f17a0f Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Fri, 21 Aug 2020 14:07:28 -0700 Subject: [PATCH 307/632] REF: simplify _cython_agg_blocks (#35841) --- pandas/core/groupby/generic.py | 30 ++++++++++++------------------ 1 file changed, 12 insertions(+), 18 deletions(-) diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index 166631e69f523..60e23b14eaf09 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -24,7 +24,6 @@ Tuple, Type, Union, - cast, ) import warnings @@ -1100,24 +1099,19 @@ def blk_func(block: "Block") -> List["Block"]: # continue and exclude the block raise else: - result = cast(DataFrame, result) + assert isinstance(result, (Series, DataFrame)) # for mypy + # In the case of object dtype block, it may have been split + # in the operation. We un-split here. + result = result._consolidate() + assert isinstance(result, (Series, DataFrame)) # for mypy + assert len(result._mgr.blocks) == 1 + # unwrap DataFrame to get array - if len(result._mgr.blocks) != 1: - # We've split an object block! Everything we've assumed - # about a single block input returning a single block output - # is a lie. To keep the code-path for the typical non-split case - # clean, we choose to clean up this mess later on. 
- assert len(locs) == result.shape[1] - for i, loc in enumerate(locs): - agg_block = result.iloc[:, [i]]._mgr.blocks[0] - agg_block.mgr_locs = [loc] - new_blocks.append(agg_block) - else: - result = result._mgr.blocks[0].values - if isinstance(result, np.ndarray) and result.ndim == 1: - result = result.reshape(1, -1) - agg_block = cast_result_block(result, block, how) - new_blocks = [agg_block] + result = result._mgr.blocks[0].values + if isinstance(result, np.ndarray) and result.ndim == 1: + result = result.reshape(1, -1) + agg_block = cast_result_block(result, block, how) + new_blocks = [agg_block] else: agg_block = cast_result_block(result, block, how) new_blocks = [agg_block] From 4e3940f8457a9eb7158660f95ec2ece2b80ed232 Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Fri, 21 Aug 2020 14:11:33 -0700 Subject: [PATCH 308/632] CI: avoid file leak from ipython tests (#35836) --- pandas/conftest.py | 8 +++++++- pandas/tests/frame/test_api.py | 2 ++ pandas/tests/io/formats/test_format.py | 2 ++ pandas/tests/resample/test_resampler_grouper.py | 2 ++ pandas/tests/series/test_api.py | 2 ++ 5 files changed, 15 insertions(+), 1 deletion(-) diff --git a/pandas/conftest.py b/pandas/conftest.py index 97cc514e31bb3..0878380d00837 100644 --- a/pandas/conftest.py +++ b/pandas/conftest.py @@ -1181,7 +1181,13 @@ def ip(): pytest.importorskip("IPython", minversion="6.0.0") from IPython.core.interactiveshell import InteractiveShell - return InteractiveShell() + # GH#35711 make sure sqlite history file handle is not leaked + from traitlets.config import Config # noqa: F401 isort:skip + + c = Config() + c.HistoryManager.hist_file = ":memory:" + + return InteractiveShell(config=c) @pytest.fixture(params=["bsr", "coo", "csc", "csr", "dia", "dok", "lil"]) diff --git a/pandas/tests/frame/test_api.py b/pandas/tests/frame/test_api.py index 2fb1f7f911a9c..0716cf5e27119 100644 --- a/pandas/tests/frame/test_api.py +++ b/pandas/tests/frame/test_api.py @@ -6,6 +6,7 @@ import numpy as np import pytest +import pandas.util._test_decorators as td from pandas.util._test_decorators import async_mark, skip_if_no import pandas as pd @@ -521,6 +522,7 @@ def _check_f(base, f): _check_f(d.copy(), f) @async_mark() + @td.check_file_leaks async def test_tab_complete_warning(self, ip): # GH 16409 pytest.importorskip("IPython", minversion="6.0.0") diff --git a/pandas/tests/io/formats/test_format.py b/pandas/tests/io/formats/test_format.py index 84805d06df4a8..1bbfe4d7d74af 100644 --- a/pandas/tests/io/formats/test_format.py +++ b/pandas/tests/io/formats/test_format.py @@ -19,6 +19,7 @@ import pytz from pandas.compat import is_platform_32bit, is_platform_windows +import pandas.util._test_decorators as td import pandas as pd from pandas import ( @@ -3338,6 +3339,7 @@ def test_format_percentiles_integer_idx(): assert result == expected +@td.check_file_leaks def test_repr_html_ipython_config(ip): code = textwrap.dedent( """\ diff --git a/pandas/tests/resample/test_resampler_grouper.py b/pandas/tests/resample/test_resampler_grouper.py index b36b11582c1ec..f18aaa5e86829 100644 --- a/pandas/tests/resample/test_resampler_grouper.py +++ b/pandas/tests/resample/test_resampler_grouper.py @@ -3,6 +3,7 @@ import numpy as np import pytest +import pandas.util._test_decorators as td from pandas.util._test_decorators import async_mark import pandas as pd @@ -17,6 +18,7 @@ @async_mark() +@td.check_file_leaks async def test_tab_complete_ipython6_warning(ip): from IPython.core.completer import provisionalcompleter diff --git 
a/pandas/tests/series/test_api.py b/pandas/tests/series/test_api.py index b174eb0e42776..d81e8a4f82ffb 100644 --- a/pandas/tests/series/test_api.py +++ b/pandas/tests/series/test_api.py @@ -5,6 +5,7 @@ import numpy as np import pytest +import pandas.util._test_decorators as td from pandas.util._test_decorators import async_mark import pandas as pd @@ -486,6 +487,7 @@ def test_empty_method(self): assert not full_series.empty @async_mark() + @td.check_file_leaks async def test_tab_complete_warning(self, ip): # https://github.com/pandas-dev/pandas/issues/16409 pytest.importorskip("IPython", minversion="6.0.0") From 5746d34ac1ec8d7c45aa768e6ae62a4bd3cae427 Mon Sep 17 00:00:00 2001 From: Fangchen Li Date: Fri, 21 Aug 2020 16:12:58 -0500 Subject: [PATCH 309/632] CI/DOC: unpin gitdb #35823 (#35824) --- environment.yml | 2 +- requirements-dev.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/environment.yml b/environment.yml index 6afc19c227512..96f2c8d2086c7 100644 --- a/environment.yml +++ b/environment.yml @@ -26,7 +26,7 @@ dependencies: # documentation - gitpython # obtain contributors from git for whatsnew - - gitdb2=2.0.6 # GH-32060 + - gitdb - sphinx # documentation (jupyter notebooks) diff --git a/requirements-dev.txt b/requirements-dev.txt index 2fbb20ddfd3bf..1fca25c9fecd9 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -15,7 +15,7 @@ isort>=5.2.1 mypy==0.730 pycodestyle gitpython -gitdb2==2.0.6 +gitdb sphinx nbconvert>=5.4.1 nbsphinx From 7e919373236a538b9157359af9c7b8ca3d32e920 Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Fri, 21 Aug 2020 14:53:51 -0700 Subject: [PATCH 310/632] BUG: issubclass check with dtype instead of type, closes GH#24883 (#35794) --- doc/source/whatsnew/v1.1.2.rst | 2 +- pandas/core/computation/ops.py | 14 +++++++++++--- pandas/tests/frame/test_query_eval.py | 7 +++++++ 3 files changed, 19 insertions(+), 4 deletions(-) diff --git a/doc/source/whatsnew/v1.1.2.rst b/doc/source/whatsnew/v1.1.2.rst index 81acd567027e5..7bd547bf03a87 100644 --- a/doc/source/whatsnew/v1.1.2.rst +++ b/doc/source/whatsnew/v1.1.2.rst @@ -24,7 +24,7 @@ Fixed regressions Bug fixes ~~~~~~~~~ - +- Bug in :meth:`DataFrame.eval` with ``object`` dtype column binary operations (:issue:`35794`) - - diff --git a/pandas/core/computation/ops.py b/pandas/core/computation/ops.py index bc9ff7c44b689..e55df1e1d8155 100644 --- a/pandas/core/computation/ops.py +++ b/pandas/core/computation/ops.py @@ -481,13 +481,21 @@ def stringify(value): self.lhs.update(v) def _disallow_scalar_only_bool_ops(self): + rhs = self.rhs + lhs = self.lhs + + # GH#24883 unwrap dtype if necessary to ensure we have a type object + rhs_rt = rhs.return_type + rhs_rt = getattr(rhs_rt, "type", rhs_rt) + lhs_rt = lhs.return_type + lhs_rt = getattr(lhs_rt, "type", lhs_rt) if ( - (self.lhs.is_scalar or self.rhs.is_scalar) + (lhs.is_scalar or rhs.is_scalar) and self.op in _bool_ops_dict and ( not ( - issubclass(self.rhs.return_type, (bool, np.bool_)) - and issubclass(self.lhs.return_type, (bool, np.bool_)) + issubclass(rhs_rt, (bool, np.bool_)) + and issubclass(lhs_rt, (bool, np.bool_)) ) ) ): diff --git a/pandas/tests/frame/test_query_eval.py b/pandas/tests/frame/test_query_eval.py index 628b955a1de92..56d178daee7fd 100644 --- a/pandas/tests/frame/test_query_eval.py +++ b/pandas/tests/frame/test_query_eval.py @@ -160,6 +160,13 @@ def test_eval_resolvers_as_list(self): assert df.eval("a + b", resolvers=[dict1, dict2]) == dict1["a"] + dict2["b"] assert pd.eval("a + b", resolvers=[dict1, 
dict2]) == dict1["a"] + dict2["b"] + def test_eval_object_dtype_binop(self): + # GH#24883 + df = pd.DataFrame({"a1": ["Y", "N"]}) + res = df.eval("c = ((a1 == 'Y') & True)") + expected = pd.DataFrame({"a1": ["Y", "N"], "c": [True, False]}) + tm.assert_frame_equal(res, expected) + class TestDataFrameQueryWithMultiIndex: def test_query_with_named_multiindex(self, parser, engine): From 94680719fca2acaaed77ffc6f74c57640de5a6ab Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Fri, 21 Aug 2020 14:56:50 -0700 Subject: [PATCH 311/632] REF: _apply_blockwise define exclude in terms of skipped (#35740) --- pandas/core/window/rolling.py | 27 +++++++++++++++++---------- 1 file changed, 17 insertions(+), 10 deletions(-) diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py index ac96258cbc3c9..f516871f789d0 100644 --- a/pandas/core/window/rolling.py +++ b/pandas/core/window/rolling.py @@ -12,7 +12,7 @@ from pandas._libs.tslibs import BaseOffset, to_offset import pandas._libs.window.aggregations as window_aggregations -from pandas._typing import ArrayLike, Axis, FrameOrSeries, Scalar +from pandas._typing import ArrayLike, Axis, FrameOrSeries, Label from pandas.compat._optional import import_optional_dependency from pandas.compat.numpy import function as nv from pandas.util._decorators import Appender, Substitution, cache_readonly, doc @@ -381,21 +381,31 @@ def _wrap_result(self, result, block=None, obj=None): return type(obj)(result, index=index, columns=block.columns) return result - def _wrap_results(self, results, blocks, obj, exclude=None) -> FrameOrSeries: + def _wrap_results(self, results, obj, skipped: List[int]) -> FrameOrSeries: """ Wrap the results. Parameters ---------- results : list of ndarrays - blocks : list of blocks obj : conformed data (may be resampled) - exclude: list of columns to exclude, default to None + skipped: List[int] + Indices of blocks that are skipped. 
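+
+            Columns belonging to the skipped blocks are collected into
+            ``exclude`` and dropped from the wrapped result.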
""" from pandas import Series, concat + exclude: List[Label] = [] + if obj.ndim == 2: + orig_blocks = list(obj._to_dict_of_blocks(copy=False).values()) + for i in skipped: + exclude.extend(orig_blocks[i].columns) + else: + orig_blocks = [obj] + + kept_blocks = [blk for i, blk in enumerate(orig_blocks) if i not in skipped] + final = [] - for result, block in zip(results, blocks): + for result, block in zip(results, kept_blocks): result = self._wrap_result(result, block=block, obj=obj) if result.ndim == 1: @@ -491,7 +501,6 @@ def _apply_blockwise( skipped: List[int] = [] results: List[ArrayLike] = [] - exclude: List[Scalar] = [] for i, b in enumerate(blocks): try: values = self._prep_values(b.values) @@ -499,7 +508,6 @@ def _apply_blockwise( except (TypeError, NotImplementedError) as err: if isinstance(obj, ABCDataFrame): skipped.append(i) - exclude.extend(b.columns) continue else: raise DataError("No numeric types to aggregate") from err @@ -507,8 +515,7 @@ def _apply_blockwise( result = homogeneous_func(values) results.append(result) - block_list = [blk for i, blk in enumerate(blocks) if i not in skipped] - return self._wrap_results(results, block_list, obj, exclude) + return self._wrap_results(results, obj, skipped) def _apply( self, @@ -1283,7 +1290,7 @@ def count(self): ).sum() results.append(result) - return self._wrap_results(results, blocks, obj) + return self._wrap_results(results, obj, skipped=[]) _shared_docs["apply"] = dedent( r""" From 8bf2bc4a049642154ece3168621854d275710485 Mon Sep 17 00:00:00 2001 From: Karthik Mathur <22126205+mathurk1@users.noreply.github.com> Date: Fri, 21 Aug 2020 17:34:51 -0500 Subject: [PATCH 312/632] TST: add test for agg on ordered categorical cols (#35630) --- .../tests/groupby/aggregate/test_aggregate.py | 79 +++++++++++++++++++ 1 file changed, 79 insertions(+) diff --git a/pandas/tests/groupby/aggregate/test_aggregate.py b/pandas/tests/groupby/aggregate/test_aggregate.py index ce9d4b892d775..8fe450fe6abfc 100644 --- a/pandas/tests/groupby/aggregate/test_aggregate.py +++ b/pandas/tests/groupby/aggregate/test_aggregate.py @@ -1063,6 +1063,85 @@ def test_groupby_get_by_index(): pd.testing.assert_frame_equal(res, expected) +@pytest.mark.parametrize( + "grp_col_dict, exp_data", + [ + ({"nr": "min", "cat_ord": "min"}, {"nr": [1, 5], "cat_ord": ["a", "c"]}), + ({"cat_ord": "min"}, {"cat_ord": ["a", "c"]}), + ({"nr": "min"}, {"nr": [1, 5]}), + ], +) +def test_groupby_single_agg_cat_cols(grp_col_dict, exp_data): + # test single aggregations on ordered categorical cols GHGH27800 + + # create the result dataframe + input_df = pd.DataFrame( + { + "nr": [1, 2, 3, 4, 5, 6, 7, 8], + "cat_ord": list("aabbccdd"), + "cat": list("aaaabbbb"), + } + ) + + input_df = input_df.astype({"cat": "category", "cat_ord": "category"}) + input_df["cat_ord"] = input_df["cat_ord"].cat.as_ordered() + result_df = input_df.groupby("cat").agg(grp_col_dict) + + # create expected dataframe + cat_index = pd.CategoricalIndex( + ["a", "b"], categories=["a", "b"], ordered=False, name="cat", dtype="category" + ) + + expected_df = pd.DataFrame(data=exp_data, index=cat_index) + + tm.assert_frame_equal(result_df, expected_df) + + +@pytest.mark.parametrize( + "grp_col_dict, exp_data", + [ + ({"nr": ["min", "max"], "cat_ord": "min"}, [(1, 4, "a"), (5, 8, "c")]), + ({"nr": "min", "cat_ord": ["min", "max"]}, [(1, "a", "b"), (5, "c", "d")]), + ({"cat_ord": ["min", "max"]}, [("a", "b"), ("c", "d")]), + ], +) +def test_groupby_combined_aggs_cat_cols(grp_col_dict, exp_data): + # test combined 
aggregations on ordered categorical cols GH27800
+
+    # create the result dataframe
+    input_df = pd.DataFrame(
+        {
+            "nr": [1, 2, 3, 4, 5, 6, 7, 8],
+            "cat_ord": list("aabbccdd"),
+            "cat": list("aaaabbbb"),
+        }
+    )
+
+    input_df = input_df.astype({"cat": "category", "cat_ord": "category"})
+    input_df["cat_ord"] = input_df["cat_ord"].cat.as_ordered()
+    result_df = input_df.groupby("cat").agg(grp_col_dict)
+
+    # create expected dataframe
+    cat_index = pd.CategoricalIndex(
+        ["a", "b"], categories=["a", "b"], ordered=False, name="cat", dtype="category"
+    )
+
+    # unpack the grp_col_dict to create the multi-index tuple
+    # this tuple will be used to create the expected dataframe index
+    multi_index_list = []
+    for k, v in grp_col_dict.items():
+        if isinstance(v, list):
+            for value in v:
+                multi_index_list.append([k, value])
+        else:
+            multi_index_list.append([k, v])
+    multi_index = pd.MultiIndex.from_tuples(tuple(multi_index_list))
+
+    expected_df = pd.DataFrame(data=exp_data, columns=multi_index, index=cat_index)
+
+    tm.assert_frame_equal(result_df, expected_df)
+
+
 def test_nonagg_agg():
     # GH 35490 - Single/Multiple agg of non-agg function give same results
     # TODO: agg should raise for functions that don't aggregate
From e2a622c0f6451239a6d28716f77fa8b06e606f55 Mon Sep 17 00:00:00 2001
From: tkmz-n <60312218+tkmz-n@users.noreply.github.com>
Date: Sat, 22 Aug 2020 07:42:50 +0900
Subject: [PATCH 313/632] TST: resample does not yield empty groups (#10603) (#35799)

---
 pandas/tests/resample/test_timedelta.py | 15 +++++++++++++++
 1 file changed, 15 insertions(+)

diff --git a/pandas/tests/resample/test_timedelta.py b/pandas/tests/resample/test_timedelta.py
index 0fbb60c176b30..3fa85e62d028c 100644
--- a/pandas/tests/resample/test_timedelta.py
+++ b/pandas/tests/resample/test_timedelta.py
@@ -150,3 +150,18 @@ def test_resample_timedelta_edge_case(start, end, freq, resample_freq):
     tm.assert_index_equal(result.index, expected_index)
     assert result.index.freq == expected_index.freq
     assert not np.isnan(result[-1])
+
+
+def test_resample_with_timedelta_yields_no_empty_groups():
+    # GH 10603
+    df = pd.DataFrame(
+        np.random.normal(size=(10000, 4)),
+        index=pd.timedelta_range(start="0s", periods=10000, freq="3906250n"),
+    )
+    result = df.loc["1s":, :].resample("3s").apply(lambda x: len(x))
+
+    expected = pd.DataFrame(
+        [[768.0] * 4] * 12 + [[528.0] * 4],
+        index=pd.timedelta_range(start="1s", periods=13, freq="3s"),
+    )
+    tm.assert_frame_equal(result, expected)
From 3ff1b5fe741f018bda5836b49aba81bc7a53b60a Mon Sep 17 00:00:00 2001
From: jbrockmendel
Date: Fri, 21 Aug 2020 19:02:01 -0700
Subject: [PATCH 314/632] REF: remove unnecessary try/except (#35839)

---
 pandas/core/groupby/generic.py | 61 ++++++++++++++++------------------
 1 file changed, 29 insertions(+), 32 deletions(-)

diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py
index 60e23b14eaf09..4b1f6cfe0a662 100644
--- a/pandas/core/groupby/generic.py
+++ b/pandas/core/groupby/generic.py
@@ -30,7 +30,7 @@
 import numpy as np

 from pandas._libs import lib
-from pandas._typing import FrameOrSeries, FrameOrSeriesUnion
+from pandas._typing import ArrayLike, FrameOrSeries, FrameOrSeriesUnion
 from pandas.util._decorators import Appender, Substitution, doc

 from pandas.core.dtypes.cast import (
@@ -59,6 +59,7 @@
     validate_func_kwargs,
 )
 import pandas.core.algorithms as algorithms
+from pandas.core.arrays import ExtensionArray
 from pandas.core.base import DataError, SpecificationError
 import pandas.core.common as com
 from
pandas.core.construction import create_series_with_explicit_dtype @@ -1033,32 +1034,31 @@ def _cython_agg_blocks( no_result = object() - def cast_result_block(result, block: "Block", how: str) -> "Block": - # see if we can cast the block to the desired dtype + def cast_agg_result(result, values: ArrayLike, how: str) -> ArrayLike: + # see if we can cast the values to the desired dtype # this may not be the original dtype assert not isinstance(result, DataFrame) assert result is not no_result - dtype = maybe_cast_result_dtype(block.dtype, how) + dtype = maybe_cast_result_dtype(values.dtype, how) result = maybe_downcast_numeric(result, dtype) - if block.is_extension and isinstance(result, np.ndarray): - # e.g. block.values was an IntegerArray - # (1, N) case can occur if block.values was Categorical + if isinstance(values, ExtensionArray) and isinstance(result, np.ndarray): + # e.g. values was an IntegerArray + # (1, N) case can occur if values was Categorical # and result is ndarray[object] # TODO(EA2D): special casing not needed with 2D EAs assert result.ndim == 1 or result.shape[0] == 1 try: # Cast back if feasible - result = type(block.values)._from_sequence( - result.ravel(), dtype=block.values.dtype + result = type(values)._from_sequence( + result.ravel(), dtype=values.dtype ) except (ValueError, TypeError): # reshape to be valid for non-Extension Block result = result.reshape(1, -1) - agg_block: "Block" = block.make_block(result) - return agg_block + return result def blk_func(block: "Block") -> List["Block"]: new_blocks: List["Block"] = [] @@ -1092,28 +1092,25 @@ def blk_func(block: "Block") -> List["Block"]: # Categoricals. This will done by later self._reindex_output() # Doing it here creates an error. See GH#34951 sgb = get_groupby(obj, self.grouper, observed=True) - try: - result = sgb.aggregate(lambda x: alt(x, axis=self.axis)) - except TypeError: - # we may have an exception in trying to aggregate - # continue and exclude the block - raise - else: - assert isinstance(result, (Series, DataFrame)) # for mypy - # In the case of object dtype block, it may have been split - # in the operation. We un-split here. - result = result._consolidate() - assert isinstance(result, (Series, DataFrame)) # for mypy - assert len(result._mgr.blocks) == 1 - - # unwrap DataFrame to get array - result = result._mgr.blocks[0].values - if isinstance(result, np.ndarray) and result.ndim == 1: - result = result.reshape(1, -1) - agg_block = cast_result_block(result, block, how) - new_blocks = [agg_block] + result = sgb.aggregate(lambda x: alt(x, axis=self.axis)) + + assert isinstance(result, (Series, DataFrame)) # for mypy + # In the case of object dtype block, it may have been split + # in the operation. We un-split here. 
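+            # (_consolidate() merges the split object-dtype blocks back into
+            # a single block, restoring the invariant asserted just below.)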
+ result = result._consolidate() + assert isinstance(result, (Series, DataFrame)) # for mypy + assert len(result._mgr.blocks) == 1 + + # unwrap DataFrame to get array + result = result._mgr.blocks[0].values + if isinstance(result, np.ndarray) and result.ndim == 1: + result = result.reshape(1, -1) + res_values = cast_agg_result(result, block.values, how) + agg_block = block.make_block(res_values) + new_blocks = [agg_block] else: - agg_block = cast_result_block(result, block, how) + res_values = cast_agg_result(result, block.values, how) + agg_block = block.make_block(res_values) new_blocks = [agg_block] return new_blocks From 82f9a0eaa6b7bdf841ace05f5a23f0e54b63dd32 Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Fri, 21 Aug 2020 19:04:20 -0700 Subject: [PATCH 315/632] BUG: DataFrame.apply with result_type=reduce incorrect index (#35777) * BUG: DataFrame.apply with result_type=reduce incorrect index * move whatsnew to 1.1.1 * move whatsnew --- doc/source/whatsnew/v1.1.2.rst | 1 + pandas/core/apply.py | 5 ++++- pandas/tests/frame/apply/test_frame_apply.py | 9 +++++++++ 3 files changed, 14 insertions(+), 1 deletion(-) diff --git a/doc/source/whatsnew/v1.1.2.rst b/doc/source/whatsnew/v1.1.2.rst index 7bd547bf03a87..c1b73c60be92b 100644 --- a/doc/source/whatsnew/v1.1.2.rst +++ b/doc/source/whatsnew/v1.1.2.rst @@ -25,6 +25,7 @@ Fixed regressions Bug fixes ~~~~~~~~~ - Bug in :meth:`DataFrame.eval` with ``object`` dtype column binary operations (:issue:`35794`) +- Bug in :meth:`DataFrame.apply` with ``result_type="reduce"`` returning with incorrect index (:issue:`35683`) - - diff --git a/pandas/core/apply.py b/pandas/core/apply.py index 6d44cf917a07a..99a9e1377563c 100644 --- a/pandas/core/apply.py +++ b/pandas/core/apply.py @@ -340,7 +340,10 @@ def wrap_results_for_axis( if self.result_type == "reduce": # e.g. 
test_apply_dict GH#8735 - return self.obj._constructor_sliced(results) + res = self.obj._constructor_sliced(results) + res.index = res_index + return res + elif self.result_type is None and all( isinstance(x, dict) for x in results.values() ): diff --git a/pandas/tests/frame/apply/test_frame_apply.py b/pandas/tests/frame/apply/test_frame_apply.py index 538978358c8e7..5a1e448beb40f 100644 --- a/pandas/tests/frame/apply/test_frame_apply.py +++ b/pandas/tests/frame/apply/test_frame_apply.py @@ -1541,3 +1541,12 @@ def func(row): tm.assert_frame_equal(result, expected) tm.assert_frame_equal(df, result) + + +def test_apply_empty_list_reduce(): + # GH#35683 get columns correct + df = pd.DataFrame([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]], columns=["a", "b"]) + + result = df.apply(lambda x: [], result_type="reduce") + expected = pd.Series({"a": [], "b": []}, dtype=object) + tm.assert_series_equal(result, expected) From 3269f548a7c601cc97c7d54bf6681de609e05b4c Mon Sep 17 00:00:00 2001 From: Chankey Pathak Date: Sat, 22 Aug 2020 08:38:19 +0530 Subject: [PATCH 316/632] Avoid redirect (#35674) --- doc/source/getting_started/tutorials.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/getting_started/tutorials.rst b/doc/source/getting_started/tutorials.rst index 4c2d0621c6103..b8940d2efed2f 100644 --- a/doc/source/getting_started/tutorials.rst +++ b/doc/source/getting_started/tutorials.rst @@ -94,4 +94,4 @@ Various tutorials * `Intro to pandas data structures, by Greg Reda `_ * `Pandas and Python: Top 10, by Manish Amde `_ * `Pandas DataFrames Tutorial, by Karlijn Willems `_ -* `A concise tutorial with real life examples `_ +* `A concise tutorial with real life examples `_ From 068e6542b0b28d80d8ea5953b6a49a2608f6a541 Mon Sep 17 00:00:00 2001 From: Matthew Roeschke Date: Fri, 21 Aug 2020 20:30:05 -0700 Subject: [PATCH 317/632] PERF: Allow jitting of groupby agg loop (#35759) * Roll back groupby agg changes * Add aggragate_with_numba * Fix cases where operation on Series inputs * Simplify case, handle Series correctly * Ensure function is being cached, validate the udf signature for groupby agg * Move some functionality to groupby/numba_.py * Change ValueError to NotImplementedError * Comment that it's only 1 function that is supported * Add whatsnew * Add issue number and correct typing * Add docstring for _aggregate_with_numba * Lint Co-authored-by: Matt Roeschke --- doc/source/whatsnew/v1.2.0.rst | 2 +- pandas/core/groupby/generic.py | 45 +++-- pandas/core/groupby/groupby.py | 70 +++++--- pandas/core/groupby/numba_.py | 172 +++++++++++++++++++ pandas/core/groupby/ops.py | 48 +----- pandas/core/util/numba_.py | 93 ---------- pandas/tests/groupby/aggregate/test_numba.py | 24 ++- 7 files changed, 274 insertions(+), 180 deletions(-) create mode 100644 pandas/core/groupby/numba_.py diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index 3e6ed1cdf8f7e..09a5bcb0917c2 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -154,7 +154,7 @@ Deprecations Performance improvements ~~~~~~~~~~~~~~~~~~~~~~~~ -- +- Performance improvement in :meth:`GroupBy.agg` with the ``numba`` engine (:issue:`35759`) - .. 
--------------------------------------------------------------------------- diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index 4b1f6cfe0a662..0edbfe3d67ca5 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -70,19 +70,16 @@ GroupBy, _agg_template, _apply_docs, + _group_selection_context, _transform_template, get_groupby, ) +from pandas.core.groupby.numba_ import generate_numba_func, split_for_numba from pandas.core.indexes.api import Index, MultiIndex, all_indexes_same import pandas.core.indexes.base as ibase from pandas.core.internals import BlockManager, make_block from pandas.core.series import Series -from pandas.core.util.numba_ import ( - NUMBA_FUNC_CACHE, - generate_numba_func, - maybe_use_numba, - split_for_numba, -) +from pandas.core.util.numba_ import NUMBA_FUNC_CACHE, maybe_use_numba from pandas.plotting import boxplot_frame_groupby @@ -230,6 +227,18 @@ def apply(self, func, *args, **kwargs): ) def aggregate(self, func=None, *args, engine=None, engine_kwargs=None, **kwargs): + if maybe_use_numba(engine): + if not callable(func): + raise NotImplementedError( + "Numba engine can only be used with a single function." + ) + with _group_selection_context(self): + data = self._selected_obj + result, index = self._aggregate_with_numba( + data.to_frame(), func, *args, engine_kwargs=engine_kwargs, **kwargs + ) + return self.obj._constructor(result.ravel(), index=index, name=data.name) + relabeling = func is None columns = None if relabeling: @@ -252,16 +261,11 @@ def aggregate(self, func=None, *args, engine=None, engine_kwargs=None, **kwargs) return getattr(self, cyfunc)() if self.grouper.nkeys > 1: - return self._python_agg_general( - func, *args, engine=engine, engine_kwargs=engine_kwargs, **kwargs - ) + return self._python_agg_general(func, *args, **kwargs) try: - return self._python_agg_general( - func, *args, engine=engine, engine_kwargs=engine_kwargs, **kwargs - ) + return self._python_agg_general(func, *args, **kwargs) except (ValueError, KeyError): - # Do not catch Numba errors here, we want to raise and not fall back. # TODO: KeyError is raised in _python_agg_general, # see see test_groupby.test_basic result = self._aggregate_named(func, *args, **kwargs) @@ -937,12 +941,19 @@ class DataFrameGroupBy(GroupBy[DataFrame]): ) def aggregate(self, func=None, *args, engine=None, engine_kwargs=None, **kwargs): - relabeling, func, columns, order = reconstruct_func(func, **kwargs) - if maybe_use_numba(engine): - return self._python_agg_general( - func, *args, engine=engine, engine_kwargs=engine_kwargs, **kwargs + if not callable(func): + raise NotImplementedError( + "Numba engine can only be used with a single function." + ) + with _group_selection_context(self): + data = self._selected_obj + result, index = self._aggregate_with_numba( + data, func, *args, engine_kwargs=engine_kwargs, **kwargs ) + return self.obj._constructor(result, index=index, columns=data.columns) + + relabeling, func, columns, order = reconstruct_func(func, **kwargs) result, how = self._aggregate(func, *args, **kwargs) if how is None: diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index 0047877ef78ee..f96b488fb8d0d 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -34,7 +34,7 @@ class providing the base-class of operations. 
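# A usage sketch for the engine dispatch added above (the frame and grouping
# key are illustrative, not part of the patch); the UDF must accept values
# and index, per the validation in groupby/numba_.py further below:
#
#     def f(values, index):
#         return values.sum()
#
#     df.groupby("key").agg(f, engine="numba")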
from pandas._config.config import option_context -from pandas._libs import Timestamp +from pandas._libs import Timestamp, lib import pandas._libs.groupby as libgroupby from pandas._typing import F, FrameOrSeries, FrameOrSeriesUnion, Scalar from pandas.compat.numpy import function as nv @@ -61,11 +61,11 @@ class providing the base-class of operations. import pandas.core.common as com from pandas.core.frame import DataFrame from pandas.core.generic import NDFrame -from pandas.core.groupby import base, ops +from pandas.core.groupby import base, numba_, ops from pandas.core.indexes.api import CategoricalIndex, Index, MultiIndex from pandas.core.series import Series from pandas.core.sorting import get_group_index_sorter -from pandas.core.util.numba_ import maybe_use_numba +from pandas.core.util.numba_ import NUMBA_FUNC_CACHE _common_see_also = """ See Also @@ -384,7 +384,8 @@ class providing the base-class of operations. - dict of axis labels -> functions, function names or list of such. Can also accept a Numba JIT function with - ``engine='numba'`` specified. + ``engine='numba'`` specified. Only passing a single function is supported + with this engine. If the ``'numba'`` engine is chosen, the function must be a user defined function with ``values`` and ``index`` as the @@ -1053,12 +1054,43 @@ def _cython_agg_general( return self._wrap_aggregated_output(output, index=self.grouper.result_index) - def _python_agg_general( - self, func, *args, engine="cython", engine_kwargs=None, **kwargs - ): + def _aggregate_with_numba(self, data, func, *args, engine_kwargs=None, **kwargs): + """ + Perform groupby aggregation routine with the numba engine. + + This routine mimics the data splitting routine of the DataSplitter class + to generate the indices of each group in the sorted data and then passes the + data and indices into a Numba jitted function. 
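+
+        In outline: ``labels`` are sorted once, ``generate_slices`` yields
+        per-group ``starts``/``ends`` offsets, and group ``i`` is the slice
+        ``sorted_data[starts[i]:ends[i]]``.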
+        """
+        group_keys = self.grouper._get_group_keys()
+        labels, _, n_groups = self.grouper.group_info
+        sorted_index = get_group_index_sorter(labels, n_groups)
+        sorted_labels = algorithms.take_nd(labels, sorted_index, allow_fill=False)
+        sorted_data = data.take(sorted_index, axis=self.axis).to_numpy()
+        starts, ends = lib.generate_slices(sorted_labels, n_groups)
+        cache_key = (func, "groupby_agg")
+        if cache_key in NUMBA_FUNC_CACHE:
+            # Return an already compiled version of the agg function if available
+            numba_agg_func = NUMBA_FUNC_CACHE[cache_key]
+        else:
+            numba_agg_func = numba_.generate_numba_agg_func(
+                tuple(args), kwargs, func, engine_kwargs
+            )
+        result = numba_agg_func(
+            sorted_data, sorted_index, starts, ends, len(group_keys), len(data.columns),
+        )
+        if cache_key not in NUMBA_FUNC_CACHE:
+            NUMBA_FUNC_CACHE[cache_key] = numba_agg_func
+
+        if self.grouper.nkeys > 1:
+            index = MultiIndex.from_tuples(group_keys, names=self.grouper.names)
+        else:
+            index = Index(group_keys, name=self.grouper.names[0])
+        return result, index
+
+    def _python_agg_general(self, func, *args, **kwargs):
         func = self._is_builtin_func(func)
-        if engine != "numba":
-            f = lambda x: func(x, *args, **kwargs)
+        f = lambda x: func(x, *args, **kwargs)

         # iterate through "columns" ex exclusions to populate output dict
         output: Dict[base.OutputKey, np.ndarray] = {}
@@ -1069,21 +1101,11 @@ def _python_agg_general(
                 # agg_series below assumes ngroups > 0
                 continue

-            if maybe_use_numba(engine):
-                result, counts = self.grouper.agg_series(
-                    obj,
-                    func,
-                    *args,
-                    engine=engine,
-                    engine_kwargs=engine_kwargs,
-                    **kwargs,
-                )
-            else:
-                try:
-                    # if this function is invalid for this dtype, we will ignore it.
-                    result, counts = self.grouper.agg_series(obj, f)
-                except TypeError:
-                    continue
+            try:
+                # if this function is invalid for this dtype, we will ignore it.
+                result, counts = self.grouper.agg_series(obj, f)
+            except TypeError:
+                continue

             assert result is not None
             key = base.OutputKey(label=name, position=idx)
diff --git a/pandas/core/groupby/numba_.py b/pandas/core/groupby/numba_.py
new file mode 100644
index 0000000000000..aebe60f797fcd
--- /dev/null
+++ b/pandas/core/groupby/numba_.py
@@ -0,0 +1,172 @@
+"""Common utilities for Numba operations with groupby ops"""
+import inspect
+from typing import Any, Callable, Dict, Optional, Tuple
+
+import numpy as np
+
+from pandas._typing import FrameOrSeries, Scalar
+from pandas.compat._optional import import_optional_dependency
+
+from pandas.core.util.numba_ import (
+    NUMBA_FUNC_CACHE,
+    NumbaUtilError,
+    check_kwargs_and_nopython,
+    get_jit_arguments,
+    jit_user_function,
+)
+
+
+def split_for_numba(arg: FrameOrSeries) -> Tuple[np.ndarray, np.ndarray]:
+    """
+    Split pandas object into its components as numpy arrays for numba functions.
+
+    Parameters
+    ----------
+    arg : Series or DataFrame
+
+    Returns
+    -------
+    (ndarray, ndarray)
+        values, index
+    """
+    return arg.to_numpy(), arg.index.to_numpy()
+
+
+def validate_udf(func: Callable) -> None:
+    """
+    Validate user defined function for ops when using Numba with groupby ops.
+
+    The first signature arguments should include:
+
+    def f(values, index, ...):
+        ...
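+
+    so that, e.g., ``def f(values, index): return values.sum()`` validates.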
+
+ Parameters
+ ----------
+ func : function
+ user defined function
+
+ Returns
+ -------
+ None
+
+ Raises
+ ------
+ NumbaUtilError
+ """
+ udf_signature = list(inspect.signature(func).parameters.keys())
+ expected_args = ["values", "index"]
+ min_number_args = len(expected_args)
+ if (
+ len(udf_signature) < min_number_args
+ or udf_signature[:min_number_args] != expected_args
+ ):
+ raise NumbaUtilError(
+ f"The first {min_number_args} arguments to {func.__name__} must be "
+ f"{expected_args}"
+ )
+
+
+def generate_numba_func(
+ func: Callable,
+ engine_kwargs: Optional[Dict[str, bool]],
+ kwargs: dict,
+ cache_key_str: str,
+) -> Tuple[Callable, Tuple[Callable, str]]:
+ """
+ Return a JITed function and cache key for the NUMBA_FUNC_CACHE.
+
+ This _may_ be specific to groupby (as it's only used there currently).
+
+ Parameters
+ ----------
+ func : function
+ user defined function
+ engine_kwargs : dict or None
+ numba.jit arguments
+ kwargs : dict
+ kwargs for func
+ cache_key_str : str
+ string representing the second part of the cache key tuple
+
+ Returns
+ -------
+ (JITed function, cache key)
+
+ Raises
+ ------
+ NumbaUtilError
+ """
+ nopython, nogil, parallel = get_jit_arguments(engine_kwargs)
+ check_kwargs_and_nopython(kwargs, nopython)
+ validate_udf(func)
+ cache_key = (func, cache_key_str)
+ numba_func = NUMBA_FUNC_CACHE.get(
+ cache_key, jit_user_function(func, nopython, nogil, parallel)
+ )
+ return numba_func, cache_key
+
+
+def generate_numba_agg_func(
+ args: Tuple,
+ kwargs: Dict[str, Any],
+ func: Callable[..., Scalar],
+ engine_kwargs: Optional[Dict[str, bool]],
+) -> Callable[[np.ndarray, np.ndarray, np.ndarray, np.ndarray, int, int], np.ndarray]:
+ """
+ Generate a numba jitted agg function specified by values from engine_kwargs.
+
+ 1. jit the user's function
+ 2. Return a groupby agg function with the jitted function inline
+
+ Configurations specified in engine_kwargs apply to both the user's
+ function _AND_ the groupby agg function.
+
+ Parameters
+ ----------
+ args : tuple
+ *args to be passed into the function
+ kwargs : dict
+ **kwargs to be passed into the function
+ func : function
+ function to be applied to each group and will be JITed
+ engine_kwargs : dict
+ dictionary of arguments to be passed into numba.jit
+
+ Returns
+ -------
+ Numba function
+ """
+ nopython, nogil, parallel = get_jit_arguments(engine_kwargs)
+
+ check_kwargs_and_nopython(kwargs, nopython)
+
+ validate_udf(func)
+
+ numba_func = jit_user_function(func, nopython, nogil, parallel)
+
+ numba = import_optional_dependency("numba")
+
+ if parallel:
+ loop_range = numba.prange
+ else:
+ loop_range = range
+
+ @numba.jit(nopython=nopython, nogil=nogil, parallel=parallel)
+ def group_apply(
+ values: np.ndarray,
+ index: np.ndarray,
+ begin: np.ndarray,
+ end: np.ndarray,
+ num_groups: int,
+ num_columns: int,
+ ) -> np.ndarray:
+ result = np.empty((num_groups, num_columns))
+ for i in loop_range(num_groups):
+ group_index = index[begin[i] : end[i]]
+ for j in loop_range(num_columns):
+ group = values[begin[i] : end[i], j]
+ result[i, j] = numba_func(group, group_index, *args)
+ return result
+
+ return group_apply
diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py
index 64eb413fe78fa..c6171a55359fe 100644
--- a/pandas/core/groupby/ops.py
+++ b/pandas/core/groupby/ops.py
@@ -55,12 +55,6 @@
 get_group_index_sorter,
 get_indexer_dict,
 )
-from pandas.core.util.numba_ import (
- NUMBA_FUNC_CACHE,
- generate_numba_func,
- maybe_use_numba,
- split_for_numba,
-)
 
 
 class BaseGrouper:
@@ -610,21 +604,11 @@ def _transform(
 return result
 
 def agg_series(
- self,
- obj: Series,
- func: F,
- *args,
- engine: str = "cython",
- engine_kwargs=None,
- **kwargs,
+ self, obj: Series, func: F, *args, **kwargs,
 ):
 # Caller is responsible for checking ngroups != 0
 assert self.ngroups != 0
 
- if maybe_use_numba(engine):
- return self._aggregate_series_pure_python(
- obj, func, *args, engine=engine, engine_kwargs=engine_kwargs, **kwargs
- )
 if len(obj) == 0:
 # SeriesGrouper would raise if we were to call _aggregate_series_fast
 return self._aggregate_series_pure_python(obj, func)
@@ -670,20 +654,8 @@ def _aggregate_series_fast(self, obj: Series, func: F):
 return result, counts
 
 def _aggregate_series_pure_python(
- self,
- obj: Series,
- func: F,
- *args,
- engine: str = "cython",
- engine_kwargs=None,
- **kwargs,
+ self, obj: Series, func: F, *args, **kwargs,
 ):
-
- if maybe_use_numba(engine):
- numba_func, cache_key = generate_numba_func(
- func, engine_kwargs, kwargs, "groupby_agg"
- )
-
 group_index, _, ngroups = self.group_info
 
 counts = np.zeros(ngroups, dtype=int)
@@ -692,13 +664,7 @@ def _aggregate_series_pure_python(
 splitter = get_splitter(obj, group_index, ngroups, axis=0)
 
 for label, group in splitter:
- if maybe_use_numba(engine):
- values, index = split_for_numba(group)
- res = numba_func(values, index, *args)
- if cache_key not in NUMBA_FUNC_CACHE:
- NUMBA_FUNC_CACHE[cache_key] = numba_func
- else:
- res = func(group, *args, **kwargs)
+ res = func(group, *args, **kwargs)
 
 if result is None:
 if isinstance(res, (Series, Index, np.ndarray)):
@@ -876,13 +842,7 @@ def groupings(self) -> "List[grouper.Grouping]":
 ]
 
 def agg_series(
- self,
- obj: Series,
- func: F,
- *args,
- engine: str = "cython",
- engine_kwargs=None,
- **kwargs,
+ self, obj: Series, func: F, *args, **kwargs,
 ):
 # Caller is responsible for checking ngroups != 0
 assert self.ngroups != 0
diff --git a/pandas/core/util/numba_.py b/pandas/core/util/numba_.py index
c9b7943478cdd..b951cd4f0cc2a 100644 --- a/pandas/core/util/numba_.py +++ b/pandas/core/util/numba_.py @@ -1,12 +1,10 @@ """Common utilities for Numba operations""" from distutils.version import LooseVersion -import inspect import types from typing import Callable, Dict, Optional, Tuple import numpy as np -from pandas._typing import FrameOrSeries from pandas.compat._optional import import_optional_dependency from pandas.errors import NumbaUtilError @@ -129,94 +127,3 @@ def impl(data, *_args): return impl return numba_func - - -def split_for_numba(arg: FrameOrSeries) -> Tuple[np.ndarray, np.ndarray]: - """ - Split pandas object into its components as numpy arrays for numba functions. - - Parameters - ---------- - arg : Series or DataFrame - - Returns - ------- - (ndarray, ndarray) - values, index - """ - return arg.to_numpy(), arg.index.to_numpy() - - -def validate_udf(func: Callable) -> None: - """ - Validate user defined function for ops when using Numba. - - The first signature arguments should include: - - def f(values, index, ...): - ... - - Parameters - ---------- - func : function, default False - user defined function - - Returns - ------- - None - - Raises - ------ - NumbaUtilError - """ - udf_signature = list(inspect.signature(func).parameters.keys()) - expected_args = ["values", "index"] - min_number_args = len(expected_args) - if ( - len(udf_signature) < min_number_args - or udf_signature[:min_number_args] != expected_args - ): - raise NumbaUtilError( - f"The first {min_number_args} arguments to {func.__name__} must be " - f"{expected_args}" - ) - - -def generate_numba_func( - func: Callable, - engine_kwargs: Optional[Dict[str, bool]], - kwargs: dict, - cache_key_str: str, -) -> Tuple[Callable, Tuple[Callable, str]]: - """ - Return a JITed function and cache key for the NUMBA_FUNC_CACHE - - This _may_ be specific to groupby (as it's only used there currently). 
-
- Parameters
- ----------
- func : function
- user defined function
- engine_kwargs : dict or None
- numba.jit arguments
- kwargs : dict
- kwargs for func
- cache_key_str : str
- string representing the second part of the cache key tuple
-
- Returns
- -------
- (JITed function, cache key)
-
- Raises
- ------
- NumbaUtilError
- """
- nopython, nogil, parallel = get_jit_arguments(engine_kwargs)
- check_kwargs_and_nopython(kwargs, nopython)
- validate_udf(func)
- cache_key = (func, cache_key_str)
- numba_func = NUMBA_FUNC_CACHE.get(
- cache_key, jit_user_function(func, nopython, nogil, parallel)
- )
- return numba_func, cache_key
diff --git a/pandas/tests/groupby/aggregate/test_numba.py b/pandas/tests/groupby/aggregate/test_numba.py
index 690694b0e66f5..29e65e938f6f9 100644
--- a/pandas/tests/groupby/aggregate/test_numba.py
+++ b/pandas/tests/groupby/aggregate/test_numba.py
@@ -4,7 +4,7 @@
 from pandas.errors import NumbaUtilError
 import pandas.util._test_decorators as td
 
-from pandas import DataFrame, option_context
+from pandas import DataFrame, NamedAgg, option_context
 import pandas._testing as tm
 from pandas.core.util.numba_ import NUMBA_FUNC_CACHE
 
@@ -128,3 +128,25 @@ def func_1(values, index):
 with option_context("compute.use_numba", True):
 result = grouped.agg(func_1, engine=None)
 tm.assert_frame_equal(expected, result)
+
+
+@td.skip_if_no("numba", "0.46.0")
+@pytest.mark.parametrize(
+ "agg_func",
+ [
+ ["min", "max"],
+ "min",
+ {"B": ["min", "max"], "C": "sum"},
+ NamedAgg(column="B", aggfunc="min"),
+ ],
+)
+def test_multifunc_notimplemented(agg_func):
+ data = DataFrame(
+ {0: ["a", "a", "b", "b", "a"], 1: [1.0, 2.0, 3.0, 4.0, 5.0]}, columns=[0, 1],
+ )
+ grouped = data.groupby(0)
+ with pytest.raises(NotImplementedError, match="Numba engine can"):
+ grouped.agg(agg_func, engine="numba")
+
+ with pytest.raises(NotImplementedError, match="Numba engine can"):
+ grouped[1].agg(agg_func, engine="numba")

From 23b1717a14f185e3512607d33cdba17bfddfd064 Mon Sep 17 00:00:00 2001
From: Marian Denes
Date: Sat, 22 Aug 2020 18:03:20 +0200
Subject: [PATCH 318/632] DOC: Fixed docstring for mode() (#35624)

* DOC: Fixed docstring for mode()

* DOC: Changed docstring for mode() - Explanation of mode() function

* Update pandas/core/series.py

Co-authored-by: Marco Gorelli

* DOC: Changes in docstring for mode() - Fixed 1st line of docstring - Added explanation for mode() function - Explanation for mode(): joined 2 lines into 1

* Update pandas/core/series.py

Co-authored-by: Daniel Saxton <2658661+dsaxton@users.noreply.github.com>

Co-authored-by: Marco Gorelli
Co-authored-by: Daniel Saxton <2658661+dsaxton@users.noreply.github.com>
---
 pandas/core/series.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/pandas/core/series.py b/pandas/core/series.py
index cd2db659fbd0e..555024ad75f5e 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -1800,7 +1800,9 @@ def count(self, level=None):
 
 def mode(self, dropna=True) -> "Series":
 """
- Return the mode(s) of the dataset.
+ Return the mode(s) of the Series.
+
+ The mode is the value that appears most often. There can be multiple modes.
 
 Always returns Series even if only one value is returned.
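The behaviour described by the revised docstring can be checked interactively. A minimal doctest-style sketch (the input values are arbitrary, chosen only for illustration):

>>> import pandas as pd
>>> pd.Series([2, 2, 3, 3, 4]).mode()  # two values tie for most frequent
0    2
1    3
dtype: int64
>>> pd.Series([2, 2, 3]).mode()  # a single mode is still returned as a Series
0    2
dtype: int64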
From ee8967ffb388a27f5fc5d66331a4fd0f36ebb8ec Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Sat, 22 Aug 2020 17:06:16 -0700 Subject: [PATCH 319/632] REF: move towards making _apply_blockwise actually block-wise (#35730) * REF: move towards making _apply_blockwise actually block-wise * mypy fixup * mypy fixup * Series->_constructor * dummy commit to force CI --- pandas/core/window/rolling.py | 75 ++++++++++++++++++++++++----------- 1 file changed, 52 insertions(+), 23 deletions(-) diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py index f516871f789d0..f7e81f41b8675 100644 --- a/pandas/core/window/rolling.py +++ b/pandas/core/window/rolling.py @@ -6,13 +6,23 @@ from functools import partial import inspect from textwrap import dedent -from typing import Callable, Dict, List, Optional, Set, Tuple, Type, Union +from typing import ( + TYPE_CHECKING, + Callable, + Dict, + List, + Optional, + Set, + Tuple, + Type, + Union, +) import numpy as np from pandas._libs.tslibs import BaseOffset, to_offset import pandas._libs.window.aggregations as window_aggregations -from pandas._typing import ArrayLike, Axis, FrameOrSeries, Label +from pandas._typing import ArrayLike, Axis, FrameOrSeriesUnion, Label from pandas.compat._optional import import_optional_dependency from pandas.compat.numpy import function as nv from pandas.util._decorators import Appender, Substitution, cache_readonly, doc @@ -55,6 +65,9 @@ ) from pandas.core.window.numba_ import generate_numba_apply_func +if TYPE_CHECKING: + from pandas import Series + def calculate_center_offset(window) -> int: """ @@ -145,7 +158,7 @@ class _Window(PandasObject, ShallowMixin, SelectionMixin): def __init__( self, - obj: FrameOrSeries, + obj: FrameOrSeriesUnion, window=None, min_periods: Optional[int] = None, center: bool = False, @@ -219,7 +232,7 @@ def _validate_get_window_bounds_signature(window: BaseIndexer) -> None: f"get_window_bounds" ) - def _create_blocks(self, obj: FrameOrSeries): + def _create_blocks(self, obj: FrameOrSeriesUnion): """ Split data into blocks & return conformed data. """ @@ -381,7 +394,7 @@ def _wrap_result(self, result, block=None, obj=None): return type(obj)(result, index=index, columns=block.columns) return result - def _wrap_results(self, results, obj, skipped: List[int]) -> FrameOrSeries: + def _wrap_results(self, results, obj, skipped: List[int]) -> FrameOrSeriesUnion: """ Wrap the results. 
@@ -394,22 +407,23 @@ def _wrap_results(self, results, obj, skipped: List[int]) -> FrameOrSeries: """ from pandas import Series, concat + if obj.ndim == 1: + if not results: + raise DataError("No numeric types to aggregate") + assert len(results) == 1 + return Series(results[0], index=obj.index, name=obj.name) + exclude: List[Label] = [] - if obj.ndim == 2: - orig_blocks = list(obj._to_dict_of_blocks(copy=False).values()) - for i in skipped: - exclude.extend(orig_blocks[i].columns) - else: - orig_blocks = [obj] + orig_blocks = list(obj._to_dict_of_blocks(copy=False).values()) + for i in skipped: + exclude.extend(orig_blocks[i].columns) kept_blocks = [blk for i, blk in enumerate(orig_blocks) if i not in skipped] final = [] for result, block in zip(results, kept_blocks): - result = self._wrap_result(result, block=block, obj=obj) - if result.ndim == 1: - return result + result = type(obj)(result, index=obj.index, columns=block.columns) final.append(result) exclude = exclude or [] @@ -488,13 +502,31 @@ def _get_window_indexer(self, window: int) -> BaseIndexer: return VariableWindowIndexer(index_array=self._on.asi8, window_size=window) return FixedWindowIndexer(window_size=window) + def _apply_series(self, homogeneous_func: Callable[..., ArrayLike]) -> "Series": + """ + Series version of _apply_blockwise + """ + _, obj = self._create_blocks(self._selected_obj) + values = obj.values + + try: + values = self._prep_values(obj.values) + except (TypeError, NotImplementedError) as err: + raise DataError("No numeric types to aggregate") from err + + result = homogeneous_func(values) + return obj._constructor(result, index=obj.index, name=obj.name) + def _apply_blockwise( self, homogeneous_func: Callable[..., ArrayLike] - ) -> FrameOrSeries: + ) -> FrameOrSeriesUnion: """ Apply the given function to the DataFrame broken down into homogeneous sub-frames. """ + if self._selected_obj.ndim == 1: + return self._apply_series(homogeneous_func) + # This isn't quite blockwise, since `blocks` is actually a collection # of homogenenous DataFrames. blocks, obj = self._create_blocks(self._selected_obj) @@ -505,12 +537,9 @@ def _apply_blockwise( try: values = self._prep_values(b.values) - except (TypeError, NotImplementedError) as err: - if isinstance(obj, ABCDataFrame): - skipped.append(i) - continue - else: - raise DataError("No numeric types to aggregate") from err + except (TypeError, NotImplementedError): + skipped.append(i) + continue result = homogeneous_func(values) results.append(result) @@ -2234,7 +2263,7 @@ def _apply( def _constructor(self): return Rolling - def _create_blocks(self, obj: FrameOrSeries): + def _create_blocks(self, obj: FrameOrSeriesUnion): """ Split data into blocks & return conformed data. 
""" @@ -2275,7 +2304,7 @@ def _get_window_indexer(self, window: int) -> GroupbyRollingIndexer: if isinstance(self.window, BaseIndexer): rolling_indexer = type(self.window) indexer_kwargs = self.window.__dict__ - assert isinstance(indexer_kwargs, dict) + assert isinstance(indexer_kwargs, dict) # for mypy # We'll be using the index of each group later indexer_kwargs.pop("index_array", None) elif self.is_freq_type: From eca60680df2d4b7973d9e5b8252b7b9be3f96c3c Mon Sep 17 00:00:00 2001 From: 21CSM <69096687+21CSM@users.noreply.github.com> Date: Sun, 23 Aug 2020 03:40:29 -0400 Subject: [PATCH 320/632] DOC: Fix code of conduct link (#35857) Fixes #35855 --- web/pandas/about/team.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/web/pandas/about/team.md b/web/pandas/about/team.md index 8eb2edebec817..39f63202e1986 100644 --- a/web/pandas/about/team.md +++ b/web/pandas/about/team.md @@ -2,7 +2,7 @@ ## Contributors -_pandas_ is made with love by more than [1,500 volunteer contributors](https://github.com/pandas-dev/pandas/graphs/contributors). +_pandas_ is made with love by more than [2,000 volunteer contributors](https://github.com/pandas-dev/pandas/graphs/contributors). If you want to support pandas development, you can find information in the [donations page](../donate.html). @@ -42,7 +42,7 @@ If you want to support pandas development, you can find information in the [dona > or anyone willing to increase the diversity of our team. > We have identified visible gaps and obstacles in sustaining diversity and inclusion in the open-source communities and we are proactive in increasing > the diversity of our team. -> We have a [code of conduct]({base_url}/community/coc.html) to ensure a friendly and welcoming environment. +> We have a [code of conduct](../community/coc.html) to ensure a friendly and welcoming environment. > Please send an email to [pandas-code-of-conduct-committee](mailto:pandas-coc@googlegroups.com), if you think we can do a > better job at achieving this goal. From a23af4e19946779b57092af42e9b8f1cc3d01275 Mon Sep 17 00:00:00 2001 From: Daniel Saxton <2658661+dsaxton@users.noreply.github.com> Date: Mon, 24 Aug 2020 01:50:11 -0500 Subject: [PATCH 321/632] DOC: Mention NA for missing data in README (#35638) --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index a72e8402e68a0..a2f2f1c04442a 100644 --- a/README.md +++ b/README.md @@ -32,7 +32,7 @@ its way towards this goal. 
Here are just a few of the things that pandas does well:
 
 - Easy handling of [**missing data**][missing-data] (represented as
- `NaN`) in floating point as well as non-floating point data
+ `NaN`, `NA`, or `NaT`) in floating point as well as non-floating point data
 - Size mutability: columns can be [**inserted and
 deleted**][insertion-deletion] from DataFrame and higher
 dimensional objects

From da667afcf841c24b3f4a353acc50470867abcae3 Mon Sep 17 00:00:00 2001
From: jbrockmendel
Date: Mon, 24 Aug 2020 07:24:04 -0700
Subject: [PATCH 322/632] CLN/PERF: delay evaluation of get_day_of_month (#35866)

---
 pandas/_libs/tslibs/offsets.pyx | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx
index 7f0314d737619..161e5f4e54f51 100644
--- a/pandas/_libs/tslibs/offsets.pyx
+++ b/pandas/_libs/tslibs/offsets.pyx
@@ -3705,7 +3705,7 @@ cdef inline void _shift_months(const int64_t[:] dtindex,
 """See shift_months.__doc__"""
 cdef:
 Py_ssize_t i
- int months_to_roll, compare_day
+ int months_to_roll
 npy_datetimestruct dts
 
 for i in range(count):
@@ -3715,10 +3715,8 @@ cdef inline void _shift_months(const int64_t[:] dtindex,
 dt64_to_dtstruct(dtindex[i], &dts)
 
 months_to_roll = months
- compare_day = get_day_of_month(&dts, day_opt)
 
- months_to_roll = roll_convention(dts.day, months_to_roll,
- compare_day)
+ months_to_roll = _roll_qtrday(&dts, months_to_roll, 0, day_opt)
 
 dts.year = year_add_months(dts, months_to_roll)
 dts.month = month_add_months(dts, months_to_roll)

From 07983803b0a8462f9cf24b15d3fb4792ed866f44 Mon Sep 17 00:00:00 2001
From: guru kiran <47276342+gurukiran07@users.noreply.github.com>
Date: Mon, 24 Aug 2020 21:06:17 +0530
Subject: [PATCH 323/632] DOC: Updated aggregate docstring (#35042)

* DOC: Updated aggregate docstring

* DOC: Updated aggregate docstring

* Update pandas/core/generic.py

Co-authored-by: Marco Gorelli <33491632+MarcoGorelli@users.noreply.github.com>

* Update generic.py

* Update generic.py

* Revert "Update generic.py"

This reverts commit 15ecaf724e98c5bcb2d459e720ca60e22e758346.

* Revert "Revert "Update generic.py""

This reverts commit cc231c80a7a0bf9a18f3d4342e156d9d95c5ac8b.

* Updated docstring of agg

* Trailing whitespace removed

* DOC: Updated docstring of agg

* Update generic.py

* Updated Docstring

Co-authored-by: Marco Gorelli <33491632+MarcoGorelli@users.noreply.github.com>
---
 pandas/core/generic.py | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index fe412bc0ce937..9f36405bf6428 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -5169,6 +5169,9 @@ def pipe(self, func, *args, **kwargs):
 -----
 `agg` is an alias for `aggregate`. Use the alias.
 
+ In pandas, agg, like most operations, ignores missing values and
+ computes the result using only the values that are present.
+
 A passed user-defined-function will be passed a Series for evaluation.
{examples}"""
 )
 
From 03fc7f97c96c1d6f474558cddff1112aebba47fc Mon Sep 17 00:00:00 2001
From: Terji Petersen
Date: Mon, 24 Aug 2020 23:55:43 +0100
Subject: [PATCH 324/632] DEPR: deprecate dtype param in Index.copy (#35853)

---
 doc/source/whatsnew/v1.2.0.rst | 2 +-
 pandas/core/indexes/base.py | 9 +++++++++
 pandas/core/indexes/multi.py | 19 ++++++++++++++-----
 pandas/core/indexes/range.py | 11 +++++++++--
 pandas/tests/indexes/common.py | 7 ++++++-
 pandas/tests/indexes/test_base.py | 5 -----
 pandas/tests/indexes/test_common.py | 8 ++------
 pandas/tests/indexes/test_numeric.py | 4 ++--
 8 files changed, 43 insertions(+), 22 deletions(-)

diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index 09a5bcb0917c2..adc1806523d6e 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -143,7 +143,7 @@ See :ref:`install.dependencies` and :ref:`install.optional_dependencies` for mor
 Deprecations
 ~~~~~~~~~~~~
 - Deprecated parameter ``inplace`` in :meth:`MultiIndex.set_codes` and :meth:`MultiIndex.set_levels` (:issue:`35626`)
--
+- Deprecated parameter ``dtype`` of :meth:`Index.copy` for all index classes; use :meth:`Index.astype` instead to change the dtype (:issue:`35853`)
 -

 .. ---------------------------------------------------------------------------

diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 623ce68201492..ceb109fdf6d7a 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -800,6 +800,9 @@ def copy(self, name=None, deep=False, dtype=None, names=None):
 deep : bool, default False
 dtype : numpy dtype or pandas type, optional
 Set dtype for new object.
+
+ .. deprecated:: 1.2.0
+ Use the ``astype`` method instead.
 names : list-like, optional
 Kept for compatibility with MultiIndex. Should not be used.
 
@@ -820,6 +823,12 @@ def copy(self, name=None, deep=False, dtype=None, names=None):
 new_index = self._shallow_copy(name=name)
 
 if dtype:
+ warnings.warn(
+ "parameter dtype is deprecated and will be removed in a future "
+ "version. Use the astype method instead.",
+ FutureWarning,
+ stacklevel=2,
+ )
 new_index = new_index.astype(dtype)
 return new_index
 
diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py
index ffbd03d0c3ba7..b29c27982f087 100644
--- a/pandas/core/indexes/multi.py
+++ b/pandas/core/indexes/multi.py
@@ -1030,7 +1030,6 @@ def _shallow_copy(
 name=lib.no_default,
 levels=None,
 codes=None,
- dtype=None,
 sortorder=None,
 names=lib.no_default,
 _set_identity: bool = True,
@@ -1041,7 +1040,7 @@ def _shallow_copy(
 names = name if name is not lib.no_default else self.names
 
 if values is not None:
- assert levels is None and codes is None and dtype is None
+ assert levels is None and codes is None
 return MultiIndex.from_tuples(values, sortorder=sortorder, names=names)
 
 levels = levels if levels is not None else self.levels
@@ -1050,7 +1049,6 @@ def _shallow_copy(
 result = MultiIndex(
 levels=levels,
 codes=codes,
- dtype=dtype,
 sortorder=sortorder,
 names=names,
 verify_integrity=False,
@@ -1092,6 +1090,8 @@ def copy(
 ----------
 names : sequence, optional
 dtype : numpy dtype or pandas type, optional
+
+ ..
deprecated:: 1.2.0 levels : sequence, optional codes : sequence, optional deep : bool, default False @@ -1117,15 +1117,24 @@ def copy( if codes is None: codes = deepcopy(self.codes) - return self._shallow_copy( + new_index = self._shallow_copy( levels=levels, codes=codes, names=names, - dtype=dtype, sortorder=self.sortorder, _set_identity=_set_identity, ) + if dtype: + warnings.warn( + "parameter dtype is deprecated and will be removed in a future " + "version. Use the astype method instead.", + FutureWarning, + stacklevel=2, + ) + new_index = new_index.astype(dtype) + return new_index + def __array__(self, dtype=None) -> np.ndarray: """ the array interface, return my values """ return self.values diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py index c65c3d5ff3d9c..c5572a9de7fa5 100644 --- a/pandas/core/indexes/range.py +++ b/pandas/core/indexes/range.py @@ -390,10 +390,17 @@ def _shallow_copy(self, values=None, name: Label = no_default): @doc(Int64Index.copy) def copy(self, name=None, deep=False, dtype=None, names=None): - self._validate_dtype(dtype) - name = self._validate_names(name=name, names=names, deep=deep)[0] new_index = self._shallow_copy(name=name) + + if dtype: + warnings.warn( + "parameter dtype is deprecated and will be removed in a future " + "version. Use the astype method instead.", + FutureWarning, + stacklevel=2, + ) + new_index = new_index.astype(dtype) return new_index def _minmax(self, meth: str): diff --git a/pandas/tests/indexes/common.py b/pandas/tests/indexes/common.py index 98f7c0eadb4bb..e4d0b46f7c716 100644 --- a/pandas/tests/indexes/common.py +++ b/pandas/tests/indexes/common.py @@ -270,7 +270,7 @@ def test_copy_name(self, index): s3 = s1 * s2 assert s3.index.name == "mario" - def test_name2(self, index): + def test_copy_name2(self, index): # gh-35592 if isinstance(index, MultiIndex): return @@ -284,6 +284,11 @@ def test_name2(self, index): with pytest.raises(TypeError, match=msg): index.copy(name=[["mario"]]) + def test_copy_dtype_deprecated(self, index): + # GH35853 + with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): + index.copy(dtype=object) + def test_ensure_copied_data(self, index): # Check the "copy" argument of each Index.__new__ is honoured # GH12309 diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py index 70eb9e502f78a..aee4b16621b4d 100644 --- a/pandas/tests/indexes/test_base.py +++ b/pandas/tests/indexes/test_base.py @@ -62,11 +62,6 @@ def test_new_axis(self, index): assert new_index.ndim == 2 assert isinstance(new_index, np.ndarray) - @pytest.mark.parametrize("index", ["int", "uint", "float"], indirect=True) - def test_copy_and_deepcopy(self, index): - new_copy2 = index.copy(dtype=int) - assert new_copy2.dtype.kind == "i" - def test_constructor_regular(self, index): tm.assert_contains_all(index, index) diff --git a/pandas/tests/indexes/test_common.py b/pandas/tests/indexes/test_common.py index 02a173eb4958d..db260b71e7186 100644 --- a/pandas/tests/indexes/test_common.py +++ b/pandas/tests/indexes/test_common.py @@ -374,8 +374,7 @@ def test_has_duplicates(self, index): "dtype", ["int64", "uint64", "float64", "category", "datetime64[ns]", "timedelta64[ns]"], ) - @pytest.mark.parametrize("copy", [True, False]) - def test_astype_preserves_name(self, index, dtype, copy): + def test_astype_preserves_name(self, index, dtype): # https://github.com/pandas-dev/pandas/issues/32013 if isinstance(index, MultiIndex): index.names = ["idx" + str(i) for i in 
range(index.nlevels)] @@ -384,10 +383,7 @@ def test_astype_preserves_name(self, index, dtype, copy): try: # Some of these conversions cannot succeed so we use a try / except - if copy: - result = index.copy(dtype=dtype) - else: - result = index.astype(dtype) + result = index.astype(dtype) except (ValueError, TypeError, NotImplementedError, SystemError): return diff --git a/pandas/tests/indexes/test_numeric.py b/pandas/tests/indexes/test_numeric.py index bfcac5d433d2c..e6f455e60eee3 100644 --- a/pandas/tests/indexes/test_numeric.py +++ b/pandas/tests/indexes/test_numeric.py @@ -394,7 +394,7 @@ def test_identical(self): same_values_different_type = Index(i, dtype=object) assert not i.identical(same_values_different_type) - i = index.copy(dtype=object) + i = index.astype(dtype=object) i = i.rename("foo") same_values = Index(i, dtype=object) assert same_values.identical(i) @@ -402,7 +402,7 @@ def test_identical(self): assert not i.identical(index) assert Index(same_values, name="foo", dtype=object).identical(i) - assert not index.copy(dtype=object).identical(index.copy(dtype=self._dtype)) + assert not index.astype(dtype=object).identical(index.astype(dtype=self._dtype)) def test_union_noncomparable(self): # corner case, non-Int64Index From a3e94de99f1fda5ae8c65d458ae3f77e3af33e55 Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Mon, 24 Aug 2020 16:32:53 -0700 Subject: [PATCH 325/632] REF: use Block.apply in cython_agg_blocks (#35854) --- pandas/core/groupby/generic.py | 41 +++++++++++++++++----------------- 1 file changed, 20 insertions(+), 21 deletions(-) diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index 0edbfe3d67ca5..1198baab12ac1 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -1069,16 +1069,17 @@ def cast_agg_result(result, values: ArrayLike, how: str) -> ArrayLike: # reshape to be valid for non-Extension Block result = result.reshape(1, -1) + elif isinstance(result, np.ndarray) and result.ndim == 1: + # We went through a SeriesGroupByPath and need to reshape + result = result.reshape(1, -1) + return result - def blk_func(block: "Block") -> List["Block"]: - new_blocks: List["Block"] = [] + def blk_func(bvalues: ArrayLike) -> ArrayLike: - result = no_result - locs = block.mgr_locs.as_array try: result, _ = self.grouper.aggregate( - block.values, how, axis=1, min_count=min_count + bvalues, how, axis=1, min_count=min_count ) except NotImplementedError: # generally if we have numeric_only=False @@ -1091,12 +1092,17 @@ def blk_func(block: "Block") -> List["Block"]: assert how == "ohlc" raise + obj: Union[Series, DataFrame] # call our grouper again with only this block - obj = self.obj[data.items[locs]] - if obj.shape[1] == 1: - # Avoid call to self.values that can occur in DataFrame - # reductions; see GH#28949 - obj = obj.iloc[:, 0] + if isinstance(bvalues, ExtensionArray): + # TODO(EA2D): special case not needed with 2D EAs + obj = Series(bvalues) + else: + obj = DataFrame(bvalues.T) + if obj.shape[1] == 1: + # Avoid call to self.values that can occur in DataFrame + # reductions; see GH#28949 + obj = obj.iloc[:, 0] # Create SeriesGroupBy with observed=True so that it does # not try to add missing categories if grouping over multiple @@ -1114,21 +1120,14 @@ def blk_func(block: "Block") -> List["Block"]: # unwrap DataFrame to get array result = result._mgr.blocks[0].values - if isinstance(result, np.ndarray) and result.ndim == 1: - result = result.reshape(1, -1) - res_values = cast_agg_result(result, block.values, how) - 
agg_block = block.make_block(res_values) - new_blocks = [agg_block] - else: - res_values = cast_agg_result(result, block.values, how) - agg_block = block.make_block(res_values) - new_blocks = [agg_block] - return new_blocks + + res_values = cast_agg_result(result, bvalues, how) + return res_values skipped: List[int] = [] for i, block in enumerate(data.blocks): try: - nbs = blk_func(block) + nbs = block.apply(blk_func) except (NotImplementedError, TypeError): # TypeError -> we may have an exception in trying to aggregate # continue and exclude the block From 0fde208475ee335e731599291c4f3d5b80d28d6a Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Mon, 24 Aug 2020 16:35:40 -0700 Subject: [PATCH 326/632] REF: implement Block.reduce for DataFrame._reduce (#35867) --- pandas/core/frame.py | 10 ++++------ pandas/core/internals/blocks.py | 15 +++++++++++++++ pandas/core/internals/managers.py | 29 ++++++++--------------------- 3 files changed, 27 insertions(+), 27 deletions(-) diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 837bd35414773..606bd4cc3b52d 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -8647,13 +8647,11 @@ def blk_func(values): return op(values, axis=1, skipna=skipna, **kwds) # After possibly _get_data and transposing, we are now in the - # simple case where we can use BlockManager._reduce + # simple case where we can use BlockManager.reduce res = df._mgr.reduce(blk_func) - assert isinstance(res, dict) - if len(res): - assert len(res) == max(list(res.keys())) + 1, res.keys() - out = df._constructor_sliced(res, index=range(len(res)), dtype=out_dtype) - out.index = df.columns + out = df._constructor(res,).iloc[0].rename(None) + if out_dtype is not None: + out = out.astype(out_dtype) if axis == 0 and is_object_dtype(out.dtype): out[:] = coerce_to_dtypes(out.values, df.dtypes) return out diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index f3286b3c20965..c62be4f767f00 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -346,6 +346,21 @@ def apply(self, func, **kwargs) -> List["Block"]: return self._split_op_result(result) + def reduce(self, func) -> List["Block"]: + # We will apply the function and reshape the result into a single-row + # Block with the same mgr_locs; squeezing will be done at a higher level + assert self.ndim == 2 + + result = func(self.values) + if np.ndim(result) == 0: + # TODO(EA2D): special case not needed with 2D EAs + res_values = np.array([[result]]) + else: + res_values = result.reshape(-1, 1) + + nb = self.make_block(res_values) + return [nb] + def _split_op_result(self, result) -> List["Block"]: # See also: split_and_operate if is_extension_array_dtype(result) and result.ndim > 1: diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py index f05d4cf1c4be6..297ad3077ef1d 100644 --- a/pandas/core/internals/managers.py +++ b/pandas/core/internals/managers.py @@ -330,31 +330,18 @@ def _verify_integrity(self) -> None: f"tot_items: {tot_items}" ) - def reduce(self, func): + def reduce(self: T, func) -> T: # If 2D, we assume that we're operating column-wise - if self.ndim == 1: - # we'll be returning a scalar - blk = self.blocks[0] - return func(blk.values) + assert self.ndim == 2 - res = {} + res_blocks = [] for blk in self.blocks: - bres = func(blk.values) - - if np.ndim(bres) == 0: - # EA - assert blk.shape[0] == 1 - new_res = zip(blk.mgr_locs.as_array, [bres]) - else: - assert bres.ndim == 1, bres.shape - assert blk.shape[0] == len(bres), 
(blk.shape, bres.shape) - new_res = zip(blk.mgr_locs.as_array, bres) - - nr = dict(new_res) - assert not any(key in res for key in nr) - res.update(nr) + nbs = blk.reduce(func) + res_blocks.extend(nbs) - return res + index = Index([0]) # placeholder + new_mgr = BlockManager.from_blocks(res_blocks, [self.items, index]) + return new_mgr def operate_blockwise(self, other: "BlockManager", array_op) -> "BlockManager": """ From 02b0a86da5a972375384cbc67c4d833b692caaba Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Mon, 24 Aug 2020 16:36:49 -0700 Subject: [PATCH 327/632] REF: make window _apply_blockwise actually blockwise (#35861) --- pandas/core/window/rolling.py | 71 +++++++++++++++++++++++------------ 1 file changed, 46 insertions(+), 25 deletions(-) diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py index f7e81f41b8675..a70247d9f7f9c 100644 --- a/pandas/core/window/rolling.py +++ b/pandas/core/window/rolling.py @@ -66,7 +66,8 @@ from pandas.core.window.numba_ import generate_numba_apply_func if TYPE_CHECKING: - from pandas import Series + from pandas import DataFrame, Series + from pandas.core.internals import Block # noqa:F401 def calculate_center_offset(window) -> int: @@ -418,35 +419,40 @@ def _wrap_results(self, results, obj, skipped: List[int]) -> FrameOrSeriesUnion: for i in skipped: exclude.extend(orig_blocks[i].columns) - kept_blocks = [blk for i, blk in enumerate(orig_blocks) if i not in skipped] - - final = [] - for result, block in zip(results, kept_blocks): - - result = type(obj)(result, index=obj.index, columns=block.columns) - final.append(result) - - exclude = exclude or [] columns = [c for c in self._selected_obj.columns if c not in exclude] - if not columns and not len(final) and exclude: + if not columns and not len(results) and exclude: raise DataError("No numeric types to aggregate") - elif not len(final): + elif not len(results): return obj.astype("float64") - df = concat(final, axis=1).reindex(columns=columns, copy=False) + df = concat(results, axis=1).reindex(columns=columns, copy=False) + self._insert_on_column(df, obj) + return df + def _insert_on_column(self, result: "DataFrame", obj: "DataFrame"): # if we have an 'on' column we want to put it back into # the results in the same location + from pandas import Series + if self.on is not None and not self._on.equals(obj.index): name = self._on.name extra_col = Series(self._on, index=obj.index, name=name) - if name not in df.columns and name not in df.index.names: - new_loc = len(df.columns) - df.insert(new_loc, name, extra_col) - elif name in df.columns: + if name in result.columns: # TODO: sure we want to overwrite results? - df[name] = extra_col - return df + result[name] = extra_col + elif name in result.index.names: + pass + elif name in self._selected_obj.columns: + # insert in the same location as we had in _selected_obj + old_cols = self._selected_obj.columns + new_cols = result.columns + old_loc = old_cols.get_loc(name) + overlap = new_cols.intersection(old_cols[:old_loc]) + new_loc = len(overlap) + result.insert(new_loc, name, extra_col) + else: + # insert at the end + result[name] = extra_col def _center_window(self, result, window) -> np.ndarray: """ @@ -530,21 +536,36 @@ def _apply_blockwise( # This isn't quite blockwise, since `blocks` is actually a collection # of homogenenous DataFrames. 
blocks, obj = self._create_blocks(self._selected_obj) + mgr = obj._mgr + + def hfunc(bvalues: ArrayLike) -> ArrayLike: + # TODO(EA2D): getattr unnecessary with 2D EAs + values = self._prep_values(getattr(bvalues, "T", bvalues)) + res_values = homogeneous_func(values) + return getattr(res_values, "T", res_values) skipped: List[int] = [] - results: List[ArrayLike] = [] - for i, b in enumerate(blocks): + res_blocks: List["Block"] = [] + for i, blk in enumerate(mgr.blocks): try: - values = self._prep_values(b.values) + nbs = blk.apply(hfunc) except (TypeError, NotImplementedError): skipped.append(i) continue - result = homogeneous_func(values) - results.append(result) + res_blocks.extend(nbs) + + if not len(res_blocks) and skipped: + raise DataError("No numeric types to aggregate") + elif not len(res_blocks): + return obj.astype("float64") - return self._wrap_results(results, obj, skipped) + new_cols = mgr.reset_dropped_locs(res_blocks, skipped) + new_mgr = type(mgr).from_blocks(res_blocks, [new_cols, obj.index]) + out = obj._constructor(new_mgr) + self._insert_on_column(out, obj) + return out def _apply( self, From 5a0059d51e2005322ddca1153179b988a79cd6e7 Mon Sep 17 00:00:00 2001 From: Matthew Roeschke Date: Mon, 24 Aug 2020 16:42:01 -0700 Subject: [PATCH 328/632] COMPAT: Ensure rolling indexers return intp during take operations (#35875) --- pandas/core/window/indexers.py | 16 +++++++++------- pandas/tests/resample/test_resampler_grouper.py | 3 +-- pandas/tests/window/test_api.py | 5 ++--- pandas/tests/window/test_apply.py | 3 +-- pandas/tests/window/test_grouper.py | 13 +------------ pandas/tests/window/test_rolling.py | 4 +--- pandas/tests/window/test_timeseries_window.py | 3 --- 7 files changed, 15 insertions(+), 32 deletions(-) diff --git a/pandas/core/window/indexers.py b/pandas/core/window/indexers.py index 7cbe34cdebf9f..7c76a8e2a0b22 100644 --- a/pandas/core/window/indexers.py +++ b/pandas/core/window/indexers.py @@ -7,6 +7,8 @@ from pandas._libs.window.indexers import calculate_variable_window_bounds from pandas.util._decorators import Appender +from pandas.core.dtypes.common import ensure_platform_int + from pandas.tseries.offsets import Nano get_window_bounds_doc = """ @@ -296,9 +298,9 @@ def get_window_bounds( start_arrays = [] end_arrays = [] window_indicies_start = 0 - for key, indicies in self.groupby_indicies.items(): + for key, indices in self.groupby_indicies.items(): if self.index_array is not None: - index_array = self.index_array.take(indicies) + index_array = self.index_array.take(ensure_platform_int(indices)) else: index_array = self.index_array indexer = self.rolling_indexer( @@ -307,22 +309,22 @@ def get_window_bounds( **self.indexer_kwargs, ) start, end = indexer.get_window_bounds( - len(indicies), min_periods, center, closed + len(indices), min_periods, center, closed ) start = start.astype(np.int64) end = end.astype(np.int64) # Cannot use groupby_indicies as they might not be monotonic with the object # we're rolling over window_indicies = np.arange( - window_indicies_start, window_indicies_start + len(indicies), + window_indicies_start, window_indicies_start + len(indices), ) - window_indicies_start += len(indicies) + window_indicies_start += len(indices) # Extend as we'll be slicing window like [start, end) window_indicies = np.append( window_indicies, [window_indicies[-1] + 1] ).astype(np.int64) - start_arrays.append(window_indicies.take(start)) - end_arrays.append(window_indicies.take(end)) + 
start_arrays.append(window_indicies.take(ensure_platform_int(start))) + end_arrays.append(window_indicies.take(ensure_platform_int(end))) start = np.concatenate(start_arrays) end = np.concatenate(end_arrays) # GH 35552: Need to adjust start and end based on the nans appended to values diff --git a/pandas/tests/resample/test_resampler_grouper.py b/pandas/tests/resample/test_resampler_grouper.py index f18aaa5e86829..73bf7dafac254 100644 --- a/pandas/tests/resample/test_resampler_grouper.py +++ b/pandas/tests/resample/test_resampler_grouper.py @@ -7,7 +7,7 @@ from pandas.util._test_decorators import async_mark import pandas as pd -from pandas import DataFrame, Series, Timestamp, compat +from pandas import DataFrame, Series, Timestamp import pandas._testing as tm from pandas.core.indexes.datetimes import date_range @@ -319,7 +319,6 @@ def test_resample_groupby_with_label(): tm.assert_frame_equal(result, expected) -@pytest.mark.xfail(not compat.IS64, reason="GH-35148") def test_consistency_with_window(): # consistent return values with window diff --git a/pandas/tests/window/test_api.py b/pandas/tests/window/test_api.py index 28e27791cad35..2c3d8b4608806 100644 --- a/pandas/tests/window/test_api.py +++ b/pandas/tests/window/test_api.py @@ -6,7 +6,7 @@ import pandas.util._test_decorators as td import pandas as pd -from pandas import DataFrame, Index, Series, Timestamp, compat, concat +from pandas import DataFrame, Index, Series, Timestamp, concat import pandas._testing as tm from pandas.core.base import SpecificationError @@ -277,7 +277,7 @@ def test_preserve_metadata(): @pytest.mark.parametrize( "func,window_size,expected_vals", [ - pytest.param( + ( "rolling", 2, [ @@ -289,7 +289,6 @@ def test_preserve_metadata(): [35.0, 40.0, 60.0, 40.0], [60.0, 80.0, 85.0, 80], ], - marks=pytest.mark.xfail(not compat.IS64, reason="GH-35294"), ), ( "expanding", diff --git a/pandas/tests/window/test_apply.py b/pandas/tests/window/test_apply.py index 2aaf6af103e98..bc38634da8941 100644 --- a/pandas/tests/window/test_apply.py +++ b/pandas/tests/window/test_apply.py @@ -4,7 +4,7 @@ from pandas.errors import NumbaUtilError import pandas.util._test_decorators as td -from pandas import DataFrame, Index, MultiIndex, Series, Timestamp, compat, date_range +from pandas import DataFrame, Index, MultiIndex, Series, Timestamp, date_range import pandas._testing as tm @@ -142,7 +142,6 @@ def test_invalid_kwargs_nopython(): @pytest.mark.parametrize("args_kwargs", [[None, {"par": 10}], [(10,), None]]) -@pytest.mark.xfail(not compat.IS64, reason="GH-35294") def test_rolling_apply_args_kwargs(args_kwargs): # GH 33433 def foo(x, par): diff --git a/pandas/tests/window/test_grouper.py b/pandas/tests/window/test_grouper.py index d0a62374d0888..170bf100b3891 100644 --- a/pandas/tests/window/test_grouper.py +++ b/pandas/tests/window/test_grouper.py @@ -2,7 +2,7 @@ import pytest import pandas as pd -from pandas import DataFrame, Series, compat +from pandas import DataFrame, Series import pandas._testing as tm from pandas.core.groupby.groupby import get_groupby @@ -23,7 +23,6 @@ def test_mutated(self): g = get_groupby(self.frame, by="A", mutated=True) assert g.mutated - @pytest.mark.xfail(not compat.IS64, reason="GH-35294") def test_getitem(self): g = self.frame.groupby("A") g_mutated = get_groupby(self.frame, by="A", mutated=True) @@ -56,7 +55,6 @@ def test_getitem_multiple(self): result = r.B.count() tm.assert_series_equal(result, expected) - @pytest.mark.xfail(not compat.IS64, reason="GH-35294") def test_rolling(self): g = 
self.frame.groupby("A") r = g.rolling(window=4) @@ -74,7 +72,6 @@ def test_rolling(self): @pytest.mark.parametrize( "interpolation", ["linear", "lower", "higher", "midpoint", "nearest"] ) - @pytest.mark.xfail(not compat.IS64, reason="GH-35294") def test_rolling_quantile(self, interpolation): g = self.frame.groupby("A") r = g.rolling(window=4) @@ -105,7 +102,6 @@ def func(x): expected = g.apply(func) tm.assert_series_equal(result, expected) - @pytest.mark.xfail(not compat.IS64, reason="GH-35294") def test_rolling_apply(self, raw): g = self.frame.groupby("A") r = g.rolling(window=4) @@ -115,7 +111,6 @@ def test_rolling_apply(self, raw): expected = g.apply(lambda x: x.rolling(4).apply(lambda y: y.sum(), raw=raw)) tm.assert_frame_equal(result, expected) - @pytest.mark.xfail(not compat.IS64, reason="GH-35294") def test_rolling_apply_mutability(self): # GH 14013 df = pd.DataFrame({"A": ["foo"] * 3 + ["bar"] * 3, "B": [1] * 6}) @@ -197,7 +192,6 @@ def test_expanding_apply(self, raw): tm.assert_frame_equal(result, expected) @pytest.mark.parametrize("expected_value,raw_value", [[1.0, True], [0.0, False]]) - @pytest.mark.xfail(not compat.IS64, reason="GH-35294") def test_groupby_rolling(self, expected_value, raw_value): # GH 31754 @@ -215,7 +209,6 @@ def foo(x): ) tm.assert_series_equal(result, expected) - @pytest.mark.xfail(not compat.IS64, reason="GH-35294") def test_groupby_rolling_center_center(self): # GH 35552 series = Series(range(1, 6)) @@ -281,7 +274,6 @@ def test_groupby_rolling_center_center(self): ) tm.assert_frame_equal(result, expected) - @pytest.mark.xfail(not compat.IS64, reason="GH-35294") def test_groupby_subselect_rolling(self): # GH 35486 df = DataFrame( @@ -307,7 +299,6 @@ def test_groupby_subselect_rolling(self): ) tm.assert_series_equal(result, expected) - @pytest.mark.xfail(not compat.IS64, reason="GH-35294") def test_groupby_rolling_custom_indexer(self): # GH 35557 class SimpleIndexer(pd.api.indexers.BaseIndexer): @@ -331,7 +322,6 @@ def get_window_bounds( expected = df.groupby(df.index).rolling(window=3, min_periods=1).sum() tm.assert_frame_equal(result, expected) - @pytest.mark.xfail(not compat.IS64, reason="GH-35294") def test_groupby_rolling_subset_with_closed(self): # GH 35549 df = pd.DataFrame( @@ -356,7 +346,6 @@ def test_groupby_rolling_subset_with_closed(self): ) tm.assert_series_equal(result, expected) - @pytest.mark.xfail(not compat.IS64, reason="GH-35294") def test_groupby_subset_rolling_subset_with_closed(self): # GH 35549 df = pd.DataFrame( diff --git a/pandas/tests/window/test_rolling.py b/pandas/tests/window/test_rolling.py index bea239a245a4f..8d72e2cb92ca9 100644 --- a/pandas/tests/window/test_rolling.py +++ b/pandas/tests/window/test_rolling.py @@ -7,7 +7,7 @@ import pandas.util._test_decorators as td import pandas as pd -from pandas import DataFrame, Series, compat, date_range +from pandas import DataFrame, Series, date_range import pandas._testing as tm from pandas.core.window import Rolling @@ -150,7 +150,6 @@ def test_closed_one_entry(func): @pytest.mark.parametrize("func", ["min", "max"]) -@pytest.mark.xfail(not compat.IS64, reason="GH-35294") def test_closed_one_entry_groupby(func): # GH24718 ser = pd.DataFrame( @@ -683,7 +682,6 @@ def test_iter_rolling_datetime(expected, expected_index, window): ), ], ) -@pytest.mark.xfail(not compat.IS64, reason="GH-35294") def test_rolling_positional_argument(grouping, _index, raw): # GH 34605 diff --git a/pandas/tests/window/test_timeseries_window.py b/pandas/tests/window/test_timeseries_window.py index 
90f919d5565b0..8aa4d7103e48a 100644 --- a/pandas/tests/window/test_timeseries_window.py +++ b/pandas/tests/window/test_timeseries_window.py @@ -7,7 +7,6 @@ MultiIndex, Series, Timestamp, - compat, date_range, to_datetime, ) @@ -657,7 +656,6 @@ def agg_by_day(x): tm.assert_frame_equal(result, expected) - @pytest.mark.xfail(not compat.IS64, reason="GH-35294") def test_groupby_monotonic(self): # GH 15130 @@ -687,7 +685,6 @@ def test_groupby_monotonic(self): result = df.groupby("name").rolling("180D", on="date")["amount"].sum() tm.assert_series_equal(result, expected) - @pytest.mark.xfail(not compat.IS64, reason="GH-35294") def test_non_monotonic(self): # GH 13966 (similar to #15130, closed by #15175) From e58260216e001571967bb6abdb464d6480a471cf Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Mon, 24 Aug 2020 16:45:45 -0700 Subject: [PATCH 329/632] REGR: DatetimeIndex.intersection incorrectly raising AssertionError (#35877) --- doc/source/whatsnew/v1.1.2.rst | 2 +- pandas/core/indexes/datetimelike.py | 4 ++-- pandas/tests/indexes/datetimes/test_setops.py | 7 +++++++ 3 files changed, 10 insertions(+), 3 deletions(-) diff --git a/doc/source/whatsnew/v1.1.2.rst b/doc/source/whatsnew/v1.1.2.rst index c1b73c60be92b..af61354470a71 100644 --- a/doc/source/whatsnew/v1.1.2.rst +++ b/doc/source/whatsnew/v1.1.2.rst @@ -14,7 +14,7 @@ including other versions of pandas. Fixed regressions ~~~~~~~~~~~~~~~~~ - +- Regression in :meth:`DatetimeIndex.intersection` incorrectly raising ``AssertionError`` when intersecting against a list (:issue:`35876`) - - diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index 6d9d75a69e91d..9d00f50a65a06 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -704,16 +704,16 @@ def intersection(self, other, sort=False): if result.freq is None: # TODO: no tests rely on this; needed? result = result._with_freq("infer") - assert result.name == res_name + result.name = res_name return result elif not self._can_fast_intersect(other): result = Index.intersection(self, other, sort=sort) - assert result.name == res_name # We need to invalidate the freq because Index.intersection # uses _shallow_copy on a view of self._data, which will preserve # self.freq if we're not careful. 
result = result._with_freq(None)._with_freq("infer")
+ result.name = res_name
 return result
 
 # to make our life easier, "sort" the two ranges
diff --git a/pandas/tests/indexes/datetimes/test_setops.py b/pandas/tests/indexes/datetimes/test_setops.py
index 6670b079ddd29..f19e78323ab23 100644
--- a/pandas/tests/indexes/datetimes/test_setops.py
+++ b/pandas/tests/indexes/datetimes/test_setops.py
@@ -470,6 +470,13 @@ def test_intersection_bug(self):
 tm.assert_index_equal(result, b)
 assert result.freq == b.freq
 
+ def test_intersection_list(self):
+ # GH#35876
+ values = [pd.Timestamp("2020-01-01"), pd.Timestamp("2020-02-01")]
+ idx = pd.DatetimeIndex(values, name="a")
+ res = idx.intersection(values)
+ tm.assert_index_equal(res, idx)
+
 def test_month_range_union_tz_pytz(self, sort):
 from pytz import timezone

From 8607a93d48c06247802fde44e61acc6d9743a3c6 Mon Sep 17 00:00:00 2001
From: Honfung Wong <21543236+onshek@users.noreply.github.com>
Date: Tue, 25 Aug 2020 13:37:03 +0800
Subject: [PATCH 330/632] DOC: fix documentation for pandas.Series.transform (#35885)

---
 pandas/core/generic.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 9f36405bf6428..286da6e1de9d5 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -10703,7 +10703,7 @@ def transform(self, func, *args, **kwargs):
 
 - function
 - string function name
- - list of functions and/or function names, e.g. ``[np.exp. 'sqrt']``
+ - list of functions and/or function names, e.g. ``[np.exp, 'sqrt']``
 - dict of axis labels -> functions, function names or list of such.
 {axis}
 *args

From d3d74c590e2578988a2be48d786ddafa89f91454 Mon Sep 17 00:00:00 2001
From: Ali McMaster
Date: Tue, 25 Aug 2020 07:41:48 +0100
Subject: [PATCH 331/632] TST: Fix test_parquet failures for pyarrow 1.0 (#35814)

---
 pandas/tests/io/test_parquet.py | 23 +++++++++++++++--------
 1 file changed, 15 insertions(+), 8 deletions(-)

diff --git a/pandas/tests/io/test_parquet.py b/pandas/tests/io/test_parquet.py
index 3a3ba99484a3a..4e0c16c71a6a8 100644
--- a/pandas/tests/io/test_parquet.py
+++ b/pandas/tests/io/test_parquet.py
@@ -565,15 +565,22 @@ def test_s3_roundtrip(self, df_compat, s3_resource, pa):
 @pytest.mark.parametrize("partition_col", [["A"], []])
 def test_s3_roundtrip_for_dir(self, df_compat, s3_resource, pa, partition_col):
 # GH #26388
- # https://github.com/apache/arrow/blob/master/python/pyarrow/tests/test_parquet.py#L2716
- # As per pyarrow partitioned columns become 'categorical' dtypes
- # and are added to back of dataframe on read
- if partition_col and pd.compat.is_platform_windows():
- pytest.skip("pyarrow/win incompatibility #35791")
-
 expected_df = df_compat.copy()
- if partition_col:
- expected_df[partition_col] = expected_df[partition_col].astype("category")
+
+ # GH #35791
+ # read_table uses the new Arrow Datasets API since pyarrow 1.0.0
+ # Previously, pyarrow partitioned columns became 'category' dtypes and
+ # were added to the back of the dataframe on read. In the new API, the
+ # category dtype is only used if the partition field is a string.
+ legacy_read_table = LooseVersion(pyarrow.__version__) < LooseVersion("1.0.0") + if partition_col and legacy_read_table: + partition_col_type = "category" + else: + partition_col_type = "int32" + + expected_df[partition_col] = expected_df[partition_col].astype( + partition_col_type + ) check_round_trip( df_compat, From e87b8bcaa14633dc5a0dbdbb4b5b88345a804050 Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Tue, 25 Aug 2020 05:58:43 -0700 Subject: [PATCH 332/632] BUG: to_dict_of_blocks failing to invalidate item_cache (#35874) --- pandas/core/internals/managers.py | 5 ----- pandas/tests/frame/test_block_internals.py | 18 ++++++++++++++++++ 2 files changed, 18 insertions(+), 5 deletions(-) diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py index 297ad3077ef1d..da1f10f924ae7 100644 --- a/pandas/core/internals/managers.py +++ b/pandas/core/internals/managers.py @@ -896,12 +896,7 @@ def to_dict(self, copy: bool = True): Returns ------- values : a dict of dtype -> BlockManager - - Notes - ----- - This consolidates based on str(dtype) """ - self._consolidate_inplace() bd: Dict[str, List[Block]] = {} for b in self.blocks: diff --git a/pandas/tests/frame/test_block_internals.py b/pandas/tests/frame/test_block_internals.py index c9fec3215d57f..8ecd9066ceff0 100644 --- a/pandas/tests/frame/test_block_internals.py +++ b/pandas/tests/frame/test_block_internals.py @@ -626,3 +626,21 @@ def test_add_column_with_pandas_array(self): assert type(df["c"]._mgr.blocks[0]) == ObjectBlock assert type(df2["c"]._mgr.blocks[0]) == ObjectBlock tm.assert_frame_equal(df, df2) + + +def test_to_dict_of_blocks_item_cache(): + # Calling to_dict_of_blocks should not poison item_cache + df = pd.DataFrame({"a": [1, 2, 3, 4], "b": ["a", "b", "c", "d"]}) + df["c"] = pd.arrays.PandasArray(np.array([1, 2, None, 3], dtype=object)) + mgr = df._mgr + assert len(mgr.blocks) == 3 # i.e. 
not consolidated
+
+ ser = df["b"] # populates item_cache["b"]
+
+ df._to_dict_of_blocks()
+
+ # Check that to_dict_of_blocks didn't break the link between ser and df
+ ser.values[0] = "foo"
+ assert df.loc[0, "b"] == "foo"
+
+ assert df["b"] is ser

From 2f15d1c3c2a5da568e4870d0e5e41529a948bddc Mon Sep 17 00:00:00 2001
From: jbrockmendel
Date: Tue, 25 Aug 2020 05:59:41 -0700
Subject: [PATCH 333/632] REF: reuse _combine instead of reset_dropped_locs (#35884)

---
 pandas/core/groupby/generic.py | 17 ++++++----------
 pandas/core/internals/managers.py | 32 -------------------------------
 pandas/core/window/rolling.py | 3 +--
 3 files changed, 7 insertions(+), 45 deletions(-)

diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py
index 1198baab12ac1..70a8379de64e9 100644
--- a/pandas/core/groupby/generic.py
+++ b/pandas/core/groupby/generic.py
@@ -21,7 +21,6 @@
 Mapping,
 Optional,
 Sequence,
- Tuple,
 Type,
 Union,
 )
@@ -1025,16 +1024,14 @@ def _iterate_slices(self) -> Iterable[Series]:
 def _cython_agg_general(
 self, how: str, alt=None, numeric_only: bool = True, min_count: int = -1
 ) -> DataFrame:
- agg_blocks, agg_items = self._cython_agg_blocks(
+ agg_mgr = self._cython_agg_blocks(
 how, alt=alt, numeric_only=numeric_only, min_count=min_count
 )
- return self._wrap_agged_blocks(agg_blocks, items=agg_items)
+ return self._wrap_agged_blocks(agg_mgr.blocks, items=agg_mgr.items)
 
 def _cython_agg_blocks(
 self, how: str, alt=None, numeric_only: bool = True, min_count: int = -1
- ) -> "Tuple[List[Block], Index]":
- # TODO: the actual managing of mgr_locs is a PITA
- # here, it should happen via BlockManager.combine
+ ) -> BlockManager:
 
 data: BlockManager = self._get_data_to_aggregate()
 
@@ -1124,7 +1121,6 @@ def blk_func(bvalues: ArrayLike) -> ArrayLike:
 res_values = cast_agg_result(result, bvalues, how)
 return res_values
 
- skipped: List[int] = []
 for i, block in enumerate(data.blocks):
 try:
 nbs = block.apply(blk_func)
@@ -1132,7 +1128,7 @@ def blk_func(bvalues: ArrayLike) -> ArrayLike:
 # TypeError -> we may have an exception in trying to aggregate
 # continue and exclude the block
 # NotImplementedError -> "ohlc" with wrong dtype
- skipped.append(i)
+ pass
 else:
 agg_blocks.extend(nbs)
 
@@ -1141,9 +1137,8 @@ def blk_func(bvalues: ArrayLike) -> ArrayLike:
 
 # reset the locs in the blocks to correspond to our
 # current ordering
- agg_items = data.reset_dropped_locs(agg_blocks, skipped)
-
- return agg_blocks, agg_items
+ new_mgr = data._combine(agg_blocks)
+ return new_mgr
 
 def _aggregate_frame(self, func, *args, **kwargs) -> DataFrame:
 if self.grouper.nkeys != 1:
diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py
index da1f10f924ae7..a5372b14d210f 100644
--- a/pandas/core/internals/managers.py
+++ b/pandas/core/internals/managers.py
@@ -1486,38 +1486,6 @@ def unstack(self, unstacker, fill_value) -> "BlockManager":
 bm = BlockManager(new_blocks, [new_columns, new_index])
 return bm
 
- def reset_dropped_locs(self, blocks: List[Block], skipped: List[int]) -> Index:
- """
- Decrement the mgr_locs of the given blocks with `skipped` removed.
-
- Notes
- -----
- Alters each block's mgr_locs inplace.
- """ - ncols = len(self) - - new_locs = [blk.mgr_locs.as_array for blk in blocks] - indexer = np.concatenate(new_locs) - - new_items = self.items.take(np.sort(indexer)) - - if skipped: - # we need to adjust the indexer to account for the - # items we have removed - deleted_items = [self.blocks[i].mgr_locs.as_array for i in skipped] - deleted = np.concatenate(deleted_items) - ai = np.arange(ncols) - mask = np.zeros(ncols) - mask[deleted] = 1 - indexer = (ai - mask.cumsum())[indexer] - - offset = 0 - for blk in blocks: - loc = len(blk.mgr_locs) - blk.mgr_locs = indexer[offset : (offset + loc)] - offset += loc - return new_items - class SingleBlockManager(BlockManager): """ manage a single block with """ diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py index a70247d9f7f9c..baabdf0fca29a 100644 --- a/pandas/core/window/rolling.py +++ b/pandas/core/window/rolling.py @@ -561,8 +561,7 @@ def hfunc(bvalues: ArrayLike) -> ArrayLike: elif not len(res_blocks): return obj.astype("float64") - new_cols = mgr.reset_dropped_locs(res_blocks, skipped) - new_mgr = type(mgr).from_blocks(res_blocks, [new_cols, obj.index]) + new_mgr = mgr._combine(res_blocks) out = obj._constructor(new_mgr) self._insert_on_column(out, obj) return out From c74ed381d2b4d2d379f1867038d963ee5b006ee2 Mon Sep 17 00:00:00 2001 From: Joris Van den Bossche Date: Tue, 25 Aug 2020 22:12:31 +0200 Subject: [PATCH 334/632] DOC: avoid StorageOptions type alias in docstrings (#35894) --- pandas/io/excel/_base.py | 4 ++-- pandas/io/excel/_odfreader.py | 2 +- pandas/io/excel/_openpyxl.py | 2 +- pandas/io/excel/_pyxlsb.py | 2 +- pandas/io/excel/_xlrd.py | 2 +- pandas/io/feather_format.py | 12 ++++++++++-- 6 files changed, 16 insertions(+), 8 deletions(-) diff --git a/pandas/io/excel/_base.py b/pandas/io/excel/_base.py index aaef71910c9ab..3cd0d721bbdc6 100644 --- a/pandas/io/excel/_base.py +++ b/pandas/io/excel/_base.py @@ -200,13 +200,13 @@ Duplicate columns will be specified as 'X', 'X.1', ...'X.N', rather than 'X'...'X'. Passing in False will cause data to be overwritten if there are duplicate names in the columns. -storage_options : StorageOptions +storage_options : dict, optional Extra options that make sense for a particular storage connection, e.g. host, port, username, password, etc., if using a URL that will be parsed by ``fsspec``, e.g., starting "s3://", "gcs://". An error will be raised if providing this argument with a local path or a file-like buffer. See the fsspec and backend storage implementation - docs for the set of allowed keys and values + docs for the set of allowed keys and values. .. versionadded:: 1.2.0 diff --git a/pandas/io/excel/_odfreader.py b/pandas/io/excel/_odfreader.py index a6cd8f524503b..6cbca59aed97e 100644 --- a/pandas/io/excel/_odfreader.py +++ b/pandas/io/excel/_odfreader.py @@ -18,7 +18,7 @@ class _ODFReader(_BaseExcelReader): ---------- filepath_or_buffer : string, path to be parsed or an open readable stream. - storage_options : StorageOptions + storage_options : dict, optional passed to fsspec for appropriate URLs (see ``get_filepath_or_buffer``) """ diff --git a/pandas/io/excel/_openpyxl.py b/pandas/io/excel/_openpyxl.py index 73239190604db..c2730536af8a3 100644 --- a/pandas/io/excel/_openpyxl.py +++ b/pandas/io/excel/_openpyxl.py @@ -479,7 +479,7 @@ def __init__( ---------- filepath_or_buffer : string, path object or Workbook Object to be parsed. 
- storage_options : StorageOptions + storage_options : dict, optional passed to fsspec for appropriate URLs (see ``get_filepath_or_buffer``) """ import_optional_dependency("openpyxl") diff --git a/pandas/io/excel/_pyxlsb.py b/pandas/io/excel/_pyxlsb.py index c0e281ff6c2da..c15a52abe4d53 100644 --- a/pandas/io/excel/_pyxlsb.py +++ b/pandas/io/excel/_pyxlsb.py @@ -19,7 +19,7 @@ def __init__( ---------- filepath_or_buffer : str, path object, or Workbook Object to be parsed. - storage_options : StorageOptions + storage_options : dict, optional passed to fsspec for appropriate URLs (see ``get_filepath_or_buffer``) """ import_optional_dependency("pyxlsb") diff --git a/pandas/io/excel/_xlrd.py b/pandas/io/excel/_xlrd.py index ff1b3c8bdb964..a7fb519af61c6 100644 --- a/pandas/io/excel/_xlrd.py +++ b/pandas/io/excel/_xlrd.py @@ -17,7 +17,7 @@ def __init__(self, filepath_or_buffer, storage_options: StorageOptions = None): ---------- filepath_or_buffer : string, path object or Workbook Object to be parsed. - storage_options : StorageOptions + storage_options : dict, optional passed to fsspec for appropriate URLs (see ``get_filepath_or_buffer``) """ err_msg = "Install xlrd >= 1.0.0 for Excel support" diff --git a/pandas/io/feather_format.py b/pandas/io/feather_format.py index 2d86fa44f22a4..fb606b5ec8aef 100644 --- a/pandas/io/feather_format.py +++ b/pandas/io/feather_format.py @@ -16,14 +16,13 @@ def to_feather(df: DataFrame, path, storage_options: StorageOptions = None, **kw ---------- df : DataFrame path : string file path, or file-like object - storage_options : dict, optional Extra options that make sense for a particular storage connection, e.g. host, port, username, password, etc., if using a URL that will be parsed by ``fsspec``, e.g., starting "s3://", "gcs://". An error will be raised if providing this argument with a local path or a file-like buffer. See the fsspec and backend storage implementation - docs for the set of allowed keys and values + docs for the set of allowed keys and values. .. versionadded:: 1.2.0 @@ -106,6 +105,15 @@ def read_feather( Whether to parallelize reading using multiple threads. .. versionadded:: 0.24.0 + storage_options : dict, optional + Extra options that make sense for a particular storage connection, e.g. + host, port, username, password, etc., if using a URL that will + be parsed by ``fsspec``, e.g., starting "s3://", "gcs://". An error + will be raised if providing this argument with a local path or + a file-like buffer. See the fsspec and backend storage implementation + docs for the set of allowed keys and values. + + .. 
versionadded:: 1.2.0 Returns ------- From 9f3e4290a4591f1bfeefd44c59025526b16befa6 Mon Sep 17 00:00:00 2001 From: Tom Augspurger Date: Tue, 25 Aug 2020 21:21:40 -0500 Subject: [PATCH 335/632] CI: Mark s3 tests parallel safe (#35895) Closes https://github.com/pandas-dev/pandas/issues/35856 --- pandas/tests/io/conftest.py | 24 +++++++++-------- pandas/tests/io/json/test_compression.py | 6 ++--- pandas/tests/io/json/test_pandas.py | 7 ++--- pandas/tests/io/test_parquet.py | 34 +++++++++++++++++------- 4 files changed, 41 insertions(+), 30 deletions(-) diff --git a/pandas/tests/io/conftest.py b/pandas/tests/io/conftest.py index 518f31d73efa9..193baa8c3ed74 100644 --- a/pandas/tests/io/conftest.py +++ b/pandas/tests/io/conftest.py @@ -34,12 +34,13 @@ def feather_file(datapath): @pytest.fixture -def s3so(): - return dict(client_kwargs={"endpoint_url": "http://127.0.0.1:5555/"}) +def s3so(worker_id): + worker_id = "5" if worker_id == "master" else worker_id.lstrip("gw") + return dict(client_kwargs={"endpoint_url": f"http://127.0.0.1:555{worker_id}/"}) -@pytest.fixture(scope="module") -def s3_base(): +@pytest.fixture(scope="session") +def s3_base(worker_id): """ Fixture for mocking S3 interaction. @@ -61,11 +62,13 @@ def s3_base(): # Launching moto in server mode, i.e., as a separate process # with an S3 endpoint on localhost - endpoint_uri = "http://127.0.0.1:5555/" + worker_id = "5" if worker_id == "master" else worker_id.lstrip("gw") + endpoint_port = f"555{worker_id}" + endpoint_uri = f"http://127.0.0.1:{endpoint_port}/" # pipe to null to avoid logging in terminal proc = subprocess.Popen( - shlex.split("moto_server s3 -p 5555"), stdout=subprocess.DEVNULL + shlex.split(f"moto_server s3 -p {endpoint_port}"), stdout=subprocess.DEVNULL ) timeout = 5 @@ -79,7 +82,7 @@ def s3_base(): pass timeout -= 0.1 time.sleep(0.1) - yield + yield endpoint_uri proc.terminate() proc.wait() @@ -119,9 +122,8 @@ def add_tips_files(bucket_name): cli.put_object(Bucket=bucket_name, Key=s3_key, Body=f) bucket = "pandas-test" - endpoint_uri = "http://127.0.0.1:5555/" - conn = boto3.resource("s3", endpoint_url=endpoint_uri) - cli = boto3.client("s3", endpoint_url=endpoint_uri) + conn = boto3.resource("s3", endpoint_url=s3_base) + cli = boto3.client("s3", endpoint_url=s3_base) try: cli.create_bucket(Bucket=bucket) @@ -143,7 +145,7 @@ def add_tips_files(bucket_name): s3fs.S3FileSystem.clear_instance_cache() yield conn - s3 = s3fs.S3FileSystem(client_kwargs={"endpoint_url": "http://127.0.0.1:5555/"}) + s3 = s3fs.S3FileSystem(client_kwargs={"endpoint_url": s3_base}) try: s3.rm(bucket, recursive=True) diff --git a/pandas/tests/io/json/test_compression.py b/pandas/tests/io/json/test_compression.py index 5bb205842269e..c0e3220454bf1 100644 --- a/pandas/tests/io/json/test_compression.py +++ b/pandas/tests/io/json/test_compression.py @@ -34,7 +34,7 @@ def test_read_zipped_json(datapath): @td.skip_if_not_us_locale -def test_with_s3_url(compression, s3_resource): +def test_with_s3_url(compression, s3_resource, s3so): # Bucket "pandas-test" created in tests/io/conftest.py df = pd.read_json('{"a": [1, 2, 3], "b": [4, 5, 6]}') @@ -45,9 +45,7 @@ def test_with_s3_url(compression, s3_resource): s3_resource.Bucket("pandas-test").put_object(Key="test-1", Body=f) roundtripped_df = pd.read_json( - "s3://pandas-test/test-1", - compression=compression, - storage_options=dict(client_kwargs={"endpoint_url": "http://127.0.0.1:5555/"}), + "s3://pandas-test/test-1", compression=compression, storage_options=s3so, ) tm.assert_frame_equal(df, 
roundtripped_df) diff --git a/pandas/tests/io/json/test_pandas.py b/pandas/tests/io/json/test_pandas.py index 64a666079876f..2022abbaee323 100644 --- a/pandas/tests/io/json/test_pandas.py +++ b/pandas/tests/io/json/test_pandas.py @@ -1702,17 +1702,14 @@ def test_json_multiindex(self, dataframe, expected): result = series.to_json(orient="index") assert result == expected - def test_to_s3(self, s3_resource): + def test_to_s3(self, s3_resource, s3so): import time # GH 28375 mock_bucket_name, target_file = "pandas-test", "test.json" df = DataFrame({"x": [1, 2, 3], "y": [2, 4, 6]}) df.to_json( - f"s3://{mock_bucket_name}/{target_file}", - storage_options=dict( - client_kwargs={"endpoint_url": "http://127.0.0.1:5555/"} - ), + f"s3://{mock_bucket_name}/{target_file}", storage_options=s3so, ) timeout = 5 while True: diff --git a/pandas/tests/io/test_parquet.py b/pandas/tests/io/test_parquet.py index 4e0c16c71a6a8..15f9837176315 100644 --- a/pandas/tests/io/test_parquet.py +++ b/pandas/tests/io/test_parquet.py @@ -158,10 +158,6 @@ def check_round_trip( """ write_kwargs = write_kwargs or {"compression": None} read_kwargs = read_kwargs or {} - if isinstance(path, str) and "s3://" in path: - s3so = dict(client_kwargs={"endpoint_url": "http://127.0.0.1:5555/"}) - read_kwargs["storage_options"] = s3so - write_kwargs["storage_options"] = s3so if expected is None: expected = df @@ -555,15 +551,24 @@ def test_s3_roundtrip_explicit_fs(self, df_compat, s3_resource, pa, s3so): write_kwargs=kw, ) - def test_s3_roundtrip(self, df_compat, s3_resource, pa): + def test_s3_roundtrip(self, df_compat, s3_resource, pa, s3so): if LooseVersion(pyarrow.__version__) <= LooseVersion("0.17.0"): pytest.skip() # GH #19134 - check_round_trip(df_compat, pa, path="s3://pandas-test/pyarrow.parquet") + s3so = dict(storage_options=s3so) + check_round_trip( + df_compat, + pa, + path="s3://pandas-test/pyarrow.parquet", + read_kwargs=s3so, + write_kwargs=s3so, + ) @td.skip_if_no("s3fs") @pytest.mark.parametrize("partition_col", [["A"], []]) - def test_s3_roundtrip_for_dir(self, df_compat, s3_resource, pa, partition_col): + def test_s3_roundtrip_for_dir( + self, df_compat, s3_resource, pa, partition_col, s3so + ): # GH #26388 expected_df = df_compat.copy() @@ -587,7 +592,10 @@ def test_s3_roundtrip_for_dir(self, df_compat, s3_resource, pa, partition_col): pa, expected=expected_df, path="s3://pandas-test/parquet_dir", - write_kwargs={"partition_cols": partition_col, "compression": None}, + read_kwargs=dict(storage_options=s3so), + write_kwargs=dict( + partition_cols=partition_col, compression=None, storage_options=s3so + ), check_like=True, repeat=1, ) @@ -761,9 +769,15 @@ def test_filter_row_groups(self, fp): result = read_parquet(path, fp, filters=[("a", "==", 0)]) assert len(result) == 1 - def test_s3_roundtrip(self, df_compat, s3_resource, fp): + def test_s3_roundtrip(self, df_compat, s3_resource, fp, s3so): # GH #19134 - check_round_trip(df_compat, fp, path="s3://pandas-test/fastparquet.parquet") + check_round_trip( + df_compat, + fp, + path="s3://pandas-test/fastparquet.parquet", + read_kwargs=dict(storage_options=s3so), + write_kwargs=dict(compression=None, storage_options=s3so), + ) def test_partition_cols_supported(self, fp, df_full): # GH #23283 From d90b73bdec5600d188d631150f83633f40c5b8a4 Mon Sep 17 00:00:00 2001 From: Richard Shadrach <45562402+rhshadrach@users.noreply.github.com> Date: Tue, 25 Aug 2020 23:21:44 -0400 Subject: [PATCH 336/632] CLN/BUG: Clean/Simplify _wrap_applied_output (#35792) --- 
doc/source/whatsnew/v1.2.0.rst     |  1 +
 pandas/core/groupby/generic.py     | 90 +++++++++---------------------
 pandas/core/indexes/api.py         |  7 ++-
 pandas/tests/groupby/test_apply.py |  7 ++-
 4 files changed, 34 insertions(+), 71 deletions(-)

diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index adc1806523d6e..55570341cf4e8 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -254,6 +254,7 @@ Groupby/resample/rolling
 - Bug in :meth:`DataFrameGroupBy.apply` that would some times throw an erroneous ``ValueError`` if the grouping axis had duplicate entries (:issue:`16646`)
 - Bug when combining methods :meth:`DataFrame.groupby` with :meth:`DataFrame.resample` and :meth:`DataFrame.interpolate` raising an ``TypeError`` (:issue:`35325`)
 - Bug in :meth:`DataFrameGroupBy.apply` where a non-nuisance grouping column would be dropped from the output columns if another groupby method was called before ``.apply()`` (:issue:`34656`)
+- Bug in :meth:`DataFrameGroupBy.apply` that would drop a :class:`CategoricalIndex` when grouped on (:issue:`35792`)
 
 Reshaping
 ^^^^^^^^^
diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py
index 70a8379de64e9..2afa56b50c3c7 100644
--- a/pandas/core/groupby/generic.py
+++ b/pandas/core/groupby/generic.py
@@ -1197,57 +1197,25 @@ def _wrap_applied_output(self, keys, values, not_indexed_same=False):
         if len(keys) == 0:
             return self.obj._constructor(index=keys)
 
-        key_names = self.grouper.names
-
         # GH12824
         first_not_none = next(com.not_none(*values), None)
 
         if first_not_none is None:
-            # GH9684. If all values are None, then this will throw an error.
-            # We'd prefer it return an empty dataframe.
+            # GH9684 - All values are None, return an empty frame.
             return self.obj._constructor()
         elif isinstance(first_not_none, DataFrame):
             return self._concat_objects(keys, values, not_indexed_same=not_indexed_same)
         else:
-            if len(self.grouper.groupings) > 1:
-                key_index = self.grouper.result_index
-
-            else:
-                ping = self.grouper.groupings[0]
-                if len(keys) == ping.ngroups:
-                    key_index = ping.group_index
-                    key_index.name = key_names[0]
-
-                    key_lookup = Index(keys)
-                    indexer = key_lookup.get_indexer(key_index)
-
-                    # reorder the values
-                    values = [values[i] for i in indexer]
-
-                    # update due to the potential reorder
-                    first_not_none = next(com.not_none(*values), None)
-                else:
-
-                    key_index = Index(keys, name=key_names[0])
-
-                # don't use the key indexer
-                if not self.as_index:
-                    key_index = None
+            key_index = self.grouper.result_index if self.as_index else None
 
-            # make Nones an empty object
-            if first_not_none is None:
-                return self.obj._constructor()
-            elif isinstance(first_not_none, NDFrame):
+            if isinstance(first_not_none, Series):
 
                 # this is to silence a DeprecationWarning
                 # TODO: Remove when default dtype of empty Series is object
                 kwargs = first_not_none._construct_axes_dict()
-                if isinstance(first_not_none, Series):
-                    backup = create_series_with_explicit_dtype(
-                        **kwargs, dtype_if_empty=object
-                    )
-                else:
-                    backup = first_not_none._constructor(**kwargs)
+                backup = create_series_with_explicit_dtype(
+                    **kwargs, dtype_if_empty=object
+                )
 
                 values = [x if (x is not None) else backup for x in values]
 
@@ -1256,7 +1224,7 @@ def _wrap_applied_output(self, keys, values, not_indexed_same=False):
             if isinstance(v, (np.ndarray, Index, Series)) or not self.as_index:
                 if isinstance(v, Series):
                     applied_index = self._selected_obj._get_axis(self.axis)
-                    all_indexed_same = all_indexes_same([x.index for x in values])
+                    all_indexed_same =
all_indexes_same((x.index for x in values)) singular_series = len(values) == 1 and applied_index.nlevels == 1 # GH3596 @@ -1288,7 +1256,6 @@ def _wrap_applied_output(self, keys, values, not_indexed_same=False): # GH 8467 return self._concat_objects(keys, values, not_indexed_same=True) - if self.axis == 0 and isinstance(v, ABCSeries): # GH6124 if the list of Series have a consistent name, # then propagate that name to the result. index = v.index.copy() @@ -1301,34 +1268,27 @@ def _wrap_applied_output(self, keys, values, not_indexed_same=False): if len(names) == 1: index.name = list(names)[0] - # normally use vstack as its faster than concat - # and if we have mi-columns - if ( - isinstance(v.index, MultiIndex) - or key_index is None - or isinstance(key_index, MultiIndex) - ): - stacked_values = np.vstack([np.asarray(v) for v in values]) - result = self.obj._constructor( - stacked_values, index=key_index, columns=index - ) - else: - # GH5788 instead of stacking; concat gets the - # dtypes correct - from pandas.core.reshape.concat import concat - - result = concat( - values, - keys=key_index, - names=key_index.names, - axis=self.axis, - ).unstack() - result.columns = index - elif isinstance(v, ABCSeries): + # Combine values + # vstack+constructor is faster than concat and handles MI-columns stacked_values = np.vstack([np.asarray(v) for v in values]) + + if self.axis == 0: + index = key_index + columns = v.index.copy() + if columns.name is None: + # GH6124 - propagate name of Series when it's consistent + names = {v.name for v in values} + if len(names) == 1: + columns.name = list(names)[0] + else: + index = v.index + columns = key_index + stacked_values = stacked_values.T + result = self.obj._constructor( - stacked_values.T, index=v.index, columns=key_index + stacked_values, index=index, columns=columns ) + elif not self.as_index: # We add grouping column below, so create a frame here result = DataFrame( diff --git a/pandas/core/indexes/api.py b/pandas/core/indexes/api.py index 30cc8cf480dcf..d352b001f5d2a 100644 --- a/pandas/core/indexes/api.py +++ b/pandas/core/indexes/api.py @@ -297,15 +297,16 @@ def all_indexes_same(indexes): Parameters ---------- - indexes : list of Index objects + indexes : iterable of Index objects Returns ------- bool True if all indexes contain the same elements, False otherwise. 
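 
     Examples
     --------
     A minimal doctest-style sketch of the intended behavior (illustrative
     only; the import path simply mirrors the module shown in this diff):
 
     >>> import pandas as pd
     >>> from pandas.core.indexes.api import all_indexes_same
     >>> all_indexes_same([pd.Index([1, 2]), pd.Index([1, 2])])
     True
     >>> all_indexes_same(iter([pd.Index([1, 2]), pd.Index([3, 4])]))
     False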
""" - first = indexes[0] - for index in indexes[1:]: + itr = iter(indexes) + first = next(itr) + for index in itr: if not first.equals(index): return False return True diff --git a/pandas/tests/groupby/test_apply.py b/pandas/tests/groupby/test_apply.py index ee38722ffb8ce..a1dcb28a32c6c 100644 --- a/pandas/tests/groupby/test_apply.py +++ b/pandas/tests/groupby/test_apply.py @@ -861,13 +861,14 @@ def test_apply_multi_level_name(category): b = [1, 2] * 5 if category: b = pd.Categorical(b, categories=[1, 2, 3]) + expected_index = pd.CategoricalIndex([1, 2], categories=[1, 2, 3], name="B") + else: + expected_index = pd.Index([1, 2], name="B") df = pd.DataFrame( {"A": np.arange(10), "B": b, "C": list(range(10)), "D": list(range(10))} ).set_index(["A", "B"]) result = df.groupby("B").apply(lambda x: x.sum()) - expected = pd.DataFrame( - {"C": [20, 25], "D": [20, 25]}, index=pd.Index([1, 2], name="B") - ) + expected = pd.DataFrame({"C": [20, 25], "D": [20, 25]}, index=expected_index) tm.assert_frame_equal(result, expected) assert df.index.names == ["A", "B"] From b7a31ebeb873f37939838b3b788dab0e4c41b8de Mon Sep 17 00:00:00 2001 From: Terji Petersen Date: Wed, 26 Aug 2020 12:12:55 +0100 Subject: [PATCH 337/632] PERF: RangeIndex.format performance (#35712) --- doc/source/whatsnew/v0.25.0.rst | 2 +- doc/source/whatsnew/v1.1.2.rst | 5 +++-- pandas/core/indexes/base.py | 4 +++- pandas/core/indexes/category.py | 2 +- pandas/core/indexes/datetimelike.py | 11 ++++++++--- pandas/core/indexes/interval.py | 2 +- pandas/core/indexes/range.py | 11 ++++++++++- pandas/tests/indexes/common.py | 10 ++++++++-- pandas/tests/indexes/period/test_period.py | 6 ++++++ pandas/tests/indexes/ranges/test_range.py | 12 ++++++++++++ 10 files changed, 53 insertions(+), 12 deletions(-) diff --git a/doc/source/whatsnew/v0.25.0.rst b/doc/source/whatsnew/v0.25.0.rst index 3cd920158f774..0f0f009307c75 100644 --- a/doc/source/whatsnew/v0.25.0.rst +++ b/doc/source/whatsnew/v0.25.0.rst @@ -540,7 +540,7 @@ with :attr:`numpy.nan` in the case of an empty :class:`DataFrame` (:issue:`26397 .. ipython:: python - df.describe() + df.describe() ``__str__`` methods now call ``__repr__`` rather than vice versa ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/doc/source/whatsnew/v1.1.2.rst b/doc/source/whatsnew/v1.1.2.rst index af61354470a71..7739a483e3d38 100644 --- a/doc/source/whatsnew/v1.1.2.rst +++ b/doc/source/whatsnew/v1.1.2.rst @@ -15,8 +15,9 @@ including other versions of pandas. Fixed regressions ~~~~~~~~~~~~~~~~~ - Regression in :meth:`DatetimeIndex.intersection` incorrectly raising ``AssertionError`` when intersecting against a list (:issue:`35876`) +- Performance regression for :meth:`RangeIndex.format` (:issue:`35712`) - -- + .. --------------------------------------------------------------------------- @@ -26,7 +27,7 @@ Bug fixes ~~~~~~~~~ - Bug in :meth:`DataFrame.eval` with ``object`` dtype column binary operations (:issue:`35794`) - Bug in :meth:`DataFrame.apply` with ``result_type="reduce"`` returning with incorrect index (:issue:`35683`) -- +- Bug in :meth:`DateTimeIndex.format` and :meth:`PeriodIndex.format` with ``name=True`` setting the first item to ``"None"`` where it should bw ``""`` (:issue:`35712`) - .. 
--------------------------------------------------------------------------- diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index ceb109fdf6d7a..b1e5d5627e3f6 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -933,7 +933,9 @@ def format( return self._format_with_header(header, na_rep=na_rep) - def _format_with_header(self, header, na_rep="NaN") -> List[str_t]: + def _format_with_header( + self, header: List[str_t], na_rep: str_t = "NaN" + ) -> List[str_t]: from pandas.io.formats.format import format_array values = self._values diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py index 4990e6a8e20e9..cbb30763797d1 100644 --- a/pandas/core/indexes/category.py +++ b/pandas/core/indexes/category.py @@ -347,7 +347,7 @@ def _format_attrs(self): attrs.append(("length", len(self))) return attrs - def _format_with_header(self, header, na_rep="NaN") -> List[str]: + def _format_with_header(self, header: List[str], na_rep: str = "NaN") -> List[str]: from pandas.io.formats.printing import pprint_thing result = [ diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index 9d00f50a65a06..0e8d7c1b866b8 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -354,15 +354,20 @@ def format( """ header = [] if name: - fmt_name = ibase.pprint_thing(self.name, escape_chars=("\t", "\r", "\n")) - header.append(fmt_name) + header.append( + ibase.pprint_thing(self.name, escape_chars=("\t", "\r", "\n")) + if self.name is not None + else "" + ) if formatter is not None: return header + list(self.map(formatter)) return self._format_with_header(header, na_rep=na_rep, date_format=date_format) - def _format_with_header(self, header, na_rep="NaT", date_format=None) -> List[str]: + def _format_with_header( + self, header: List[str], na_rep: str = "NaT", date_format: Optional[str] = None + ) -> List[str]: return header + list( self._format_native_types(na_rep=na_rep, date_format=date_format) ) diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index e8d0a44324cc5..9281f8017761d 100644 --- a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -948,7 +948,7 @@ def take(self, indices, axis=0, allow_fill=True, fill_value=None, **kwargs): # Rendering Methods # __repr__ associated methods are based on MultiIndex - def _format_with_header(self, header, na_rep="NaN") -> List[str]: + def _format_with_header(self, header: List[str], na_rep: str = "NaN") -> List[str]: return header + list(self._format_native_types(na_rep=na_rep)) def _format_native_types(self, na_rep="NaN", quoting=None, **kwargs): diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py index c5572a9de7fa5..b85e2d3947cb1 100644 --- a/pandas/core/indexes/range.py +++ b/pandas/core/indexes/range.py @@ -1,7 +1,7 @@ from datetime import timedelta import operator from sys import getsizeof -from typing import Any +from typing import Any, List import warnings import numpy as np @@ -187,6 +187,15 @@ def _format_data(self, name=None): # we are formatting thru the attributes return None + def _format_with_header(self, header: List[str], na_rep: str = "NaN") -> List[str]: + if not len(self._range): + return header + first_val_str = str(self._range[0]) + last_val_str = str(self._range[-1]) + max_length = max(len(first_val_str), len(last_val_str)) + + return header + [f"{x:<{max_length}}" for x in self._range] + # 
-------------------------------------------------------------------- _deprecation_message = ( "RangeIndex.{} is deprecated and will be " diff --git a/pandas/tests/indexes/common.py b/pandas/tests/indexes/common.py index e4d0b46f7c716..e95e7267f17ec 100644 --- a/pandas/tests/indexes/common.py +++ b/pandas/tests/indexes/common.py @@ -1,5 +1,5 @@ import gc -from typing import Optional, Type +from typing import Type import numpy as np import pytest @@ -33,7 +33,7 @@ class Base: """ base class for index sub-class tests """ - _holder: Optional[Type[Index]] = None + _holder: Type[Index] _compat_props = ["shape", "ndim", "size", "nbytes"] def create_index(self) -> Index: @@ -686,6 +686,12 @@ def test_format(self): expected = [str(x) for x in idx] assert idx.format() == expected + def test_format_empty(self): + # GH35712 + empty_idx = self._holder([]) + assert empty_idx.format() == [] + assert empty_idx.format(name=True) == [""] + def test_hasnans_isnans(self, index): # GH 11343, added tests for hasnans / isnans if isinstance(index, MultiIndex): diff --git a/pandas/tests/indexes/period/test_period.py b/pandas/tests/indexes/period/test_period.py index 15a88ab3819ce..085d41aaa5b76 100644 --- a/pandas/tests/indexes/period/test_period.py +++ b/pandas/tests/indexes/period/test_period.py @@ -536,6 +536,12 @@ def test_contains_raise_error_if_period_index_is_in_multi_index(self, msg, key): with pytest.raises(KeyError, match=msg): df.loc[key] + def test_format_empty(self): + # GH35712 + empty_idx = self._holder([], freq="A") + assert empty_idx.format() == [] + assert empty_idx.format(name=True) == [""] + def test_maybe_convert_timedelta(): pi = PeriodIndex(["2000", "2001"], freq="D") diff --git a/pandas/tests/indexes/ranges/test_range.py b/pandas/tests/indexes/ranges/test_range.py index c4c242746e92c..172cd4a106ac1 100644 --- a/pandas/tests/indexes/ranges/test_range.py +++ b/pandas/tests/indexes/ranges/test_range.py @@ -171,8 +171,14 @@ def test_cache(self): pass assert idx._cache == {} + idx.format() + assert idx._cache == {} + df = pd.DataFrame({"a": range(10)}, index=idx) + str(df) + assert idx._cache == {} + df.loc[50] assert idx._cache == {} @@ -515,3 +521,9 @@ def test_engineless_lookup(self): idx.get_loc("a") assert "_engine" not in idx._cache + + def test_format_empty(self): + # GH35712 + empty_idx = self._holder(0) + assert empty_idx.format() == [] + assert empty_idx.format(name=True) == [""] From e666f5cc59a8631a5ee3e38c20ad8e08531cbec3 Mon Sep 17 00:00:00 2001 From: Avinash Pancham <44933366+avinashpancham@users.noreply.github.com> Date: Wed, 26 Aug 2020 14:44:25 +0200 Subject: [PATCH 338/632] TST: Verify whether read-only datetime64 array can be factorized (35650) (#35775) * TST: Verify whether read-only datetime64 array can be factorized (35650) * CLN: Make test more inline with similar factorize tests --- pandas/tests/test_algos.py | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py index 6c6bdb6b1b2bd..67a2dc2303550 100644 --- a/pandas/tests/test_algos.py +++ b/pandas/tests/test_algos.py @@ -252,6 +252,19 @@ def test_object_factorize(self, writable): tm.assert_numpy_array_equal(codes, expected_codes) tm.assert_numpy_array_equal(uniques, expected_uniques) + def test_datetime64_factorize(self, writable): + # GH35650 Verify whether read-only datetime64 array can be factorized + data = np.array([np.datetime64("2020-01-01T00:00:00.000")]) + data.setflags(write=writable) + expected_codes = np.array([0], dtype=np.int64) + 
expected_uniques = np.array( + ["2020-01-01T00:00:00.000000000"], dtype="datetime64[ns]" + ) + + codes, uniques = pd.factorize(data) + tm.assert_numpy_array_equal(codes, expected_codes) + tm.assert_numpy_array_equal(uniques, expected_uniques) + def test_deprecate_order(self): # gh 19727 - check warning is raised for deprecated keyword, order. # Test not valid once order keyword is removed. From d1e29bec51d7bb43756f938aada5a83f4ba6b1b2 Mon Sep 17 00:00:00 2001 From: Daniel Saxton <2658661+dsaxton@users.noreply.github.com> Date: Wed, 26 Aug 2020 08:57:40 -0500 Subject: [PATCH 339/632] Bump asv Python version (#35798) * Bump asv Python version * 3.7.1 -> 3.8 --- asv_bench/asv.conf.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/asv_bench/asv.conf.json b/asv_bench/asv.conf.json index 4583fac85b776..1863a17e3d5f7 100644 --- a/asv_bench/asv.conf.json +++ b/asv_bench/asv.conf.json @@ -26,7 +26,7 @@ // The Pythons you'd like to test against. If not provided, defaults // to the current version of Python used to run `asv`. // "pythons": ["2.7", "3.4"], - "pythons": ["3.6"], + "pythons": ["3.8"], // The matrix of dependencies to test. Each key is the name of a // package (in PyPI) and the values are version numbers. An empty From d0ca4b347b060bc4a49b2d31a818d5a28d1e665f Mon Sep 17 00:00:00 2001 From: Daniel Saxton <2658661+dsaxton@users.noreply.github.com> Date: Wed, 26 Aug 2020 21:22:02 -0500 Subject: [PATCH 340/632] Fix Series construction from Sparse["datetime64[ns]"] (#35838) --- doc/source/whatsnew/v1.1.2.rst | 1 + pandas/core/construction.py | 6 ++++-- pandas/core/dtypes/cast.py | 7 +++++-- pandas/tests/series/test_constructors.py | 15 +++++++++++++++ 4 files changed, 25 insertions(+), 4 deletions(-) diff --git a/doc/source/whatsnew/v1.1.2.rst b/doc/source/whatsnew/v1.1.2.rst index 7739a483e3d38..9747a8ef3e71f 100644 --- a/doc/source/whatsnew/v1.1.2.rst +++ b/doc/source/whatsnew/v1.1.2.rst @@ -26,6 +26,7 @@ Fixed regressions Bug fixes ~~~~~~~~~ - Bug in :meth:`DataFrame.eval` with ``object`` dtype column binary operations (:issue:`35794`) +- Bug in :class:`Series` constructor raising a ``TypeError`` when constructing sparse datetime64 dtypes (:issue:`35762`) - Bug in :meth:`DataFrame.apply` with ``result_type="reduce"`` returning with incorrect index (:issue:`35683`) - Bug in :meth:`DateTimeIndex.format` and :meth:`PeriodIndex.format` with ``name=True`` setting the first item to ``"None"`` where it should bw ``""`` (:issue:`35712`) - diff --git a/pandas/core/construction.py b/pandas/core/construction.py index 47f10f1f65f4a..e8c9f28e50084 100644 --- a/pandas/core/construction.py +++ b/pandas/core/construction.py @@ -35,6 +35,7 @@ is_iterator, is_list_like, is_object_dtype, + is_sparse, is_timedelta64_ns_dtype, ) from pandas.core.dtypes.generic import ( @@ -535,9 +536,10 @@ def _try_cast( if maybe_castable(arr) and not copy and dtype is None: return arr - if isinstance(dtype, ExtensionDtype) and dtype.kind != "M": + if isinstance(dtype, ExtensionDtype) and (dtype.kind != "M" or is_sparse(dtype)): # create an extension array from its dtype - # DatetimeTZ case needs to go through maybe_cast_to_datetime + # DatetimeTZ case needs to go through maybe_cast_to_datetime but + # SparseDtype does not array_type = dtype.construct_array_type()._from_sequence subarr = array_type(arr, dtype=dtype, copy=copy) return subarr diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index 2697f42eb05a4..e6b4cb598989b 100644 --- a/pandas/core/dtypes/cast.py +++ 
b/pandas/core/dtypes/cast.py @@ -50,6 +50,7 @@ is_numeric_dtype, is_object_dtype, is_scalar, + is_sparse, is_string_dtype, is_timedelta64_dtype, is_timedelta64_ns_dtype, @@ -1323,7 +1324,9 @@ def maybe_cast_to_datetime(value, dtype, errors: str = "raise"): f"Please pass in '{dtype.name}[ns]' instead." ) - if is_datetime64 and not is_dtype_equal(dtype, DT64NS_DTYPE): + if is_datetime64 and not is_dtype_equal( + getattr(dtype, "subtype", dtype), DT64NS_DTYPE + ): # pandas supports dtype whose granularity is less than [ns] # e.g., [ps], [fs], [as] @@ -1355,7 +1358,7 @@ def maybe_cast_to_datetime(value, dtype, errors: str = "raise"): if is_scalar(value): if value == iNaT or isna(value): value = iNaT - else: + elif not is_sparse(value): value = np.array(value, copy=False) # have a scalar array-like (e.g. NaT) diff --git a/pandas/tests/series/test_constructors.py b/pandas/tests/series/test_constructors.py index 1dd410ad02ee0..bcf7039ec9039 100644 --- a/pandas/tests/series/test_constructors.py +++ b/pandas/tests/series/test_constructors.py @@ -1449,3 +1449,18 @@ def test_constructor_datetimelike_scalar_to_string_dtype(self): result = Series("M", index=[1, 2, 3], dtype="string") expected = pd.Series(["M", "M", "M"], index=[1, 2, 3], dtype="string") tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize( + "values", + [ + [np.datetime64("2012-01-01"), np.datetime64("2013-01-01")], + ["2012-01-01", "2013-01-01"], + ], + ) + def test_constructor_sparse_datetime64(self, values): + # https://github.com/pandas-dev/pandas/issues/35762 + dtype = pd.SparseDtype("datetime64[ns]") + result = pd.Series(values, dtype=dtype) + arr = pd.arrays.SparseArray(values, dtype=dtype) + expected = pd.Series(arr) + tm.assert_series_equal(result, expected) From e3bcf8d1949833793ff50cca393240dab0f0d461 Mon Sep 17 00:00:00 2001 From: Ali McMaster Date: Thu, 27 Aug 2020 17:06:25 +0100 Subject: [PATCH 341/632] Attempt to unpin pytest-xdist (#35910) --- ci/deps/azure-windows-37.yaml | 2 +- ci/deps/azure-windows-38.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/ci/deps/azure-windows-37.yaml b/ci/deps/azure-windows-37.yaml index 4894129915722..1d15ca41c0f8e 100644 --- a/ci/deps/azure-windows-37.yaml +++ b/ci/deps/azure-windows-37.yaml @@ -8,7 +8,7 @@ dependencies: # tools - cython>=0.29.16 - pytest>=5.0.1 - - pytest-xdist>=1.21,<2.0.0 # GH 35737 + - pytest-xdist>=1.21 - hypothesis>=3.58.0 - pytest-azurepipelines diff --git a/ci/deps/azure-windows-38.yaml b/ci/deps/azure-windows-38.yaml index 2853e12b28e35..23bede5eb26f1 100644 --- a/ci/deps/azure-windows-38.yaml +++ b/ci/deps/azure-windows-38.yaml @@ -8,7 +8,7 @@ dependencies: # tools - cython>=0.29.16 - pytest>=5.0.1 - - pytest-xdist>=1.21,<2.0.0 # GH 35737 + - pytest-xdist>=1.21 - hypothesis>=3.58.0 - pytest-azurepipelines From a1f605697e44e7668b64841baf4ab2988470d08c Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Thu, 27 Aug 2020 11:25:02 -0700 Subject: [PATCH 342/632] TYP: annotate tseries.holiday (#35913) --- pandas/tseries/holiday.py | 28 +++++++++++++++------------- setup.cfg | 3 --- 2 files changed, 15 insertions(+), 16 deletions(-) diff --git a/pandas/tseries/holiday.py b/pandas/tseries/holiday.py index 8ab37f787bd10..d8a3040919e7b 100644 --- a/pandas/tseries/holiday.py +++ b/pandas/tseries/holiday.py @@ -12,7 +12,7 @@ from pandas.tseries.offsets import Day, Easter -def next_monday(dt): +def next_monday(dt: datetime) -> datetime: """ If holiday falls on Saturday, use following Monday instead; if holiday falls on 
Sunday, use Monday instead @@ -24,7 +24,7 @@ def next_monday(dt): return dt -def next_monday_or_tuesday(dt): +def next_monday_or_tuesday(dt: datetime) -> datetime: """ For second holiday of two adjacent ones! If holiday falls on Saturday, use following Monday instead; @@ -39,7 +39,7 @@ def next_monday_or_tuesday(dt): return dt -def previous_friday(dt): +def previous_friday(dt: datetime) -> datetime: """ If holiday falls on Saturday or Sunday, use previous Friday instead. """ @@ -50,7 +50,7 @@ def previous_friday(dt): return dt -def sunday_to_monday(dt): +def sunday_to_monday(dt: datetime) -> datetime: """ If holiday falls on Sunday, use day thereafter (Monday) instead. """ @@ -59,7 +59,7 @@ def sunday_to_monday(dt): return dt -def weekend_to_monday(dt): +def weekend_to_monday(dt: datetime) -> datetime: """ If holiday falls on Sunday or Saturday, use day thereafter (Monday) instead. @@ -72,7 +72,7 @@ def weekend_to_monday(dt): return dt -def nearest_workday(dt): +def nearest_workday(dt: datetime) -> datetime: """ If holiday falls on Saturday, use day before (Friday) instead; if holiday falls on Sunday, use day thereafter (Monday) instead. @@ -84,7 +84,7 @@ def nearest_workday(dt): return dt -def next_workday(dt): +def next_workday(dt: datetime) -> datetime: """ returns next weekday used for observances """ @@ -95,7 +95,7 @@ def next_workday(dt): return dt -def previous_workday(dt): +def previous_workday(dt: datetime) -> datetime: """ returns previous weekday used for observances """ @@ -106,14 +106,14 @@ def previous_workday(dt): return dt -def before_nearest_workday(dt): +def before_nearest_workday(dt: datetime) -> datetime: """ returns previous workday after nearest workday """ return previous_workday(nearest_workday(dt)) -def after_nearest_workday(dt): +def after_nearest_workday(dt: datetime) -> datetime: """ returns next workday after nearest workday needed for Boxing day or multiple holidays in a series @@ -428,9 +428,11 @@ def holidays(self, start=None, end=None, return_name=False): # If we don't have a cache or the dates are outside the prior cache, we # get them again if self._cache is None or start < self._cache[0] or end > self._cache[1]: - holidays = [rule.dates(start, end, return_name=True) for rule in self.rules] - if holidays: - holidays = concat(holidays) + pre_holidays = [ + rule.dates(start, end, return_name=True) for rule in self.rules + ] + if pre_holidays: + holidays = concat(pre_holidays) else: holidays = Series(index=DatetimeIndex([]), dtype=object) diff --git a/setup.cfg b/setup.cfg index e4c0b3dcf37ef..aa1535a171f0a 100644 --- a/setup.cfg +++ b/setup.cfg @@ -276,6 +276,3 @@ check_untyped_defs=False [mypy-pandas.plotting._matplotlib.misc] check_untyped_defs=False - -[mypy-pandas.tseries.holiday] -check_untyped_defs=False From 1f35b062158ddf151b5168255bdd7e1c29a9e2de Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Fri, 28 Aug 2020 02:07:05 -0700 Subject: [PATCH 343/632] TYP: annotations in pandas.plotting (#35935) --- pandas/plotting/_matplotlib/converter.py | 9 +++++---- pandas/plotting/_matplotlib/timeseries.py | 4 ++-- pandas/plotting/_matplotlib/tools.py | 22 ++++++++++++++-------- 3 files changed, 21 insertions(+), 14 deletions(-) diff --git a/pandas/plotting/_matplotlib/converter.py b/pandas/plotting/_matplotlib/converter.py index 8f2080658e63e..214a67690d695 100644 --- a/pandas/plotting/_matplotlib/converter.py +++ b/pandas/plotting/_matplotlib/converter.py @@ -1,7 +1,8 @@ import contextlib import datetime as pydt -from datetime import datetime, timedelta 
+from datetime import datetime, timedelta, tzinfo import functools +from typing import Optional, Tuple from dateutil.relativedelta import relativedelta import matplotlib.dates as dates @@ -152,7 +153,7 @@ def axisinfo(unit, axis): return units.AxisInfo(majloc=majloc, majfmt=majfmt, label="time") @staticmethod - def default_units(x, axis): + def default_units(x, axis) -> str: return "time" @@ -421,7 +422,7 @@ def autoscale(self): return self.nonsingular(vmin, vmax) -def _from_ordinal(x, tz=None): +def _from_ordinal(x, tz: Optional[tzinfo] = None) -> datetime: ix = int(x) dt = datetime.fromordinal(ix) remainder = float(x) - ix @@ -450,7 +451,7 @@ def _from_ordinal(x, tz=None): # ------------------------------------------------------------------------- -def _get_default_annual_spacing(nyears): +def _get_default_annual_spacing(nyears) -> Tuple[int, int]: """ Returns a default spacing between consecutive ticks for annual data. """ diff --git a/pandas/plotting/_matplotlib/timeseries.py b/pandas/plotting/_matplotlib/timeseries.py index eef4276f0ed09..193602e1baf4a 100644 --- a/pandas/plotting/_matplotlib/timeseries.py +++ b/pandas/plotting/_matplotlib/timeseries.py @@ -62,13 +62,13 @@ def _maybe_resample(series: "Series", ax, kwargs): return freq, series -def _is_sub(f1, f2): +def _is_sub(f1: str, f2: str) -> bool: return (f1.startswith("W") and is_subperiod("D", f2)) or ( f2.startswith("W") and is_subperiod(f1, "D") ) -def _is_sup(f1, f2): +def _is_sup(f1: str, f2: str) -> bool: return (f1.startswith("W") and is_superperiod("D", f2)) or ( f2.startswith("W") and is_superperiod(f1, "D") ) diff --git a/pandas/plotting/_matplotlib/tools.py b/pandas/plotting/_matplotlib/tools.py index caf2f27de9276..26b25597ce1a6 100644 --- a/pandas/plotting/_matplotlib/tools.py +++ b/pandas/plotting/_matplotlib/tools.py @@ -1,16 +1,22 @@ # being a bit too dynamic from math import ceil +from typing import TYPE_CHECKING, Tuple import warnings import matplotlib.table import matplotlib.ticker as ticker import numpy as np +from pandas._typing import FrameOrSeries + from pandas.core.dtypes.common import is_list_like from pandas.core.dtypes.generic import ABCDataFrame, ABCIndexClass, ABCSeries from pandas.plotting._matplotlib import compat +if TYPE_CHECKING: + from matplotlib.table import Table + def format_date_labels(ax, rot): # mini version of autofmt_xdate @@ -21,7 +27,7 @@ def format_date_labels(ax, rot): fig.subplots_adjust(bottom=0.2) -def table(ax, data, rowLabels=None, colLabels=None, **kwargs): +def table(ax, data: FrameOrSeries, rowLabels=None, colLabels=None, **kwargs) -> "Table": if isinstance(data, ABCSeries): data = data.to_frame() elif isinstance(data, ABCDataFrame): @@ -43,7 +49,7 @@ def table(ax, data, rowLabels=None, colLabels=None, **kwargs): return table -def _get_layout(nplots, layout=None, layout_type="box"): +def _get_layout(nplots: int, layout=None, layout_type: str = "box") -> Tuple[int, int]: if layout is not None: if not isinstance(layout, (tuple, list)) or len(layout) != 2: raise ValueError("Layout must be a tuple of (rows, columns)") @@ -92,14 +98,14 @@ def _get_layout(nplots, layout=None, layout_type="box"): def _subplots( - naxes=None, - sharex=False, - sharey=False, - squeeze=True, + naxes: int, + sharex: bool = False, + sharey: bool = False, + squeeze: bool = True, subplot_kw=None, ax=None, layout=None, - layout_type="box", + layout_type: str = "box", **fig_kw, ): """ @@ -369,7 +375,7 @@ def _get_all_lines(ax): return lines -def _get_xlim(lines): +def _get_xlim(lines) -> Tuple[float, 
float]: left, right = np.inf, -np.inf for l in lines: x = l.get_xdata(orig=False) From 328a1dcf7b3f63d35f14da0e007e9911c7c6f9b9 Mon Sep 17 00:00:00 2001 From: Marco Gorelli Date: Fri, 28 Aug 2020 10:21:58 +0100 Subject: [PATCH 344/632] remove unnecessary trailing commas (#35930) --- pandas/core/aggregation.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/pandas/core/aggregation.py b/pandas/core/aggregation.py index 891048ae82dfd..e2374b81ca13b 100644 --- a/pandas/core/aggregation.py +++ b/pandas/core/aggregation.py @@ -28,10 +28,8 @@ def reconstruct_func( - func: Optional[AggFuncType], **kwargs, -) -> Tuple[ - bool, Optional[AggFuncType], Optional[List[str]], Optional[List[int]], -]: + func: Optional[AggFuncType], **kwargs +) -> Tuple[bool, Optional[AggFuncType], Optional[List[str]], Optional[List[int]]]: """ This is the internal function to reconstruct func given if there is relabeling or not and also normalize the keyword to get new order of columns. From 47af138ea79d678a7422eb8f0963cc47e737e187 Mon Sep 17 00:00:00 2001 From: Marco Gorelli Date: Fri, 28 Aug 2020 10:26:43 +0100 Subject: [PATCH 345/632] CLN remove unnecessary trailing commas to get ready for new version of black: _testing -> generic (#35949) * pandas/_testing.py * pandas/core/algorithms.py * pandas/core/arrays/_mixins.py * pandas/core/arrays/categorical.py * pandas/core/arrays/integer.py * pandas/core/arrays/masked.py * pandas/core/arrays/numpy_.py * pandas/core/arrays/period.py * pandas/core/construction.py * pandas/core/generic.py --- pandas/_testing.py | 12 +++++------- pandas/core/algorithms.py | 2 +- pandas/core/arrays/_mixins.py | 2 +- pandas/core/arrays/categorical.py | 2 +- pandas/core/arrays/integer.py | 4 ++-- pandas/core/arrays/masked.py | 2 +- pandas/core/arrays/numpy_.py | 2 +- pandas/core/arrays/period.py | 2 +- pandas/core/construction.py | 4 +--- pandas/core/generic.py | 18 +++++++----------- 10 files changed, 21 insertions(+), 29 deletions(-) diff --git a/pandas/_testing.py b/pandas/_testing.py index ef6232fa6d575..b402b040d9268 100644 --- a/pandas/_testing.py +++ b/pandas/_testing.py @@ -939,7 +939,7 @@ def assert_categorical_equal( if check_category_order: assert_index_equal(left.categories, right.categories, obj=f"{obj}.categories") assert_numpy_array_equal( - left.codes, right.codes, check_dtype=check_dtype, obj=f"{obj}.codes", + left.codes, right.codes, check_dtype=check_dtype, obj=f"{obj}.codes" ) else: try: @@ -948,9 +948,7 @@ def assert_categorical_equal( except TypeError: # e.g. 
'<' not supported between instances of 'int' and 'str' lc, rc = left.categories, right.categories - assert_index_equal( - lc, rc, obj=f"{obj}.categories", - ) + assert_index_equal(lc, rc, obj=f"{obj}.categories") assert_index_equal( left.categories.take(left.codes), right.categories.take(right.codes), @@ -1092,7 +1090,7 @@ def _raise(left, right, err_msg): if err_msg is None: if left.shape != right.shape: raise_assert_detail( - obj, f"{obj} shapes are different", left.shape, right.shape, + obj, f"{obj} shapes are different", left.shape, right.shape ) diff = 0 @@ -1559,7 +1557,7 @@ def assert_frame_equal( # shape comparison if left.shape != right.shape: raise_assert_detail( - obj, f"{obj} shape mismatch", f"{repr(left.shape)}", f"{repr(right.shape)}", + obj, f"{obj} shape mismatch", f"{repr(left.shape)}", f"{repr(right.shape)}" ) if check_like: @@ -2884,7 +2882,7 @@ def convert_rows_list_to_csv_str(rows_list: List[str]): return expected -def external_error_raised(expected_exception: Type[Exception],) -> ContextManager: +def external_error_raised(expected_exception: Type[Exception]) -> ContextManager: """ Helper function to mark pytest.raises that have an external error message. diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py index befde7c355818..2a6e983eff3ee 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py @@ -462,7 +462,7 @@ def isin(comps: AnyArrayLike, values: AnyArrayLike) -> np.ndarray: def _factorize_array( - values, na_sentinel: int = -1, size_hint=None, na_value=None, mask=None, + values, na_sentinel: int = -1, size_hint=None, na_value=None, mask=None ) -> Tuple[np.ndarray, np.ndarray]: """ Factorize an array-like to codes and uniques. diff --git a/pandas/core/arrays/_mixins.py b/pandas/core/arrays/_mixins.py index 832d09b062265..2976747d66dfa 100644 --- a/pandas/core/arrays/_mixins.py +++ b/pandas/core/arrays/_mixins.py @@ -40,7 +40,7 @@ def take( fill_value = self._validate_fill_value(fill_value) new_data = take( - self._ndarray, indices, allow_fill=allow_fill, fill_value=fill_value, + self._ndarray, indices, allow_fill=allow_fill, fill_value=fill_value ) return self._from_backing_data(new_data) diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index a28b341669918..27b1afdb438cb 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -1505,7 +1505,7 @@ def argsort(self, ascending=True, kind="quicksort", **kwargs): return super().argsort(ascending=ascending, kind=kind, **kwargs) def sort_values( - self, inplace: bool = False, ascending: bool = True, na_position: str = "last", + self, inplace: bool = False, ascending: bool = True, na_position: str = "last" ): """ Sort the Categorical by category value returning a new diff --git a/pandas/core/arrays/integer.py b/pandas/core/arrays/integer.py index 57df067c7b16e..d83ff91a1315f 100644 --- a/pandas/core/arrays/integer.py +++ b/pandas/core/arrays/integer.py @@ -138,7 +138,7 @@ def __from_arrow__( return IntegerArray._concat_same_type(results) -def integer_array(values, dtype=None, copy: bool = False,) -> "IntegerArray": +def integer_array(values, dtype=None, copy: bool = False) -> "IntegerArray": """ Infer and return an integer array of the values. 
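 
A note on the formatting rule these trailing-comma cleanups anticipate:
black 20.8b0 introduced the "magic trailing comma", under which a call that
already ends in a trailing comma is kept exploded one argument per line,
while a call without one is collapsed whenever it fits. A hypothetical
before/after sketch, reusing integer_array from the hunk above purely for
illustration:

    # No trailing comma: black may collapse this onto a single line.
    integer_array([1, 2], dtype=None, copy=False)

    # Magic trailing comma: black keeps the exploded form.
    integer_array(
        [1, 2],
        dtype=None,
        copy=False,
    )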
@@ -182,7 +182,7 @@ def safe_cast(values, dtype, copy: bool): def coerce_to_array( - values, dtype, mask=None, copy: bool = False, + values, dtype, mask=None, copy: bool = False ) -> Tuple[np.ndarray, np.ndarray]: """ Coerce the input values array to numpy arrays with a mask diff --git a/pandas/core/arrays/masked.py b/pandas/core/arrays/masked.py index 235840d6d201e..1237dea5c1a64 100644 --- a/pandas/core/arrays/masked.py +++ b/pandas/core/arrays/masked.py @@ -126,7 +126,7 @@ def __invert__(self: BaseMaskedArrayT) -> BaseMaskedArrayT: return type(self)(~self._data, self._mask) def to_numpy( - self, dtype=None, copy: bool = False, na_value: Scalar = lib.no_default, + self, dtype=None, copy: bool = False, na_value: Scalar = lib.no_default ) -> np.ndarray: """ Convert to a NumPy Array. diff --git a/pandas/core/arrays/numpy_.py b/pandas/core/arrays/numpy_.py index 05f901518d82f..23a4a70734c81 100644 --- a/pandas/core/arrays/numpy_.py +++ b/pandas/core/arrays/numpy_.py @@ -280,7 +280,7 @@ def isna(self) -> np.ndarray: return isna(self._ndarray) def fillna( - self, value=None, method: Optional[str] = None, limit: Optional[int] = None, + self, value=None, method: Optional[str] = None, limit: Optional[int] = None ) -> "PandasArray": # TODO(_values_for_fillna): remove this value, method = validate_fillna_kwargs(value, method) diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py index ddaf6d39f1837..cc39ffb5d1203 100644 --- a/pandas/core/arrays/period.py +++ b/pandas/core/arrays/period.py @@ -634,7 +634,7 @@ def _sub_period_array(self, other): return new_values def _addsub_int_array( - self, other: np.ndarray, op: Callable[[Any, Any], Any], + self, other: np.ndarray, op: Callable[[Any, Any], Any] ) -> "PeriodArray": """ Add or subtract array of integers; equivalent to applying diff --git a/pandas/core/construction.py b/pandas/core/construction.py index e8c9f28e50084..f145e76046bee 100644 --- a/pandas/core/construction.py +++ b/pandas/core/construction.py @@ -514,9 +514,7 @@ def sanitize_array( return subarr -def _try_cast( - arr, dtype: Optional[DtypeObj], copy: bool, raise_cast_failure: bool, -): +def _try_cast(arr, dtype: Optional[DtypeObj], copy: bool, raise_cast_failure: bool): """ Convert input to numpy ndarray and optionally cast to a given dtype. diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 286da6e1de9d5..fea3efedb6abb 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -315,17 +315,13 @@ def _data(self): @property def _AXIS_NUMBERS(self) -> Dict[str, int]: """.. deprecated:: 1.1.0""" - warnings.warn( - "_AXIS_NUMBERS has been deprecated.", FutureWarning, stacklevel=3, - ) + warnings.warn("_AXIS_NUMBERS has been deprecated.", FutureWarning, stacklevel=3) return {"index": 0} @property def _AXIS_NAMES(self) -> Dict[int, str]: """.. deprecated:: 1.1.0""" - warnings.warn( - "_AXIS_NAMES has been deprecated.", FutureWarning, stacklevel=3, - ) + warnings.warn("_AXIS_NAMES has been deprecated.", FutureWarning, stacklevel=3) return {0: "index"} def _construct_axes_dict(self, axes=None, **kwargs): @@ -5128,7 +5124,7 @@ def pipe(self, func, *args, **kwargs): ... .pipe(g, arg1=a) ... .pipe((func, 'arg2'), arg1=a, arg3=c) ... 
) # doctest: +SKIP - """ + """ return com.pipe(self, func, *args, **kwargs) _shared_docs["aggregate"] = dedent( @@ -5630,7 +5626,7 @@ def astype( else: # else, only a single dtype is given - new_data = self._mgr.astype(dtype=dtype, copy=copy, errors=errors,) + new_data = self._mgr.astype(dtype=dtype, copy=copy, errors=errors) return self._constructor(new_data).__finalize__(self, method="astype") # GH 33113: handle empty frame or series @@ -6520,7 +6516,7 @@ def replace( 3 b 4 b dtype: object - """ + """ if not ( is_scalar(to_replace) or is_re_compilable(to_replace) @@ -7772,7 +7768,7 @@ def between_time( raise TypeError("Index must be DatetimeIndex") indexer = index.indexer_between_time( - start_time, end_time, include_start=include_start, include_end=include_end, + start_time, end_time, include_start=include_start, include_end=include_end ) return self._take_with_is_copy(indexer, axis=axis) @@ -8939,7 +8935,7 @@ def _where( self._check_inplace_setting(other) new_data = self._mgr.putmask( - mask=cond, new=other, align=align, axis=block_axis, + mask=cond, new=other, align=align, axis=block_axis ) result = self._constructor(new_data) return self._update_inplace(result) From 00cb9922f4334256d4195440b268270bf750847f Mon Sep 17 00:00:00 2001 From: Marco Gorelli Date: Fri, 28 Aug 2020 10:27:16 +0100 Subject: [PATCH 346/632] CLN remove unnecessary trailing commas to get ready for new version of black: generic -> blocks (#35950) * pandas/core/groupby/generic.py * pandas/core/groupby/groupby.py * pandas/core/groupby/ops.py * pandas/core/indexes/datetimelike.py * pandas/core/indexes/interval.py * pandas/core/indexes/numeric.py * pandas/core/indexes/range.py * pandas/core/internals/blocks.py --- pandas/core/groupby/generic.py | 8 ++------ pandas/core/groupby/groupby.py | 8 +++----- pandas/core/groupby/ops.py | 14 ++++---------- pandas/core/indexes/datetimelike.py | 4 +--- pandas/core/indexes/interval.py | 4 ++-- pandas/core/indexes/numeric.py | 2 +- pandas/core/indexes/range.py | 2 +- pandas/core/internals/blocks.py | 30 ++++++++++++++--------------- 8 files changed, 29 insertions(+), 43 deletions(-) diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index 2afa56b50c3c7..82e629d184b19 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -221,9 +221,7 @@ def _selection_name(self): def apply(self, func, *args, **kwargs): return super().apply(func, *args, **kwargs) - @doc( - _agg_template, examples=_agg_examples_doc, klass="Series", - ) + @doc(_agg_template, examples=_agg_examples_doc, klass="Series") def aggregate(self, func=None, *args, engine=None, engine_kwargs=None, **kwargs): if maybe_use_numba(engine): @@ -935,9 +933,7 @@ class DataFrameGroupBy(GroupBy[DataFrame]): See :ref:`groupby.aggregate.named` for more.""" ) - @doc( - _agg_template, examples=_agg_examples_doc, klass="DataFrame", - ) + @doc(_agg_template, examples=_agg_examples_doc, klass="DataFrame") def aggregate(self, func=None, *args, engine=None, engine_kwargs=None, **kwargs): if maybe_use_numba(engine): diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index f96b488fb8d0d..a91366af61d0d 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -1077,7 +1077,7 @@ def _aggregate_with_numba(self, data, func, *args, engine_kwargs=None, **kwargs) tuple(args), kwargs, func, engine_kwargs ) result = numba_agg_func( - sorted_data, sorted_index, starts, ends, len(group_keys), len(data.columns), + sorted_data, sorted_index, starts, ends, 
len(group_keys), len(data.columns) ) if cache_key not in NUMBA_FUNC_CACHE: NUMBA_FUNC_CACHE[cache_key] = numba_agg_func @@ -1595,8 +1595,7 @@ def max(self, numeric_only: bool = False, min_count: int = -1): def first(self, numeric_only: bool = False, min_count: int = -1): def first_compat(obj: FrameOrSeries, axis: int = 0): def first(x: Series): - """Helper function for first item that isn't NA. - """ + """Helper function for first item that isn't NA.""" x = x.array[notna(x.array)] if len(x) == 0: return np.nan @@ -1620,8 +1619,7 @@ def first(x: Series): def last(self, numeric_only: bool = False, min_count: int = -1): def last_compat(obj: FrameOrSeries, axis: int = 0): def last(x: Series): - """Helper function for last item that isn't NA. - """ + """Helper function for last item that isn't NA.""" x = x.array[notna(x.array)] if len(x) == 0: return np.nan diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py index c6171a55359fe..290680f380f5f 100644 --- a/pandas/core/groupby/ops.py +++ b/pandas/core/groupby/ops.py @@ -583,7 +583,7 @@ def transform(self, values, how: str, axis: int = 0, **kwargs): return self._cython_operation("transform", values, how, axis, **kwargs) def _aggregate( - self, result, counts, values, comp_ids, agg_func, min_count: int = -1, + self, result, counts, values, comp_ids, agg_func, min_count: int = -1 ): if agg_func is libgroupby.group_nth: # different signature from the others @@ -603,9 +603,7 @@ def _transform( return result - def agg_series( - self, obj: Series, func: F, *args, **kwargs, - ): + def agg_series(self, obj: Series, func: F, *args, **kwargs): # Caller is responsible for checking ngroups != 0 assert self.ngroups != 0 @@ -653,9 +651,7 @@ def _aggregate_series_fast(self, obj: Series, func: F): result, counts = grouper.get_result() return result, counts - def _aggregate_series_pure_python( - self, obj: Series, func: F, *args, **kwargs, - ): + def _aggregate_series_pure_python(self, obj: Series, func: F, *args, **kwargs): group_index, _, ngroups = self.group_info counts = np.zeros(ngroups, dtype=int) @@ -841,9 +837,7 @@ def groupings(self) -> "List[grouper.Grouping]": for lvl, name in zip(self.levels, self.names) ] - def agg_series( - self, obj: Series, func: F, *args, **kwargs, - ): + def agg_series(self, obj: Series, func: F, *args, **kwargs): # Caller is responsible for checking ngroups != 0 assert self.ngroups != 0 assert len(self.bins) > 0 # otherwise we'd get IndexError in get_result diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index 0e8d7c1b866b8..efe1a853a9a76 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -81,9 +81,7 @@ def wrapper(left, right): DatetimeLikeArrayMixin, cache=True, ) -@inherit_names( - ["mean", "asi8", "freq", "freqstr", "_box_func"], DatetimeLikeArrayMixin, -) +@inherit_names(["mean", "asi8", "freq", "freqstr", "_box_func"], DatetimeLikeArrayMixin) class DatetimeIndexOpsMixin(ExtensionIndex): """ Common ops mixin to support a unified interface datetimelike Index. 
diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index 9281f8017761d..5d309ef7cd515 100644 --- a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -182,10 +182,10 @@ def func(intvidx_self, other, sort=False): ) @inherit_names(["set_closed", "to_tuples"], IntervalArray, wrap=True) @inherit_names( - ["__array__", "overlaps", "contains", "left", "right", "length"], IntervalArray, + ["__array__", "overlaps", "contains", "left", "right", "length"], IntervalArray ) @inherit_names( - ["is_non_overlapping_monotonic", "mid", "closed"], IntervalArray, cache=True, + ["is_non_overlapping_monotonic", "mid", "closed"], IntervalArray, cache=True ) class IntervalIndex(IntervalMixin, ExtensionIndex): _typ = "intervalindex" diff --git a/pandas/core/indexes/numeric.py b/pandas/core/indexes/numeric.py index 731907993d08f..80bb9f10fadd9 100644 --- a/pandas/core/indexes/numeric.py +++ b/pandas/core/indexes/numeric.py @@ -436,7 +436,7 @@ def isin(self, values, level=None): def _is_compatible_with_other(self, other) -> bool: return super()._is_compatible_with_other(other) or all( isinstance( - obj, (ABCInt64Index, ABCFloat64Index, ABCUInt64Index, ABCRangeIndex), + obj, (ABCInt64Index, ABCFloat64Index, ABCUInt64Index, ABCRangeIndex) ) for obj in [self, other] ) diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py index b85e2d3947cb1..f1457a9aac62b 100644 --- a/pandas/core/indexes/range.py +++ b/pandas/core/indexes/range.py @@ -82,7 +82,7 @@ class RangeIndex(Int64Index): # Constructors def __new__( - cls, start=None, stop=None, step=None, dtype=None, copy=False, name=None, + cls, start=None, stop=None, step=None, dtype=None, copy=False, name=None ): cls._validate_dtype(dtype) diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index c62be4f767f00..a38b47a4c2a25 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -724,7 +724,7 @@ def replace( # _can_hold_element checks have reduced this back to the # scalar case and we can avoid a costly object cast return self.replace( - to_replace[0], value, inplace=inplace, regex=regex, convert=convert, + to_replace[0], value, inplace=inplace, regex=regex, convert=convert ) # GH 22083, TypeError or ValueError occurred within error handling @@ -905,7 +905,7 @@ def setitem(self, indexer, value): return block def putmask( - self, mask, new, inplace: bool = False, axis: int = 0, transpose: bool = False, + self, mask, new, inplace: bool = False, axis: int = 0, transpose: bool = False ) -> List["Block"]: """ putmask the data to the block; it is possible that we may create a @@ -1292,7 +1292,7 @@ def shift(self, periods: int, axis: int = 0, fill_value=None): return [self.make_block(new_values)] def where( - self, other, cond, errors="raise", try_cast: bool = False, axis: int = 0, + self, other, cond, errors="raise", try_cast: bool = False, axis: int = 0 ) -> List["Block"]: """ evaluate the block; return result block(s) from the result @@ -1366,7 +1366,7 @@ def where_func(cond, values, other): # we are explicitly ignoring errors block = self.coerce_to_target_dtype(other) blocks = block.where( - orig_other, cond, errors=errors, try_cast=try_cast, axis=axis, + orig_other, cond, errors=errors, try_cast=try_cast, axis=axis ) return self._maybe_downcast(blocks, "infer") @@ -1605,7 +1605,7 @@ def set(self, locs, values): self.values = values def putmask( - self, mask, new, inplace: bool = False, axis: int = 0, transpose: bool = False, + self, mask, new, inplace: 
bool = False, axis: int = 0, transpose: bool = False ) -> List["Block"]: """ See Block.putmask.__doc__ @@ -1816,7 +1816,7 @@ def diff(self, n: int, axis: int = 1) -> List["Block"]: return super().diff(n, axis) def shift( - self, periods: int, axis: int = 0, fill_value: Any = None, + self, periods: int, axis: int = 0, fill_value: Any = None ) -> List["ExtensionBlock"]: """ Shift the block by `periods`. @@ -1833,7 +1833,7 @@ def shift( ] def where( - self, other, cond, errors="raise", try_cast: bool = False, axis: int = 0, + self, other, cond, errors="raise", try_cast: bool = False, axis: int = 0 ) -> List["Block"]: cond = _extract_bool_array(cond) @@ -1945,7 +1945,7 @@ def _can_hold_element(self, element: Any) -> bool: ) def to_native_types( - self, na_rep="", float_format=None, decimal=".", quoting=None, **kwargs, + self, na_rep="", float_format=None, decimal=".", quoting=None, **kwargs ): """ convert to our native types format """ values = self.values @@ -2369,7 +2369,7 @@ def replace(self, to_replace, value, inplace=False, regex=False, convert=True): if not np.can_cast(to_replace_values, bool): return self return super().replace( - to_replace, value, inplace=inplace, regex=regex, convert=convert, + to_replace, value, inplace=inplace, regex=regex, convert=convert ) @@ -2453,18 +2453,18 @@ def replace(self, to_replace, value, inplace=False, regex=False, convert=True): if not either_list and is_re(to_replace): return self._replace_single( - to_replace, value, inplace=inplace, regex=True, convert=convert, + to_replace, value, inplace=inplace, regex=True, convert=convert ) elif not (either_list or regex): return super().replace( - to_replace, value, inplace=inplace, regex=regex, convert=convert, + to_replace, value, inplace=inplace, regex=regex, convert=convert ) elif both_lists: for to_rep, v in zip(to_replace, value): result_blocks = [] for b in blocks: result = b._replace_single( - to_rep, v, inplace=inplace, regex=regex, convert=convert, + to_rep, v, inplace=inplace, regex=regex, convert=convert ) result_blocks = _extend_blocks(result, result_blocks) blocks = result_blocks @@ -2475,18 +2475,18 @@ def replace(self, to_replace, value, inplace=False, regex=False, convert=True): result_blocks = [] for b in blocks: result = b._replace_single( - to_rep, value, inplace=inplace, regex=regex, convert=convert, + to_rep, value, inplace=inplace, regex=regex, convert=convert ) result_blocks = _extend_blocks(result, result_blocks) blocks = result_blocks return result_blocks return self._replace_single( - to_replace, value, inplace=inplace, convert=convert, regex=regex, + to_replace, value, inplace=inplace, convert=convert, regex=regex ) def _replace_single( - self, to_replace, value, inplace=False, regex=False, convert=True, mask=None, + self, to_replace, value, inplace=False, regex=False, convert=True, mask=None ): """ Replace elements by the given value. 
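All of the signature and call-site edits in this patch exist to defuse black's "magic trailing comma": from black 20.8b0 onward, a call, signature, or collection literal that ends in a trailing comma is deliberately kept exploded one element per line, even when it would fit on a single line. Removing the commas up front lets the formatter collapse these short signatures again, so upgrading black produces no noisy reflow. A sketch of the effect, using an abridged putmask signature from the blocks.py hunks above (bodies elided; behavior as documented for black >= 20.8b0):

    # With the magic trailing comma, black keeps the signature exploded
    # one parameter per line, even though it fits within the line length:
    def putmask(
        self,
        mask,
        new,
        inplace: bool = False,
    ):
        ...

    # With the comma removed, as in this patch, black collapses the
    # signature back onto a single line:
    def putmask(self, mask, new, inplace: bool = False):
        ...
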
From 85d048b3c279d740b918a19a568bb2111854aa9e Mon Sep 17 00:00:00 2001 From: Simon Hawkins Date: Fri, 28 Aug 2020 18:00:57 +0100 Subject: [PATCH 347/632] TYP: misc cleanup in core\groupby\generic.py (#35955) --- pandas/core/groupby/generic.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index 82e629d184b19..3172fb4e0e853 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -9,7 +9,6 @@ import copy from functools import partial from textwrap import dedent -import typing from typing import ( TYPE_CHECKING, Any, @@ -22,6 +21,7 @@ Optional, Sequence, Type, + TypeVar, Union, ) import warnings @@ -92,7 +92,7 @@ # TODO: validate types on ScalarResult and move to _typing # Blocked from using by https://github.com/python/mypy/issues/1484 # See note at _mangle_lambda_list -ScalarResult = typing.TypeVar("ScalarResult") +ScalarResult = TypeVar("ScalarResult") def generate_property(name: str, klass: Type[FrameOrSeries]): @@ -606,8 +606,8 @@ def filter(self, func, dropna=True, *args, **kwargs): wrapper = lambda x: func(x, *args, **kwargs) # Interpret np.nan as False. - def true_and_notna(x, *args, **kwargs) -> bool: - b = wrapper(x, *args, **kwargs) + def true_and_notna(x) -> bool: + b = wrapper(x) return b and notna(b) try: @@ -1210,7 +1210,7 @@ def _wrap_applied_output(self, keys, values, not_indexed_same=False): # TODO: Remove when default dtype of empty Series is object kwargs = first_not_none._construct_axes_dict() backup = create_series_with_explicit_dtype( - **kwargs, dtype_if_empty=object + dtype_if_empty=object, **kwargs ) values = [x if (x is not None) else backup for x in values] From cb76a6d32ef7826d4036ec6d5ec38a82aa7df1b3 Mon Sep 17 00:00:00 2001 From: Johnny Pribyl Date: Fri, 28 Aug 2020 12:06:08 -0600 Subject: [PATCH 348/632] Issue35925 remove trailing commas (#35956) --- pandas/core/internals/concat.py | 6 +++--- pandas/core/internals/managers.py | 8 +++----- pandas/core/internals/ops.py | 2 +- pandas/core/nanops.py | 2 +- pandas/core/ops/docstrings.py | 2 +- pandas/core/reshape/concat.py | 2 +- pandas/core/reshape/pivot.py | 4 ++-- pandas/core/reshape/reshape.py | 8 +++----- 8 files changed, 15 insertions(+), 19 deletions(-) diff --git a/pandas/core/internals/concat.py b/pandas/core/internals/concat.py index 2c0d4931a7bf2..99a586f056b12 100644 --- a/pandas/core/internals/concat.py +++ b/pandas/core/internals/concat.py @@ -29,7 +29,7 @@ def concatenate_block_managers( - mgrs_indexers, axes, concat_axis: int, copy: bool, + mgrs_indexers, axes, concat_axis: int, copy: bool ) -> BlockManager: """ Concatenate block managers into one. 
@@ -76,7 +76,7 @@ def concatenate_block_managers( b = make_block(values, placement=placement, ndim=blk.ndim) else: b = make_block( - _concatenate_join_units(join_units, concat_axis, copy=copy,), + _concatenate_join_units(join_units, concat_axis, copy=copy), placement=placement, ) blocks.append(b) @@ -339,7 +339,7 @@ def _concatenate_join_units(join_units, concat_axis, copy): # 2D to put it a non-EA Block concat_values = np.atleast_2d(concat_values) else: - concat_values = concat_compat(to_concat, axis=concat_axis,) + concat_values = concat_compat(to_concat, axis=concat_axis) return concat_values diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py index a5372b14d210f..67ff3b9456ccf 100644 --- a/pandas/core/internals/managers.py +++ b/pandas/core/internals/managers.py @@ -491,7 +491,7 @@ def get_axe(block, qs, axes): values = values.take(indexer) return SingleBlockManager( - make_block(values, ndim=1, placement=np.arange(len(values))), axes[0], + make_block(values, ndim=1, placement=np.arange(len(values))), axes[0] ) def isna(self, func) -> "BlockManager": @@ -519,9 +519,7 @@ def where( def setitem(self, indexer, value) -> "BlockManager": return self.apply("setitem", indexer=indexer, value=value) - def putmask( - self, mask, new, align: bool = True, axis: int = 0, - ): + def putmask(self, mask, new, align: bool = True, axis: int = 0): transpose = self.ndim == 2 if align: @@ -1923,7 +1921,7 @@ def _compare_or_regex_search( """ def _check_comparison_types( - result: Union[ArrayLike, bool], a: ArrayLike, b: Union[Scalar, Pattern], + result: Union[ArrayLike, bool], a: ArrayLike, b: Union[Scalar, Pattern] ): """ Raises an error if the two arrays (a,b) cannot be compared. diff --git a/pandas/core/internals/ops.py b/pandas/core/internals/ops.py index ae4892c720d5b..05f5f9a00ae1b 100644 --- a/pandas/core/internals/ops.py +++ b/pandas/core/internals/ops.py @@ -11,7 +11,7 @@ BlockPairInfo = namedtuple( - "BlockPairInfo", ["lvals", "rvals", "locs", "left_ea", "right_ea", "rblk"], + "BlockPairInfo", ["lvals", "rvals", "locs", "left_ea", "right_ea", "rblk"] ) diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py index e7e28798d84a2..e3f16a3ef4f90 100644 --- a/pandas/core/nanops.py +++ b/pandas/core/nanops.py @@ -1329,7 +1329,7 @@ def _zero_out_fperr(arg): @disallow("M8", "m8") def nancorr( - a: np.ndarray, b: np.ndarray, method="pearson", min_periods: Optional[int] = None, + a: np.ndarray, b: np.ndarray, method="pearson", min_periods: Optional[int] = None ): """ a, b: ndarrays diff --git a/pandas/core/ops/docstrings.py b/pandas/core/ops/docstrings.py index 4ace873f029ae..99c2fefc97ae7 100644 --- a/pandas/core/ops/docstrings.py +++ b/pandas/core/ops/docstrings.py @@ -31,7 +31,7 @@ def _make_flex_doc(op_name, typ): base_doc = _flex_doc_SERIES if op_desc["reverse"]: base_doc += _see_also_reverse_SERIES.format( - reverse=op_desc["reverse"], see_also_desc=op_desc["see_also_desc"], + reverse=op_desc["reverse"], see_also_desc=op_desc["see_also_desc"] ) doc_no_examples = base_doc.format( desc=op_desc["desc"], diff --git a/pandas/core/reshape/concat.py b/pandas/core/reshape/concat.py index 9e8fb643791f2..299b68c6e71e0 100644 --- a/pandas/core/reshape/concat.py +++ b/pandas/core/reshape/concat.py @@ -500,7 +500,7 @@ def get_result(self): mgrs_indexers.append((obj._mgr, indexers)) new_data = concatenate_block_managers( - mgrs_indexers, self.new_axes, concat_axis=self.bm_axis, copy=self.copy, + mgrs_indexers, self.new_axes, concat_axis=self.bm_axis, copy=self.copy ) if not 
self.copy: new_data._consolidate_inplace() diff --git a/pandas/core/reshape/pivot.py b/pandas/core/reshape/pivot.py index 64a9e2dbf6d99..969ac56e41860 100644 --- a/pandas/core/reshape/pivot.py +++ b/pandas/core/reshape/pivot.py @@ -239,7 +239,7 @@ def _add_margins( elif values: marginal_result_set = _generate_marginal_results( - table, data, values, rows, cols, aggfunc, observed, margins_name, + table, data, values, rows, cols, aggfunc, observed, margins_name ) if not isinstance(marginal_result_set, tuple): return marginal_result_set @@ -308,7 +308,7 @@ def _compute_grand_margin(data, values, aggfunc, margins_name: str = "All"): def _generate_marginal_results( - table, data, values, rows, cols, aggfunc, observed, margins_name: str = "All", + table, data, values, rows, cols, aggfunc, observed, margins_name: str = "All" ): if len(cols) > 0: # need to "interleave" the margins diff --git a/pandas/core/reshape/reshape.py b/pandas/core/reshape/reshape.py index 391313fbb5283..e81dd8f0c735c 100644 --- a/pandas/core/reshape/reshape.py +++ b/pandas/core/reshape/reshape.py @@ -81,9 +81,7 @@ class _Unstacker: unstacked : DataFrame """ - def __init__( - self, index: MultiIndex, level=-1, constructor=None, - ): + def __init__(self, index: MultiIndex, level=-1, constructor=None): if constructor is None: constructor = DataFrame @@ -422,7 +420,7 @@ def unstack(obj, level, fill_value=None): if is_extension_array_dtype(obj.dtype): return _unstack_extension_series(obj, level, fill_value) unstacker = _Unstacker( - obj.index, level=level, constructor=obj._constructor_expanddim, + obj.index, level=level, constructor=obj._constructor_expanddim ) return unstacker.get_result( obj.values, value_columns=None, fill_value=fill_value @@ -436,7 +434,7 @@ def _unstack_frame(obj, level, fill_value=None): return obj._constructor(mgr) else: return _Unstacker( - obj.index, level=level, constructor=obj._constructor, + obj.index, level=level, constructor=obj._constructor ).get_result(obj._values, value_columns=obj.columns, fill_value=fill_value) From c413df6d6a85eb411cba39b907fe7a74bdf4be1f Mon Sep 17 00:00:00 2001 From: Johnny Pribyl Date: Fri, 28 Aug 2020 12:39:48 -0600 Subject: [PATCH 349/632] Issue35925 remove more trailing commas (#35959) * pandas/core/series.py * pandas/core/window/ewm.py * pandas/core/window/indexers.py * pandas/core/window/numba_.py * pandas/core/window/rolling.py * pandas/io/formats/css.py * pandas/io/formats/format.py * pandas/io/orc.py --- pandas/core/series.py | 4 ++-- pandas/core/window/ewm.py | 4 ++-- pandas/core/window/indexers.py | 6 +++--- pandas/core/window/numba_.py | 2 +- pandas/core/window/rolling.py | 2 +- pandas/io/formats/css.py | 9 ++------- pandas/io/formats/format.py | 6 +++--- pandas/io/orc.py | 2 +- 8 files changed, 15 insertions(+), 20 deletions(-) diff --git a/pandas/core/series.py b/pandas/core/series.py index 555024ad75f5e..dbc105be3c62b 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -962,12 +962,12 @@ def _get_values_tuple(self, key): # If key is contained, would have returned by now indexer, new_index = self.index.get_loc_level(key) return self._constructor(self._values[indexer], index=new_index).__finalize__( - self, + self ) def _get_values(self, indexer): try: - return self._constructor(self._mgr.get_slice(indexer)).__finalize__(self,) + return self._constructor(self._mgr.get_slice(indexer)).__finalize__(self) except ValueError: # mpl compat if we look up e.g. 
ser[:, np.newaxis]; # see tests.series.timeseries.test_mpl_compat_hack diff --git a/pandas/core/window/ewm.py b/pandas/core/window/ewm.py index c57c434dd3040..1913b51a68c15 100644 --- a/pandas/core/window/ewm.py +++ b/pandas/core/window/ewm.py @@ -362,7 +362,7 @@ def var(self, bias: bool = False, *args, **kwargs): def f(arg): return window_aggregations.ewmcov( - arg, arg, self.com, self.adjust, self.ignore_na, self.min_periods, bias, + arg, arg, self.com, self.adjust, self.ignore_na, self.min_periods, bias ) return self._apply(f) @@ -458,7 +458,7 @@ def _get_corr(X, Y): def _cov(x, y): return window_aggregations.ewmcov( - x, y, self.com, self.adjust, self.ignore_na, self.min_periods, 1, + x, y, self.com, self.adjust, self.ignore_na, self.min_periods, 1 ) x_values = X._prep_values() diff --git a/pandas/core/window/indexers.py b/pandas/core/window/indexers.py index 7c76a8e2a0b22..a21521f4ce8bb 100644 --- a/pandas/core/window/indexers.py +++ b/pandas/core/window/indexers.py @@ -40,7 +40,7 @@ class BaseIndexer: """Base class for window bounds calculations.""" def __init__( - self, index_array: Optional[np.ndarray] = None, window_size: int = 0, **kwargs, + self, index_array: Optional[np.ndarray] = None, window_size: int = 0, **kwargs ): """ Parameters @@ -105,7 +105,7 @@ def get_window_bounds( ) -> Tuple[np.ndarray, np.ndarray]: return calculate_variable_window_bounds( - num_values, self.window_size, min_periods, center, closed, self.index_array, + num_values, self.window_size, min_periods, center, closed, self.index_array ) @@ -316,7 +316,7 @@ def get_window_bounds( # Cannot use groupby_indicies as they might not be monotonic with the object # we're rolling over window_indicies = np.arange( - window_indicies_start, window_indicies_start + len(indices), + window_indicies_start, window_indicies_start + len(indices) ) window_indicies_start += len(indices) # Extend as we'll be slicing window like [start, end) diff --git a/pandas/core/window/numba_.py b/pandas/core/window/numba_.py index 5d35ec7457ab0..aec294c3c84c2 100644 --- a/pandas/core/window/numba_.py +++ b/pandas/core/window/numba_.py @@ -57,7 +57,7 @@ def generate_numba_apply_func( @numba.jit(nopython=nopython, nogil=nogil, parallel=parallel) def roll_apply( - values: np.ndarray, begin: np.ndarray, end: np.ndarray, minimum_periods: int, + values: np.ndarray, begin: np.ndarray, end: np.ndarray, minimum_periods: int ) -> np.ndarray: result = np.empty(len(begin)) for i in loop_range(len(result)): diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py index baabdf0fca29a..39fcfcbe2bff6 100644 --- a/pandas/core/window/rolling.py +++ b/pandas/core/window/rolling.py @@ -2117,7 +2117,7 @@ def count(self): @Substitution(name="rolling") @Appender(_shared_docs["apply"]) def apply( - self, func, raw=False, engine=None, engine_kwargs=None, args=None, kwargs=None, + self, func, raw=False, engine=None, engine_kwargs=None, args=None, kwargs=None ): return super().apply( func, diff --git a/pandas/io/formats/css.py b/pandas/io/formats/css.py index b40d2a57b8106..4d6f03489725f 100644 --- a/pandas/io/formats/css.py +++ b/pandas/io/formats/css.py @@ -20,9 +20,7 @@ def expand(self, prop, value: str): try: mapping = self.SIDE_SHORTHANDS[len(tokens)] except KeyError: - warnings.warn( - f'Could not expand "{prop}: {value}"', CSSWarning, - ) + warnings.warn(f'Could not expand "{prop}: {value}"', CSSWarning) return for key, idx in zip(self.SIDES, mapping): yield prop_fmt.format(key), tokens[idx] @@ -117,10 +115,7 @@ def __call__(self, 
declarations_str, inherited=None): props[prop] = self.size_to_pt( props[prop], em_pt=font_size, conversions=self.BORDER_WIDTH_RATIOS ) - for prop in [ - f"margin-{side}", - f"padding-{side}", - ]: + for prop in [f"margin-{side}", f"padding-{side}"]: if prop in props: # TODO: support % props[prop] = self.size_to_pt( diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py index 81990b3d505e1..461ef6823918e 100644 --- a/pandas/io/formats/format.py +++ b/pandas/io/formats/format.py @@ -80,7 +80,7 @@ FloatFormatType = Union[str, Callable, "EngFormatter"] ColspaceType = Mapping[Label, Union[str, int]] ColspaceArgType = Union[ - str, int, Sequence[Union[str, int]], Mapping[Label, Union[str, int]], + str, int, Sequence[Union[str, int]], Mapping[Label, Union[str, int]] ] common_docstring = """ @@ -741,7 +741,7 @@ def _to_str_columns(self) -> List[List[str]]: for i, c in enumerate(frame): fmt_values = self._format_col(i) fmt_values = _make_fixed_width( - fmt_values, self.justify, minimum=col_space.get(c, 0), adj=self.adj, + fmt_values, self.justify, minimum=col_space.get(c, 0), adj=self.adj ) stringified.append(fmt_values) else: @@ -1069,7 +1069,7 @@ def _get_formatted_index(self, frame: "DataFrame") -> List[str]: fmt_index = [ tuple( _make_fixed_width( - list(x), justify="left", minimum=col_space.get("", 0), adj=self.adj, + list(x), justify="left", minimum=col_space.get("", 0), adj=self.adj ) ) for x in fmt_index diff --git a/pandas/io/orc.py b/pandas/io/orc.py index ea79efd0579e5..b556732e4d116 100644 --- a/pandas/io/orc.py +++ b/pandas/io/orc.py @@ -12,7 +12,7 @@ def read_orc( - path: FilePathOrBuffer, columns: Optional[List[str]] = None, **kwargs, + path: FilePathOrBuffer, columns: Optional[List[str]] = None, **kwargs ) -> "DataFrame": """ Load an ORC object from the file path, returning a DataFrame. From 92e4bd5d41071f40b4af481f74881dbb58374510 Mon Sep 17 00:00:00 2001 From: Simon Hawkins Date: Sat, 29 Aug 2020 12:39:35 +0100 Subject: [PATCH 350/632] TYP: misc typing cleanups for #32911 (#35954) --- pandas/io/excel/_odswriter.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/pandas/io/excel/_odswriter.py b/pandas/io/excel/_odswriter.py index 0131240f99cf6..72f3d81b1c662 100644 --- a/pandas/io/excel/_odswriter.py +++ b/pandas/io/excel/_odswriter.py @@ -42,7 +42,7 @@ def write_cells( sheet_name: Optional[str] = None, startrow: int = 0, startcol: int = 0, - freeze_panes: Optional[List] = None, + freeze_panes: Optional[Tuple[int, int]] = None, ) -> None: """ Write the frame cells using odf @@ -215,14 +215,17 @@ def _process_style(self, style: Dict[str, Any]) -> str: self.book.styles.addElement(odf_style) return name - def _create_freeze_panes(self, sheet_name: str, freeze_panes: List[int]) -> None: - """Create freeze panes in the sheet + def _create_freeze_panes( + self, sheet_name: str, freeze_panes: Tuple[int, int] + ) -> None: + """ + Create freeze panes in the sheet. 
Parameters ---------- sheet_name : str Name of the spreadsheet - freeze_panes : list + freeze_panes : tuple of (int, int) Freeze pane location x and y """ from odf.config import ( From b1da4a6dc3c9200ad0b1fa0e384496ee3fdc6f17 Mon Sep 17 00:00:00 2001 From: Simon Hawkins Date: Sun, 30 Aug 2020 00:57:01 +0100 Subject: [PATCH 351/632] TYP: misc cleanup in core\generic.py (#35963) --- pandas/core/generic.py | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/pandas/core/generic.py b/pandas/core/generic.py index fea3efedb6abb..dd7b02d98ad42 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -387,7 +387,7 @@ def _get_block_manager_axis(cls, axis: Axis) -> int: return m - axis return axis - def _get_axis_resolvers(self, axis: str) -> Dict[str, ABCSeries]: + def _get_axis_resolvers(self, axis: str) -> Dict[str, Union["Series", MultiIndex]]: # index or columns axis_index = getattr(self, axis) d = dict() @@ -417,10 +417,10 @@ def _get_axis_resolvers(self, axis: str) -> Dict[str, ABCSeries]: d[axis] = dindex return d - def _get_index_resolvers(self) -> Dict[str, ABCSeries]: + def _get_index_resolvers(self) -> Dict[str, Union["Series", MultiIndex]]: from pandas.core.computation.parsing import clean_column_name - d: Dict[str, ABCSeries] = {} + d: Dict[str, Union["Series", MultiIndex]] = {} for axis_name in self._AXIS_ORDERS: d.update(self._get_axis_resolvers(axis_name)) @@ -4703,14 +4703,15 @@ def filter( return self.reindex(**{name: [r for r in items if r in labels]}) elif like: - def f(x): + def f(x) -> bool: + assert like is not None # needed for mypy return like in ensure_str(x) values = labels.map(f) return self.loc(axis=axis)[values] elif regex: - def f(x): + def f(x) -> bool: return matcher.search(ensure_str(x)) is not None matcher = re.compile(regex) @@ -6556,7 +6557,10 @@ def replace( regex = True items = list(to_replace.items()) - keys, values = zip(*items) if items else ([], []) + if items: + keys, values = zip(*items) + else: + keys, values = ([], []) are_mappings = [is_dict_like(v) for v in values] From 5368397a5fc63de1acb24cc6acc8ce7d061d3063 Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Sun, 30 Aug 2020 04:59:00 -0700 Subject: [PATCH 352/632] TYP: annotate plotting based on _get_axe_freq (#35960) --- pandas/plotting/_matplotlib/core.py | 7 +++++-- pandas/plotting/_matplotlib/timeseries.py | 21 +++++++++++---------- 2 files changed, 16 insertions(+), 12 deletions(-) diff --git a/pandas/plotting/_matplotlib/core.py b/pandas/plotting/_matplotlib/core.py index b490e07e43753..4d23a5e5fc249 100644 --- a/pandas/plotting/_matplotlib/core.py +++ b/pandas/plotting/_matplotlib/core.py @@ -1,5 +1,5 @@ import re -from typing import List, Optional +from typing import TYPE_CHECKING, List, Optional import warnings from matplotlib.artist import Artist @@ -43,6 +43,9 @@ table, ) +if TYPE_CHECKING: + from matplotlib.axes import Axes + class MPLPlot: """ @@ -1147,7 +1150,7 @@ def _plot(cls, ax, x, y, style=None, column_num=None, stacking_id=None, **kwds): return lines @classmethod - def _ts_plot(cls, ax, x, data, style=None, **kwds): + def _ts_plot(cls, ax: "Axes", x, data, style=None, **kwds): from pandas.plotting._matplotlib.timeseries import ( _decorate_axes, _maybe_resample, diff --git a/pandas/plotting/_matplotlib/timeseries.py b/pandas/plotting/_matplotlib/timeseries.py index 193602e1baf4a..fd89a093d25a4 100644 --- a/pandas/plotting/_matplotlib/timeseries.py +++ b/pandas/plotting/_matplotlib/timeseries.py @@ -24,14 +24,15 @@ from 
pandas.tseries.frequencies import get_period_alias, is_subperiod, is_superperiod if TYPE_CHECKING: - from pandas import Index, Series # noqa:F401 + from matplotlib.axes import Axes + from pandas import Index, Series # noqa:F401 # --------------------------------------------------------------------- # Plotting functions and monkey patches -def _maybe_resample(series: "Series", ax, kwargs): +def _maybe_resample(series: "Series", ax: "Axes", kwargs): # resample against axes freq if necessary freq, ax_freq = _get_freq(ax, series) @@ -74,7 +75,7 @@ def _is_sup(f1: str, f2: str) -> bool: ) -def _upsample_others(ax, freq, kwargs): +def _upsample_others(ax: "Axes", freq, kwargs): legend = ax.get_legend() lines, labels = _replot_ax(ax, freq, kwargs) _replot_ax(ax, freq, kwargs) @@ -97,7 +98,7 @@ def _upsample_others(ax, freq, kwargs): ax.legend(lines, labels, loc="best", title=title) -def _replot_ax(ax, freq, kwargs): +def _replot_ax(ax: "Axes", freq, kwargs): data = getattr(ax, "_plot_data", None) # clear current axes and data @@ -127,7 +128,7 @@ def _replot_ax(ax, freq, kwargs): return lines, labels -def _decorate_axes(ax, freq, kwargs): +def _decorate_axes(ax: "Axes", freq, kwargs): """Initialize axes for time-series plotting""" if not hasattr(ax, "_plot_data"): ax._plot_data = [] @@ -143,7 +144,7 @@ def _decorate_axes(ax, freq, kwargs): ax.date_axis_info = None -def _get_ax_freq(ax): +def _get_ax_freq(ax: "Axes"): """ Get the freq attribute of the ax object if set. Also checks shared axes (eg when using secondary yaxis, sharex=True @@ -174,7 +175,7 @@ def _get_period_alias(freq) -> Optional[str]: return freq -def _get_freq(ax, series: "Series"): +def _get_freq(ax: "Axes", series: "Series"): # get frequency from data freq = getattr(series.index, "freq", None) if freq is None: @@ -192,7 +193,7 @@ def _get_freq(ax, series: "Series"): return freq, ax_freq -def _use_dynamic_x(ax, data: "FrameOrSeriesUnion") -> bool: +def _use_dynamic_x(ax: "Axes", data: FrameOrSeriesUnion) -> bool: freq = _get_index_freq(data.index) ax_freq = _get_ax_freq(ax) @@ -234,7 +235,7 @@ def _get_index_freq(index: "Index") -> Optional[BaseOffset]: return freq -def _maybe_convert_index(ax, data): +def _maybe_convert_index(ax: "Axes", data): # tsplot converts automatically, but don't want to convert index # over and over for DataFrames if isinstance(data.index, (ABCDatetimeIndex, ABCPeriodIndex)): @@ -264,7 +265,7 @@ def _maybe_convert_index(ax, data): # Do we need the rest for convenience? 
-def _format_coord(freq, t, y): +def _format_coord(freq, t, y) -> str: time_period = Period(ordinal=int(t), freq=freq) return f"t = {time_period} y = {y:8f}" From 9632f2e4c382d7f682140bcbcf08873fa49abea7 Mon Sep 17 00:00:00 2001 From: Terji Petersen Date: Sun, 30 Aug 2020 13:00:24 +0100 Subject: [PATCH 353/632] TYP: Remove NDFrame._add_series_or_dataframe_operations (#35957) --- pandas/core/frame.py | 1 - pandas/core/generic.py | 295 +++++++-------------------- pandas/core/series.py | 1 - pandas/core/shared_docs.py | 363 +++++++++++++++++++++++----------- pandas/core/window/common.py | 2 +- pandas/core/window/rolling.py | 4 +- 6 files changed, 323 insertions(+), 343 deletions(-) diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 606bd4cc3b52d..95bd757f1994e 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -9306,7 +9306,6 @@ def _AXIS_NAMES(self) -> Dict[int, str]: DataFrame._add_numeric_operations() -DataFrame._add_series_or_dataframe_operations() ops.add_flex_arithmetic_methods(DataFrame) ops.add_special_arithmetic_methods(DataFrame) diff --git a/pandas/core/generic.py b/pandas/core/generic.py index dd7b02d98ad42..3bad2d6dd18b9 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -6,7 +6,6 @@ import operator import pickle import re -from textwrap import dedent from typing import ( TYPE_CHECKING, Any, @@ -101,17 +100,22 @@ from pandas.core.missing import find_valid_index from pandas.core.ops import _align_method_FRAME from pandas.core.shared_docs import _shared_docs +from pandas.core.window import Expanding, ExponentialMovingWindow, Rolling, Window from pandas.io.formats import format as fmt from pandas.io.formats.format import DataFrameFormatter, format_percentiles from pandas.io.formats.printing import pprint_thing if TYPE_CHECKING: + from pandas._libs.tslibs import BaseOffset + from pandas.core.resample import Resampler from pandas.core.series import Series # noqa: F401 + from pandas.core.window.indexers import BaseIndexer # goal is to be able to define the docs close to function, while still being # able to share +_shared_docs = {**_shared_docs} _shared_doc_kwargs = dict( axes="keywords for axes", klass="Series/DataFrame", @@ -5128,51 +5132,6 @@ def pipe(self, func, *args, **kwargs): """ return com.pipe(self, func, *args, **kwargs) - _shared_docs["aggregate"] = dedent( - """ - Aggregate using one or more operations over the specified axis. - {versionadded} - Parameters - ---------- - func : function, str, list or dict - Function to use for aggregating the data. If a function, must either - work when passed a {klass} or when passed to {klass}.apply. - - Accepted combinations are: - - - function - - string function name - - list of functions and/or function names, e.g. ``[np.sum, 'mean']`` - - dict of axis labels -> functions, function names or list of such. - {axis} - *args - Positional arguments to pass to `func`. - **kwargs - Keyword arguments to pass to `func`. - - Returns - ------- - scalar, Series or DataFrame - - The return can be: - - * scalar : when Series.agg is called with single function - * Series : when DataFrame.agg is called with a single function - * DataFrame : when DataFrame.agg is called with several functions - - Return scalar, Series or DataFrame. - {see_also} - Notes - ----- - `agg` is an alias for `aggregate`. Use the alias. - - In pandas, agg, as most operations just ignores the missing values, - and returns the operation only considering the values that are present. 
- - A passed user-defined-function will be passed a Series for evaluation. - {examples}""" - ) - # ---------------------------------------------------------------------- # Attribute access @@ -7452,77 +7411,6 @@ def clip( return result - _shared_docs[ - "groupby" - ] = """ - Group %(klass)s using a mapper or by a Series of columns. - - A groupby operation involves some combination of splitting the - object, applying a function, and combining the results. This can be - used to group large amounts of data and compute operations on these - groups. - - Parameters - ---------- - by : mapping, function, label, or list of labels - Used to determine the groups for the groupby. - If ``by`` is a function, it's called on each value of the object's - index. If a dict or Series is passed, the Series or dict VALUES - will be used to determine the groups (the Series' values are first - aligned; see ``.align()`` method). If an ndarray is passed, the - values are used as-is determine the groups. A label or list of - labels may be passed to group by the columns in ``self``. Notice - that a tuple is interpreted as a (single) key. - axis : {0 or 'index', 1 or 'columns'}, default 0 - Split along rows (0) or columns (1). - level : int, level name, or sequence of such, default None - If the axis is a MultiIndex (hierarchical), group by a particular - level or levels. - as_index : bool, default True - For aggregated output, return object with group labels as the - index. Only relevant for DataFrame input. as_index=False is - effectively "SQL-style" grouped output. - sort : bool, default True - Sort group keys. Get better performance by turning this off. - Note this does not influence the order of observations within each - group. Groupby preserves the order of rows within each group. - group_keys : bool, default True - When calling apply, add group keys to index to identify pieces. - squeeze : bool, default False - Reduce the dimensionality of the return type if possible, - otherwise return a consistent type. - - .. deprecated:: 1.1.0 - - observed : bool, default False - This only applies if any of the groupers are Categoricals. - If True: only show observed values for categorical groupers. - If False: show all values for categorical groupers. - - .. versionadded:: 0.23.0 - dropna : bool, default True - If True, and if group keys contain NA values, NA values together - with row/column will be dropped. - If False, NA values will also be treated as the key in groups - - .. versionadded:: 1.1.0 - - Returns - ------- - %(klass)sGroupBy - Returns a groupby object that contains information about the groups. - - See Also - -------- - resample : Convenience method for frequency conversion and resampling - of time series. - - Notes - ----- - See the `user guide - `_ for more. - """ - def asfreq( self: FrameOrSeries, freq, @@ -8431,35 +8319,6 @@ def ranker(data): return ranker(data) - _shared_docs[ - "compare" - ] = """ - Compare to another %(klass)s and show the differences. - - .. versionadded:: 1.1.0 - - Parameters - ---------- - other : %(klass)s - Object to compare with. - - align_axis : {0 or 'index', 1 or 'columns'}, default 1 - Determine which axis to align the comparison on. - - * 0, or 'index' : Resulting differences are stacked vertically - with rows drawn alternately from self and other. - * 1, or 'columns' : Resulting differences are aligned horizontally - with columns drawn alternately from self and other. - - keep_shape : bool, default False - If true, all rows and columns are kept. 
- Otherwise, only the ones with different values are kept. - - keep_equal : bool, default False - If true, the result keeps values that are equal. - Otherwise, equal values are shown as NaNs. - """ - @Appender(_shared_docs["compare"] % _shared_doc_kwargs) def compare( self, @@ -10589,45 +10448,21 @@ def mad(self, axis=None, skipna=None, level=None): examples=_min_examples, ) - @classmethod - def _add_series_or_dataframe_operations(cls): - """ - Add the series or dataframe only operations to the cls; evaluate - the doc strings again. - """ - from pandas.core.window import ( - Expanding, - ExponentialMovingWindow, - Rolling, - Window, - ) - - @doc(Rolling) - def rolling( - self, - window, - min_periods=None, - center=False, - win_type=None, - on=None, - axis=0, - closed=None, - ): - axis = self._get_axis_number(axis) - - if win_type is not None: - return Window( - self, - window=window, - min_periods=min_periods, - center=center, - win_type=win_type, - on=on, - axis=axis, - closed=closed, - ) + @doc(Rolling) + def rolling( + self, + window: "Union[int, timedelta, BaseOffset, BaseIndexer]", + min_periods: Optional[int] = None, + center: bool_t = False, + win_type: Optional[str] = None, + on: Optional[str] = None, + axis: Axis = 0, + closed: Optional[str] = None, + ): + axis = self._get_axis_number(axis) - return Rolling( + if win_type is not None: + return Window( self, window=window, min_periods=min_periods, @@ -10638,53 +10473,59 @@ def rolling( closed=closed, ) - cls.rolling = rolling - - @doc(Expanding) - def expanding(self, min_periods=1, center=None, axis=0): - axis = self._get_axis_number(axis) - if center is not None: - warnings.warn( - "The `center` argument on `expanding` " - "will be removed in the future", - FutureWarning, - stacklevel=2, - ) - else: - center = False + return Rolling( + self, + window=window, + min_periods=min_periods, + center=center, + win_type=win_type, + on=on, + axis=axis, + closed=closed, + ) - return Expanding(self, min_periods=min_periods, center=center, axis=axis) + @doc(Expanding) + def expanding( + self, min_periods: int = 1, center: Optional[bool_t] = None, axis: Axis = 0 + ) -> Expanding: + axis = self._get_axis_number(axis) + if center is not None: + warnings.warn( + "The `center` argument on `expanding` will be removed in the future", + FutureWarning, + stacklevel=2, + ) + else: + center = False - cls.expanding = expanding + return Expanding(self, min_periods=min_periods, center=center, axis=axis) - @doc(ExponentialMovingWindow) - def ewm( + @doc(ExponentialMovingWindow) + def ewm( + self, + com: Optional[float] = None, + span: Optional[float] = None, + halflife: Optional[Union[float, TimedeltaConvertibleTypes]] = None, + alpha: Optional[float] = None, + min_periods: int = 0, + adjust: bool_t = True, + ignore_na: bool_t = False, + axis: Axis = 0, + times: Optional[Union[str, np.ndarray, FrameOrSeries]] = None, + ) -> ExponentialMovingWindow: + axis = self._get_axis_number(axis) + return ExponentialMovingWindow( self, - com=None, - span=None, - halflife=None, - alpha=None, - min_periods=0, - adjust=True, - ignore_na=False, - axis=0, - times=None, - ): - axis = self._get_axis_number(axis) - return ExponentialMovingWindow( - self, - com=com, - span=span, - halflife=halflife, - alpha=alpha, - min_periods=min_periods, - adjust=adjust, - ignore_na=ignore_na, - axis=axis, - times=times, - ) - - cls.ewm = ewm + com=com, + span=span, + halflife=halflife, + alpha=alpha, + min_periods=min_periods, + adjust=adjust, + ignore_na=ignore_na, + axis=axis, + 
times=times, + ) @doc(klass=_shared_doc_kwargs["klass"], axis="") def transform(self, func, *args, **kwargs): diff --git a/pandas/core/series.py b/pandas/core/series.py index dbc105be3c62b..a8a2d300fa168 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -5000,7 +5000,6 @@ def to_period(self, freq=None, copy=True) -> "Series": Series._add_numeric_operations() -Series._add_series_or_dataframe_operations() # Add arithmetic! ops.add_flex_arithmetic_methods(Series) diff --git a/pandas/core/shared_docs.py b/pandas/core/shared_docs.py index b81942f062b19..0aaccb47efc44 100644 --- a/pandas/core/shared_docs.py +++ b/pandas/core/shared_docs.py @@ -2,117 +2,258 @@ _shared_docs: Dict[str, str] = dict() +_shared_docs[ + "aggregate" +] = """\ +Aggregate using one or more operations over the specified axis. +{versionadded} +Parameters +---------- +func : function, str, list or dict + Function to use for aggregating the data. If a function, must either + work when passed a {klass} or when passed to {klass}.apply. + + Accepted combinations are: + + - function + - string function name + - list of functions and/or function names, e.g. ``[np.sum, 'mean']`` + - dict of axis labels -> functions, function names or list of such. +{axis} +*args + Positional arguments to pass to `func`. +**kwargs + Keyword arguments to pass to `func`. + +Returns +------- +scalar, Series or DataFrame + + The return can be: + + * scalar : when Series.agg is called with single function + * Series : when DataFrame.agg is called with a single function + * DataFrame : when DataFrame.agg is called with several functions + + Return scalar, Series or DataFrame. +{see_also} +Notes +----- +`agg` is an alias for `aggregate`. Use the alias. + +A passed user-defined-function will be passed a Series for evaluation. +{examples}""" + +_shared_docs[ + "compare" +] = """\ +Compare to another %(klass)s and show the differences. + +.. versionadded:: 1.1.0 + +Parameters +---------- +other : %(klass)s + Object to compare with. + +align_axis : {0 or 'index', 1 or 'columns'}, default 1 + Determine which axis to align the comparison on. + + * 0, or 'index' : Resulting differences are stacked vertically + with rows drawn alternately from self and other. + * 1, or 'columns' : Resulting differences are aligned horizontally + with columns drawn alternately from self and other. + +keep_shape : bool, default False + If true, all rows and columns are kept. + Otherwise, only the ones with different values are kept. + +keep_equal : bool, default False + If true, the result keeps values that are equal. + Otherwise, equal values are shown as NaNs. +""" + +_shared_docs[ + "groupby" +] = """\ +Group %(klass)s using a mapper or by a Series of columns. + +A groupby operation involves some combination of splitting the +object, applying a function, and combining the results. This can be +used to group large amounts of data and compute operations on these +groups. + +Parameters +---------- +by : mapping, function, label, or list of labels + Used to determine the groups for the groupby. + If ``by`` is a function, it's called on each value of the object's + index. If a dict or Series is passed, the Series or dict VALUES + will be used to determine the groups (the Series' values are first + aligned; see ``.align()`` method). If an ndarray is passed, the + values are used as-is determine the groups. A label or list of + labels may be passed to group by the columns in ``self``. Notice + that a tuple is interpreted as a (single) key. 
+axis : {0 or 'index', 1 or 'columns'}, default 0 + Split along rows (0) or columns (1). +level : int, level name, or sequence of such, default None + If the axis is a MultiIndex (hierarchical), group by a particular + level or levels. +as_index : bool, default True + For aggregated output, return object with group labels as the + index. Only relevant for DataFrame input. as_index=False is + effectively "SQL-style" grouped output. +sort : bool, default True + Sort group keys. Get better performance by turning this off. + Note this does not influence the order of observations within each + group. Groupby preserves the order of rows within each group. +group_keys : bool, default True + When calling apply, add group keys to index to identify pieces. +squeeze : bool, default False + Reduce the dimensionality of the return type if possible, + otherwise return a consistent type. + + .. deprecated:: 1.1.0 + +observed : bool, default False + This only applies if any of the groupers are Categoricals. + If True: only show observed values for categorical groupers. + If False: show all values for categorical groupers. + + .. versionadded:: 0.23.0 +dropna : bool, default True + If True, and if group keys contain NA values, NA values together + with row/column will be dropped. + If False, NA values will also be treated as the key in groups + + .. versionadded:: 1.1.0 + +Returns +------- +%(klass)sGroupBy + Returns a groupby object that contains information about the groups. + +See Also +-------- +resample : Convenience method for frequency conversion and resampling + of time series. + +Notes +----- +See the `user guide +`_ for more. +""" _shared_docs[ "melt" -] = """ - Unpivot a DataFrame from wide to long format, optionally leaving identifiers set. - - This function is useful to massage a DataFrame into a format where one - or more columns are identifier variables (`id_vars`), while all other - columns, considered measured variables (`value_vars`), are "unpivoted" to - the row axis, leaving just two non-identifier columns, 'variable' and - 'value'. - %(versionadded)s - Parameters - ---------- - id_vars : tuple, list, or ndarray, optional - Column(s) to use as identifier variables. - value_vars : tuple, list, or ndarray, optional - Column(s) to unpivot. If not specified, uses all columns that - are not set as `id_vars`. - var_name : scalar - Name to use for the 'variable' column. If None it uses - ``frame.columns.name`` or 'variable'. - value_name : scalar, default 'value' - Name to use for the 'value' column. - col_level : int or str, optional - If columns are a MultiIndex then use this level to melt. - ignore_index : bool, default True - If True, original index is ignored. If False, the original index is retained. - Index labels will be repeated as necessary. - - .. versionadded:: 1.1.0 - - Returns - ------- - DataFrame - Unpivoted DataFrame. - - See Also - -------- - %(other)s : Identical method. - pivot_table : Create a spreadsheet-style pivot table as a DataFrame. - DataFrame.pivot : Return reshaped DataFrame organized - by given index / column values. - DataFrame.explode : Explode a DataFrame from list-like - columns to long format. - - Examples - -------- - >>> df = pd.DataFrame({'A': {0: 'a', 1: 'b', 2: 'c'}, - ... 'B': {0: 1, 1: 3, 2: 5}, - ... 
'C': {0: 2, 1: 4, 2: 6}}) - >>> df - A B C - 0 a 1 2 - 1 b 3 4 - 2 c 5 6 - - >>> %(caller)sid_vars=['A'], value_vars=['B']) - A variable value - 0 a B 1 - 1 b B 3 - 2 c B 5 - - >>> %(caller)sid_vars=['A'], value_vars=['B', 'C']) - A variable value - 0 a B 1 - 1 b B 3 - 2 c B 5 - 3 a C 2 - 4 b C 4 - 5 c C 6 - - The names of 'variable' and 'value' columns can be customized: - - >>> %(caller)sid_vars=['A'], value_vars=['B'], - ... var_name='myVarname', value_name='myValname') - A myVarname myValname - 0 a B 1 - 1 b B 3 - 2 c B 5 - - Original index values can be kept around: - - >>> %(caller)sid_vars=['A'], value_vars=['B', 'C'], ignore_index=False) - A variable value - 0 a B 1 - 1 b B 3 - 2 c B 5 - 0 a C 2 - 1 b C 4 - 2 c C 6 - - If you have multi-index columns: - - >>> df.columns = [list('ABC'), list('DEF')] - >>> df - A B C - D E F - 0 a 1 2 - 1 b 3 4 - 2 c 5 6 - - >>> %(caller)scol_level=0, id_vars=['A'], value_vars=['B']) - A variable value - 0 a B 1 - 1 b B 3 - 2 c B 5 - - >>> %(caller)sid_vars=[('A', 'D')], value_vars=[('B', 'E')]) - (A, D) variable_0 variable_1 value - 0 a B E 1 - 1 b B E 3 - 2 c B E 5 - """ +] = """\ +Unpivot a DataFrame from wide to long format, optionally leaving identifiers set. + +This function is useful to massage a DataFrame into a format where one +or more columns are identifier variables (`id_vars`), while all other +columns, considered measured variables (`value_vars`), are "unpivoted" to +the row axis, leaving just two non-identifier columns, 'variable' and +'value'. +%(versionadded)s +Parameters +---------- +id_vars : tuple, list, or ndarray, optional + Column(s) to use as identifier variables. +value_vars : tuple, list, or ndarray, optional + Column(s) to unpivot. If not specified, uses all columns that + are not set as `id_vars`. +var_name : scalar + Name to use for the 'variable' column. If None it uses + ``frame.columns.name`` or 'variable'. +value_name : scalar, default 'value' + Name to use for the 'value' column. +col_level : int or str, optional + If columns are a MultiIndex then use this level to melt. +ignore_index : bool, default True + If True, original index is ignored. If False, the original index is retained. + Index labels will be repeated as necessary. + + .. versionadded:: 1.1.0 + +Returns +------- +DataFrame + Unpivoted DataFrame. + +See Also +-------- +%(other)s : Identical method. +pivot_table : Create a spreadsheet-style pivot table as a DataFrame. +DataFrame.pivot : Return reshaped DataFrame organized + by given index / column values. +DataFrame.explode : Explode a DataFrame from list-like + columns to long format. + +Examples +-------- +>>> df = pd.DataFrame({'A': {0: 'a', 1: 'b', 2: 'c'}, +... 'B': {0: 1, 1: 3, 2: 5}, +... 'C': {0: 2, 1: 4, 2: 6}}) +>>> df + A B C +0 a 1 2 +1 b 3 4 +2 c 5 6 + +>>> %(caller)sid_vars=['A'], value_vars=['B']) + A variable value +0 a B 1 +1 b B 3 +2 c B 5 + +>>> %(caller)sid_vars=['A'], value_vars=['B', 'C']) + A variable value +0 a B 1 +1 b B 3 +2 c B 5 +3 a C 2 +4 b C 4 +5 c C 6 + +The names of 'variable' and 'value' columns can be customized: + +>>> %(caller)sid_vars=['A'], value_vars=['B'], +... 
var_name='myVarname', value_name='myValname') + A myVarname myValname +0 a B 1 +1 b B 3 +2 c B 5 + +Original index values can be kept around: + +>>> %(caller)sid_vars=['A'], value_vars=['B', 'C'], ignore_index=False) + A variable value +0 a B 1 +1 b B 3 +2 c B 5 +0 a C 2 +1 b C 4 +2 c C 6 + +If you have multi-index columns: + +>>> df.columns = [list('ABC'), list('DEF')] +>>> df + A B C + D E F +0 a 1 2 +1 b 3 4 +2 c 5 6 + +>>> %(caller)scol_level=0, id_vars=['A'], value_vars=['B']) + A variable value +0 a B 1 +1 b B 3 +2 c B 5 + +>>> %(caller)sid_vars=[('A', 'D')], value_vars=[('B', 'E')]) + (A, D) variable_0 variable_1 value +0 a B E 1 +1 b B E 3 +2 c B E 5 +""" diff --git a/pandas/core/window/common.py b/pandas/core/window/common.py index 51a067427e867..2f3058db4493b 100644 --- a/pandas/core/window/common.py +++ b/pandas/core/window/common.py @@ -7,9 +7,9 @@ from pandas.core.dtypes.generic import ABCDataFrame, ABCSeries -from pandas.core.generic import _shared_docs from pandas.core.groupby.base import GroupByMixin from pandas.core.indexes.api import MultiIndex +from pandas.core.shared_docs import _shared_docs _shared_docs = dict(**_shared_docs) _doc_template = """ diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py index 39fcfcbe2bff6..04509a40b98df 100644 --- a/pandas/core/window/rolling.py +++ b/pandas/core/window/rolling.py @@ -22,7 +22,7 @@ from pandas._libs.tslibs import BaseOffset, to_offset import pandas._libs.window.aggregations as window_aggregations -from pandas._typing import ArrayLike, Axis, FrameOrSeriesUnion, Label +from pandas._typing import ArrayLike, Axis, FrameOrSeries, FrameOrSeriesUnion, Label from pandas.compat._optional import import_optional_dependency from pandas.compat.numpy import function as nv from pandas.util._decorators import Appender, Substitution, cache_readonly, doc @@ -159,7 +159,7 @@ class _Window(PandasObject, ShallowMixin, SelectionMixin): def __init__( self, - obj: FrameOrSeriesUnion, + obj: FrameOrSeries, window=None, min_periods: Optional[int] = None, center: bool = False, From 48098cee9af7437879a21a863d5096e40e041f17 Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Sun, 30 Aug 2020 05:01:51 -0700 Subject: [PATCH 354/632] TYP: Annotations (#35933) --- pandas/core/algorithms.py | 11 +++++++---- pandas/core/arrays/base.py | 12 ++++++++++++ pandas/core/groupby/base.py | 9 +++++++-- pandas/core/indexes/base.py | 7 ++----- pandas/core/indexes/datetimelike.py | 4 +++- pandas/core/indexes/numeric.py | 2 ++ pandas/core/internals/blocks.py | 2 +- pandas/core/internals/managers.py | 4 ++-- 8 files changed, 36 insertions(+), 15 deletions(-) diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py index 2a6e983eff3ee..6d6bb21165814 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py @@ -10,7 +10,7 @@ import numpy as np from pandas._libs import Timestamp, algos, hashtable as htable, iNaT, lib -from pandas._typing import AnyArrayLike, ArrayLike, DtypeObj +from pandas._typing import AnyArrayLike, ArrayLike, DtypeObj, FrameOrSeriesUnion from pandas.util._decorators import doc from pandas.core.dtypes.cast import ( @@ -58,7 +58,7 @@ from pandas.core.indexers import validate_indices if TYPE_CHECKING: - from pandas import Series + from pandas import DataFrame, Series _shared_docs: Dict[str, str] = {} @@ -1101,6 +1101,9 @@ def __init__(self, obj, n: int, keep: str): if self.keep not in ("first", "last", "all"): raise ValueError('keep must be either "first", "last" or "all"') + def compute(self, method: str) -> 
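The reconstruct_func comment added just above belongs to the named-aggregation path of DataFrame.agg, which the commit message ties to #29116: keyword arguments of (column, function) pairs both select and relabel the output, and relabel_result then reorders the raw results back into keyword order. A small usage sketch, assuming the behavior documented for pandas >= 1.1 (the example frame is hypothetical):

    import pandas as pd

    df = pd.DataFrame({"A": [1, 2], "B": [3, 4]})
    # Each keyword becomes a row label in the aggregated result;
    # cells not covered by that keyword's (column, func) pair are NaN.
    print(df.agg(a_max=("A", "max"), b_min=("B", "min")))
    #          A    B
    # a_max  2.0  NaN
    # b_min  NaN  3.0
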
FrameOrSeriesUnion: + raise NotImplementedError + def nlargest(self): return self.compute("nlargest") @@ -1133,7 +1136,7 @@ class SelectNSeries(SelectN): nordered : Series """ - def compute(self, method): + def compute(self, method: str) -> "Series": n = self.n dtype = self.obj.dtype @@ -1207,7 +1210,7 @@ def __init__(self, obj, n: int, keep: str, columns): columns = list(columns) self.columns = columns - def compute(self, method): + def compute(self, method: str) -> "DataFrame": from pandas import Int64Index diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py index d85647edc3b81..8193d65b3b30c 100644 --- a/pandas/core/arrays/base.py +++ b/pandas/core/arrays/base.py @@ -1167,6 +1167,10 @@ class ExtensionOpsMixin: with NumPy arrays. """ + @classmethod + def _create_arithmetic_method(cls, op): + raise AbstractMethodError(cls) + @classmethod def _add_arithmetic_ops(cls): cls.__add__ = cls._create_arithmetic_method(operator.add) @@ -1186,6 +1190,10 @@ def _add_arithmetic_ops(cls): cls.__divmod__ = cls._create_arithmetic_method(divmod) cls.__rdivmod__ = cls._create_arithmetic_method(ops.rdivmod) + @classmethod + def _create_comparison_method(cls, op): + raise AbstractMethodError(cls) + @classmethod def _add_comparison_ops(cls): cls.__eq__ = cls._create_comparison_method(operator.eq) @@ -1195,6 +1203,10 @@ def _add_comparison_ops(cls): cls.__le__ = cls._create_comparison_method(operator.le) cls.__ge__ = cls._create_comparison_method(operator.ge) + @classmethod + def _create_logical_method(cls, op): + raise AbstractMethodError(cls) + @classmethod def _add_logical_ops(cls): cls.__and__ = cls._create_logical_method(operator.and_) diff --git a/pandas/core/groupby/base.py b/pandas/core/groupby/base.py index e71b2f94c8014..999873e7b81e4 100644 --- a/pandas/core/groupby/base.py +++ b/pandas/core/groupby/base.py @@ -4,17 +4,22 @@ SeriesGroupBy and the DataFrameGroupBy objects. """ import collections +from typing import List from pandas.core.dtypes.common import is_list_like, is_scalar +from pandas.core.base import PandasObject + OutputKey = collections.namedtuple("OutputKey", ["label", "position"]) -class GroupByMixin: +class GroupByMixin(PandasObject): """ Provide the groupby facilities to the mixed object. """ + _attributes: List[str] + def _gotitem(self, key, ndim, subset=None): """ Sub-classes to define. Return a sliced object. 
@@ -22,7 +27,7 @@ def _gotitem(self, key, ndim, subset=None): Parameters ---------- key : string / list of selections - ndim : 1,2 + ndim : {1, 2} requested ndim of result subset : object, default None subset to act on diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index b1e5d5627e3f6..a07c3328def54 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -3541,10 +3541,7 @@ def _join_multi(self, other, how, return_indexers=True): if not overlap: raise ValueError("cannot join with no overlapping index names") - self_is_mi = isinstance(self, ABCMultiIndex) - other_is_mi = isinstance(other, ABCMultiIndex) - - if self_is_mi and other_is_mi: + if isinstance(self, MultiIndex) and isinstance(other, MultiIndex): # Drop the non-matching levels from left and right respectively ldrop_names = list(self_names - overlap) @@ -3590,7 +3587,7 @@ def _join_multi(self, other, how, return_indexers=True): # Case where only one index is multi # make the indices into mi's that match flip_order = False - if self_is_mi: + if isinstance(self, MultiIndex): self, other = other, self flip_order = True # flip if join method is right or left diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index efe1a853a9a76..e7e93068d9175 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -1,7 +1,7 @@ """ Base and utility classes for tseries type pandas objects. """ -from datetime import datetime +from datetime import datetime, tzinfo from typing import Any, List, Optional, TypeVar, Union, cast import numpy as np @@ -630,6 +630,8 @@ class DatetimeTimedeltaMixin(DatetimeIndexOpsMixin, Int64Index): but not PeriodIndex """ + tz: Optional[tzinfo] + # Compat for frequency inference, see GH#23789 _is_monotonic_increasing = Index.is_monotonic_increasing _is_monotonic_decreasing = Index.is_monotonic_decreasing diff --git a/pandas/core/indexes/numeric.py b/pandas/core/indexes/numeric.py index 80bb9f10fadd9..cd3f1f51a86d2 100644 --- a/pandas/core/indexes/numeric.py +++ b/pandas/core/indexes/numeric.py @@ -45,6 +45,8 @@ class NumericIndex(Index): This is an abstract class. 
""" + _default_dtype: np.dtype + _is_numeric_dtype = True def __new__(cls, data=None, dtype=None, copy=False, name=None): diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index a38b47a4c2a25..1b42df1b0147c 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -1382,7 +1382,7 @@ def where_func(cond, values, other): cond = cond.swapaxes(axis, 0) mask = np.array([cond[i].all() for i in range(cond.shape[0])], dtype=bool) - result_blocks = [] + result_blocks: List["Block"] = [] for m in [mask, ~mask]: if m.any(): taken = result.take(m.nonzero()[0], axis=axis) diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py index 67ff3b9456ccf..bade891939c84 100644 --- a/pandas/core/internals/managers.py +++ b/pandas/core/internals/managers.py @@ -334,7 +334,7 @@ def reduce(self: T, func) -> T: # If 2D, we assume that we're operating column-wise assert self.ndim == 2 - res_blocks = [] + res_blocks: List[Block] = [] for blk in self.blocks: nbs = blk.reduce(func) res_blocks.extend(nbs) @@ -728,7 +728,7 @@ def _combine(self, blocks: List[Block], copy: bool = True) -> "BlockManager": indexer = np.sort(np.concatenate([b.mgr_locs.as_array for b in blocks])) inv_indexer = lib.get_reverse_indexer(indexer, self.shape[0]) - new_blocks = [] + new_blocks: List[Block] = [] for b in blocks: b = b.copy(deep=copy) b.mgr_locs = inv_indexer[b.mgr_locs.indexer] From 972f491cb7fdcc3c1c2cb9f05644128f13457f87 Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Sun, 30 Aug 2020 06:23:44 -0700 Subject: [PATCH 355/632] TYP: annotate plotting._matplotlib.converter (#35978) --- pandas/plotting/_matplotlib/converter.py | 24 +++++++++++++++--------- 1 file changed, 15 insertions(+), 9 deletions(-) diff --git a/pandas/plotting/_matplotlib/converter.py b/pandas/plotting/_matplotlib/converter.py index 214a67690d695..3db7c38eced65 100644 --- a/pandas/plotting/_matplotlib/converter.py +++ b/pandas/plotting/_matplotlib/converter.py @@ -2,7 +2,7 @@ import datetime as pydt from datetime import datetime, timedelta, tzinfo import functools -from typing import Optional, Tuple +from typing import Any, List, Optional, Tuple from dateutil.relativedelta import relativedelta import matplotlib.dates as dates @@ -144,7 +144,7 @@ def convert(value, unit, axis): return value @staticmethod - def axisinfo(unit, axis): + def axisinfo(unit, axis) -> Optional[units.AxisInfo]: if unit != "time": return None @@ -294,7 +294,7 @@ def try_parse(values): return values @staticmethod - def axisinfo(unit, axis): + def axisinfo(unit: Optional[tzinfo], axis) -> units.AxisInfo: """ Return the :class:`~matplotlib.units.AxisInfo` for *unit*. @@ -473,7 +473,7 @@ def _get_default_annual_spacing(nyears) -> Tuple[int, int]: return (min_spacing, maj_spacing) -def period_break(dates, period): +def period_break(dates: PeriodIndex, period: str) -> np.ndarray: """ Returns the indices where the given period changes. @@ -489,7 +489,7 @@ def period_break(dates, period): return np.nonzero(current - previous)[0] -def has_level_label(label_flags, vmin): +def has_level_label(label_flags: np.ndarray, vmin: float) -> bool: """ Returns true if the ``label_flags`` indicate there is at least one label for this level. @@ -984,18 +984,24 @@ class TimeSeries_DateFormatter(Formatter): ---------- freq : {int, string} Valid frequency specifier. - minor_locator : {False, True} + minor_locator : bool, default False Whether the current formatter should apply to minor ticks (True) or major ticks (False). 
- dynamic_mode : {True, False} + dynamic_mode : bool, default True Whether the formatter works in dynamic mode or not. """ - def __init__(self, freq, minor_locator=False, dynamic_mode=True, plot_obj=None): + def __init__( + self, + freq, + minor_locator: bool = False, + dynamic_mode: bool = True, + plot_obj=None, + ): freq = to_offset(freq) self.format = None self.freq = freq - self.locs = [] + self.locs: List[Any] = [] # unused, for matplotlib compat self.formatdict = None self.isminor = minor_locator self.isdynamic = dynamic_mode From 5a9ee74d4ec68acfd4cb8403fe22e285b7dd694f Mon Sep 17 00:00:00 2001 From: Simon Hawkins Date: Sun, 30 Aug 2020 18:50:37 +0100 Subject: [PATCH 356/632] TYP: misc typing cleanups for #29116 (#35953) --- pandas/core/aggregation.py | 14 ++++++++------ pandas/core/frame.py | 6 ++++++ 2 files changed, 14 insertions(+), 6 deletions(-) diff --git a/pandas/core/aggregation.py b/pandas/core/aggregation.py index e2374b81ca13b..7ca68d8289bd5 100644 --- a/pandas/core/aggregation.py +++ b/pandas/core/aggregation.py @@ -10,6 +10,7 @@ Callable, DefaultDict, Dict, + Iterable, List, Optional, Sequence, @@ -17,14 +18,14 @@ Union, ) -from pandas._typing import AggFuncType, Label +from pandas._typing import AggFuncType, FrameOrSeries, Label from pandas.core.dtypes.common import is_dict_like, is_list_like from pandas.core.base import SpecificationError import pandas.core.common as com from pandas.core.indexes.api import Index -from pandas.core.series import FrameOrSeriesUnion, Series +from pandas.core.series import Series def reconstruct_func( @@ -276,12 +277,13 @@ def maybe_mangle_lambdas(agg_spec: Any) -> Any: def relabel_result( - result: FrameOrSeriesUnion, + result: FrameOrSeries, func: Dict[str, List[Union[Callable, str]]], - columns: Tuple, - order: List[int], + columns: Iterable[Label], + order: Iterable[int], ) -> Dict[Label, Series]: - """Internal function to reorder result if relabelling is True for + """ + Internal function to reorder result if relabelling is True for dataframe.agg, and return the reordered result in dict. Parameters: diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 95bd757f1994e..4668f264000e7 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -7415,6 +7415,12 @@ def aggregate(self, func=None, axis=0, *args, **kwargs): if relabeling: # This is to keep the order to columns occurrence unchanged, and also # keep the order of new columns occurrence unchanged + + # For the return values of reconstruct_func, if relabeling is + # False, columns and order will be None. 
+ assert columns is not None + assert order is not None + result_in_dict = relabel_result(result, func, columns, order) result = DataFrame(result_in_dict, index=columns) From 1a576ebf999c8b417578b60d1cb8b659083ce080 Mon Sep 17 00:00:00 2001 From: Simon Hawkins Date: Mon, 31 Aug 2020 02:31:22 +0100 Subject: [PATCH 357/632] TYP: check_untyped_defs core.dtypes.cast (#35992) --- pandas/core/indexes/datetimes.py | 7 ++++++- pandas/core/tools/datetimes.py | 4 +--- setup.cfg | 3 --- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index f71fd0d406c54..e66f513e347a9 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -75,7 +75,7 @@ def _new_DatetimeIndex(cls, d): + [ method for method in DatetimeArray._datetimelike_methods - if method not in ("tz_localize",) + if method not in ("tz_localize", "tz_convert") ], DatetimeArray, wrap=True, @@ -228,6 +228,11 @@ class DatetimeIndex(DatetimeTimedeltaMixin): # -------------------------------------------------------------------- # methods that dispatch to array and wrap result in DatetimeIndex + @doc(DatetimeArray.tz_convert) + def tz_convert(self, tz) -> "DatetimeIndex": + arr = self._data.tz_convert(tz) + return type(self)._simple_new(arr, name=self.name) + @doc(DatetimeArray.tz_localize) def tz_localize( self, tz, ambiguous="raise", nonexistent="raise" diff --git a/pandas/core/tools/datetimes.py b/pandas/core/tools/datetimes.py index 3c1fe6bacefcf..8fcc5f74ea897 100644 --- a/pandas/core/tools/datetimes.py +++ b/pandas/core/tools/datetimes.py @@ -307,9 +307,7 @@ def _convert_listlike_datetimes( if not isinstance(arg, (DatetimeArray, DatetimeIndex)): return DatetimeIndex(arg, tz=tz, name=name) if tz == "utc": - # error: Item "DatetimeIndex" of "Union[DatetimeArray, DatetimeIndex]" has - # no attribute "tz_convert" - arg = arg.tz_convert(None).tz_localize(tz) # type: ignore[union-attr] + arg = arg.tz_convert(None).tz_localize(tz) return arg elif is_datetime64_ns_dtype(arg_dtype): diff --git a/setup.cfg b/setup.cfg index aa1535a171f0a..2ba22e5aad3c7 100644 --- a/setup.cfg +++ b/setup.cfg @@ -157,9 +157,6 @@ check_untyped_defs=False [mypy-pandas.core.computation.scope] check_untyped_defs=False -[mypy-pandas.core.dtypes.cast] -check_untyped_defs=False - [mypy-pandas.core.frame] check_untyped_defs=False From e5af011e534fc0de3b7431ce7a146a8adc6e1194 Mon Sep 17 00:00:00 2001 From: Metehan Kutlu Date: Mon, 31 Aug 2020 12:59:17 +0300 Subject: [PATCH 358/632] Issue35925 Remove trailing commas (#35996) * pandas/tests/test_multilevel.py * pandas/tests/test_nanops.py * pandas/.../test_moments_consistency_rolling.py * pandas/tests/window/moments/test_moments_ewm.py * pandas/.../test_moments_rolling.py * pandas/tests/window/test_base_indexer.py * pandas/tests/window/test_pairwise.py * pandas/tests/window/test_rolling.py * pandas/tseries/frequencies.py * pandas/util/_test_decorators.py --- pandas/tests/test_multilevel.py | 2 +- pandas/tests/test_nanops.py | 2 +- .../window/moments/test_moments_consistency_rolling.py | 8 ++++---- pandas/tests/window/moments/test_moments_ewm.py | 4 ++-- pandas/tests/window/moments/test_moments_rolling.py | 8 ++++---- pandas/tests/window/test_base_indexer.py | 6 +++--- pandas/tests/window/test_pairwise.py | 2 +- pandas/tests/window/test_rolling.py | 8 ++++---- pandas/tseries/frequencies.py | 2 +- pandas/util/_test_decorators.py | 4 ++-- 10 files changed, 23 insertions(+), 23 deletions(-) diff --git 
a/pandas/tests/test_multilevel.py b/pandas/tests/test_multilevel.py index 724558bd49ea2..274860b3fdb5c 100644 --- a/pandas/tests/test_multilevel.py +++ b/pandas/tests/test_multilevel.py @@ -1846,7 +1846,7 @@ def test_multilevel_index_loc_order(self, dim, keys, expected): # GH 22797 # Try to respect order of keys given for MultiIndex.loc kwargs = {dim: [["c", "a", "a", "b", "b"], [1, 1, 2, 1, 2]]} - df = pd.DataFrame(np.arange(25).reshape(5, 5), **kwargs,) + df = pd.DataFrame(np.arange(25).reshape(5, 5), **kwargs) exp_index = MultiIndex.from_arrays(expected) if dim == "index": res = df.loc[keys, :] diff --git a/pandas/tests/test_nanops.py b/pandas/tests/test_nanops.py index 0d60e6e8a978f..c45e4508c6153 100644 --- a/pandas/tests/test_nanops.py +++ b/pandas/tests/test_nanops.py @@ -285,7 +285,7 @@ def test_nansum(self, skipna): def test_nanmean(self, skipna): self.check_funs( - nanops.nanmean, np.mean, skipna, allow_obj=False, allow_date=False, + nanops.nanmean, np.mean, skipna, allow_obj=False, allow_date=False ) def test_nanmean_overflow(self): diff --git a/pandas/tests/window/moments/test_moments_consistency_rolling.py b/pandas/tests/window/moments/test_moments_consistency_rolling.py index a3de8aa69f840..158b994cf03ae 100644 --- a/pandas/tests/window/moments/test_moments_consistency_rolling.py +++ b/pandas/tests/window/moments/test_moments_consistency_rolling.py @@ -95,7 +95,7 @@ def test_rolling_apply_consistency( with warnings.catch_warnings(): warnings.filterwarnings( - "ignore", message=".*(empty slice|0 for slice).*", category=RuntimeWarning, + "ignore", message=".*(empty slice|0 for slice).*", category=RuntimeWarning ) # test consistency between rolling_xyz() and either (a) # rolling_apply of Series.xyz(), or (b) rolling_apply of @@ -107,7 +107,7 @@ def test_rolling_apply_consistency( functions = no_nan_functions + base_functions for (f, require_min_periods, name) in functions: rolling_f = getattr( - x.rolling(window=window, center=center, min_periods=min_periods), name, + x.rolling(window=window, center=center, min_periods=min_periods), name ) if ( @@ -492,7 +492,7 @@ def test_moment_functions_zero_length_pairwise(): df2["a"] = df2["a"].astype("float64") df1_expected = DataFrame( - index=pd.MultiIndex.from_product([df1.index, df1.columns]), columns=Index([]), + index=pd.MultiIndex.from_product([df1.index, df1.columns]), columns=Index([]) ) df2_expected = DataFrame( index=pd.MultiIndex.from_product( @@ -635,7 +635,7 @@ def test_rolling_consistency(consistency_data, window, min_periods, center): # with empty/0-length Series/DataFrames with warnings.catch_warnings(): warnings.filterwarnings( - "ignore", message=".*(empty slice|0 for slice).*", category=RuntimeWarning, + "ignore", message=".*(empty slice|0 for slice).*", category=RuntimeWarning ) # test consistency between different rolling_* moments diff --git a/pandas/tests/window/moments/test_moments_ewm.py b/pandas/tests/window/moments/test_moments_ewm.py index 89d46a8bb6cb5..a83bfabc4a048 100644 --- a/pandas/tests/window/moments/test_moments_ewm.py +++ b/pandas/tests/window/moments/test_moments_ewm.py @@ -73,7 +73,7 @@ def simple_wma(s, w): (s1, True, True, [(1.0 - alpha), np.nan, 1.0]), (s1, False, False, [(1.0 - alpha) ** 2, np.nan, alpha]), (s1, False, True, [(1.0 - alpha), np.nan, alpha]), - (s2, True, False, [np.nan, (1.0 - alpha) ** 3, np.nan, np.nan, 1.0, np.nan],), + (s2, True, False, [np.nan, (1.0 - alpha) ** 3, np.nan, np.nan, 1.0, np.nan]), (s2, True, True, [np.nan, (1.0 - alpha), np.nan, np.nan, 1.0, np.nan]), ( s2, @@ 
-95,7 +95,7 @@ def simple_wma(s, w): alpha * ((1.0 - alpha) ** 2 + alpha), ], ), - (s3, False, True, [(1.0 - alpha) ** 2, np.nan, (1.0 - alpha) * alpha, alpha],), + (s3, False, True, [(1.0 - alpha) ** 2, np.nan, (1.0 - alpha) * alpha, alpha]), ]: expected = simple_wma(s, Series(w)) result = s.ewm(com=com, adjust=adjust, ignore_na=ignore_na).mean() diff --git a/pandas/tests/window/moments/test_moments_rolling.py b/pandas/tests/window/moments/test_moments_rolling.py index 81f020fe7de23..da256e80dff7e 100644 --- a/pandas/tests/window/moments/test_moments_rolling.py +++ b/pandas/tests/window/moments/test_moments_rolling.py @@ -150,14 +150,14 @@ def get_result(obj, window, min_periods=None, center=False): series_xp = ( get_result( - series.reindex(list(series.index) + s), window=25, min_periods=minp, + series.reindex(list(series.index) + s), window=25, min_periods=minp ) .shift(-12) .reindex(series.index) ) frame_xp = ( get_result( - frame.reindex(list(frame.index) + s), window=25, min_periods=minp, + frame.reindex(list(frame.index) + s), window=25, min_periods=minp ) .shift(-12) .reindex(frame.index) @@ -169,14 +169,14 @@ def get_result(obj, window, min_periods=None, center=False): else: series_xp = ( get_result( - series.reindex(list(series.index) + s), window=25, min_periods=0, + series.reindex(list(series.index) + s), window=25, min_periods=0 ) .shift(-12) .reindex(series.index) ) frame_xp = ( get_result( - frame.reindex(list(frame.index) + s), window=25, min_periods=0, + frame.reindex(list(frame.index) + s), window=25, min_periods=0 ) .shift(-12) .reindex(frame.index) diff --git a/pandas/tests/window/test_base_indexer.py b/pandas/tests/window/test_base_indexer.py index 2300d8dd5529b..ab73e075eed04 100644 --- a/pandas/tests/window/test_base_indexer.py +++ b/pandas/tests/window/test_base_indexer.py @@ -88,8 +88,8 @@ def get_window_bounds(self, num_values, min_periods, center, closed): @pytest.mark.parametrize( "func,np_func,expected,np_kwargs", [ - ("count", len, [3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 2.0, np.nan], {},), - ("min", np.min, [0.0, 1.0, 2.0, 3.0, 4.0, 6.0, 6.0, 7.0, 8.0, np.nan], {},), + ("count", len, [3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 2.0, np.nan], {}), + ("min", np.min, [0.0, 1.0, 2.0, 3.0, 4.0, 6.0, 6.0, 7.0, 8.0, np.nan], {}), ( "max", np.max, @@ -204,7 +204,7 @@ def test_rolling_forward_skewness(constructor): @pytest.mark.parametrize( "func,expected", [ - ("cov", [2.0, 2.0, 2.0, 97.0, 2.0, -93.0, 2.0, 2.0, np.nan, np.nan],), + ("cov", [2.0, 2.0, 2.0, 97.0, 2.0, -93.0, 2.0, 2.0, np.nan, np.nan]), ( "corr", [ diff --git a/pandas/tests/window/test_pairwise.py b/pandas/tests/window/test_pairwise.py index e82d4b8cbf770..7425cc5df4c2f 100644 --- a/pandas/tests/window/test_pairwise.py +++ b/pandas/tests/window/test_pairwise.py @@ -195,7 +195,7 @@ def test_cov_mulittindex(self): columns = MultiIndex.from_product([list("ab"), list("xy"), list("AB")]) index = range(3) - df = DataFrame(np.arange(24).reshape(3, 8), index=index, columns=columns,) + df = DataFrame(np.arange(24).reshape(3, 8), index=index, columns=columns) result = df.ewm(alpha=0.1).cov() diff --git a/pandas/tests/window/test_rolling.py b/pandas/tests/window/test_rolling.py index 8d72e2cb92ca9..67b20fd2d6daa 100644 --- a/pandas/tests/window/test_rolling.py +++ b/pandas/tests/window/test_rolling.py @@ -73,7 +73,7 @@ def test_constructor_with_timedelta_window(window): # GH 15440 n = 10 df = DataFrame( - {"value": np.arange(n)}, index=pd.date_range("2015-12-24", periods=n, freq="D"), + {"value": np.arange(n)}, 
index=pd.date_range("2015-12-24", periods=n, freq="D") ) expected_data = np.append([0.0, 1.0], np.arange(3.0, 27.0, 3)) @@ -92,7 +92,7 @@ def test_constructor_timedelta_window_and_minperiods(window, raw): # GH 15305 n = 10 df = DataFrame( - {"value": np.arange(n)}, index=pd.date_range("2017-08-08", periods=n, freq="D"), + {"value": np.arange(n)}, index=pd.date_range("2017-08-08", periods=n, freq="D") ) expected = DataFrame( {"value": np.append([np.NaN, 1.0], np.arange(3.0, 27.0, 3))}, @@ -153,7 +153,7 @@ def test_closed_one_entry(func): def test_closed_one_entry_groupby(func): # GH24718 ser = pd.DataFrame( - data={"A": [1, 1, 2], "B": [3, 2, 1]}, index=pd.date_range("2000", periods=3), + data={"A": [1, 1, 2], "B": [3, 2, 1]}, index=pd.date_range("2000", periods=3) ) result = getattr( ser.groupby("A", sort=False)["B"].rolling("10D", closed="left"), func @@ -182,7 +182,7 @@ def test_closed_one_entry_groupby(func): def test_closed_min_max_datetime(input_dtype, func, closed, expected): # see gh-21704 ser = pd.Series( - data=np.arange(10).astype(input_dtype), index=pd.date_range("2000", periods=10), + data=np.arange(10).astype(input_dtype), index=pd.date_range("2000", periods=10) ) result = getattr(ser.rolling("3D", closed=closed), func)() diff --git a/pandas/tseries/frequencies.py b/pandas/tseries/frequencies.py index f80ff1a53cd69..8ef6dac2862db 100644 --- a/pandas/tseries/frequencies.py +++ b/pandas/tseries/frequencies.py @@ -548,7 +548,7 @@ def is_superperiod(source, target) -> bool: def _maybe_coerce_freq(code) -> str: - """ we might need to coerce a code to a rule_code + """we might need to coerce a code to a rule_code and uppercase it Parameters diff --git a/pandas/util/_test_decorators.py b/pandas/util/_test_decorators.py index 0dad8c7397e37..ca7b99492bbf7 100644 --- a/pandas/util/_test_decorators.py +++ b/pandas/util/_test_decorators.py @@ -186,10 +186,10 @@ def skip_if_no(package: str, min_version: Optional[str] = None): is_platform_windows(), reason="not used on win32" ) skip_if_has_locale = pytest.mark.skipif( - _skip_if_has_locale(), reason=f"Specific locale is set {locale.getlocale()[0]}", + _skip_if_has_locale(), reason=f"Specific locale is set {locale.getlocale()[0]}" ) skip_if_not_us_locale = pytest.mark.skipif( - _skip_if_not_us_locale(), reason=f"Specific locale is set {locale.getlocale()[0]}", + _skip_if_not_us_locale(), reason=f"Specific locale is set {locale.getlocale()[0]}" ) skip_if_no_scipy = pytest.mark.skipif( _skip_if_no_scipy(), reason="Missing SciPy requirement" From 0e621bc3818b2dbde859c69eb6d138228488606c Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Mon, 31 Aug 2020 03:15:04 -0700 Subject: [PATCH 359/632] TYP: annotate plotting._matplotlib.tools (#35968) --- pandas/plotting/_matplotlib/tools.py | 33 +++++++++++++++++++++------- 1 file changed, 25 insertions(+), 8 deletions(-) diff --git a/pandas/plotting/_matplotlib/tools.py b/pandas/plotting/_matplotlib/tools.py index 26b25597ce1a6..4d643ffb734e4 100644 --- a/pandas/plotting/_matplotlib/tools.py +++ b/pandas/plotting/_matplotlib/tools.py @@ -1,6 +1,6 @@ # being a bit too dynamic from math import ceil -from typing import TYPE_CHECKING, Tuple +from typing import TYPE_CHECKING, Iterable, List, Sequence, Tuple, Union import warnings import matplotlib.table @@ -15,10 +15,13 @@ from pandas.plotting._matplotlib import compat if TYPE_CHECKING: + from matplotlib.axes import Axes + from matplotlib.axis import Axis + from matplotlib.lines import Line2D # noqa:F401 from matplotlib.table import Table -def 
format_date_labels(ax, rot): +def format_date_labels(ax: "Axes", rot): # mini version of autofmt_xdate for label in ax.get_xticklabels(): label.set_ha("right") @@ -278,7 +281,7 @@ def _subplots( return fig, axes -def _remove_labels_from_axis(axis): +def _remove_labels_from_axis(axis: "Axis"): for t in axis.get_majorticklabels(): t.set_visible(False) @@ -294,7 +297,15 @@ def _remove_labels_from_axis(axis): axis.get_label().set_visible(False) -def _handle_shared_axes(axarr, nplots, naxes, nrows, ncols, sharex, sharey): +def _handle_shared_axes( + axarr: Iterable["Axes"], + nplots: int, + naxes: int, + nrows: int, + ncols: int, + sharex: bool, + sharey: bool, +): if nplots > 1: if compat._mpl_ge_3_2_0(): row_num = lambda x: x.get_subplotspec().rowspan.start @@ -340,7 +351,7 @@ def _handle_shared_axes(axarr, nplots, naxes, nrows, ncols, sharex, sharey): _remove_labels_from_axis(ax.yaxis) -def _flatten(axes): +def _flatten(axes: Union["Axes", Sequence["Axes"]]) -> Sequence["Axes"]: if not is_list_like(axes): return np.array([axes]) elif isinstance(axes, (np.ndarray, ABCIndexClass)): @@ -348,7 +359,13 @@ def _flatten(axes): return np.array(axes) -def _set_ticks_props(axes, xlabelsize=None, xrot=None, ylabelsize=None, yrot=None): +def _set_ticks_props( + axes: Union["Axes", Sequence["Axes"]], + xlabelsize=None, + xrot=None, + ylabelsize=None, + yrot=None, +): import matplotlib.pyplot as plt for ax in _flatten(axes): @@ -363,7 +380,7 @@ def _set_ticks_props(axes, xlabelsize=None, xrot=None, ylabelsize=None, yrot=Non return axes -def _get_all_lines(ax): +def _get_all_lines(ax: "Axes") -> List["Line2D"]: lines = ax.get_lines() if hasattr(ax, "right_ax"): @@ -375,7 +392,7 @@ def _get_all_lines(ax): return lines -def _get_xlim(lines) -> Tuple[float, float]: +def _get_xlim(lines: Iterable["Line2D"]) -> Tuple[float, float]: left, right = np.inf, -np.inf for l in lines: x = l.get_xdata(orig=False) From 132e19173265ad95e6b92dc46aa2b045c1073b81 Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Mon, 31 Aug 2020 03:16:15 -0700 Subject: [PATCH 360/632] TYP: annotations in core.groupby (#35939) --- pandas/core/groupby/categorical.py | 16 ++++++++++---- pandas/core/groupby/generic.py | 34 +++++++++++++----------------- pandas/core/groupby/groupby.py | 6 +++--- pandas/core/groupby/grouper.py | 6 ++++-- pandas/core/groupby/ops.py | 2 +- 5 files changed, 35 insertions(+), 29 deletions(-) diff --git a/pandas/core/groupby/categorical.py b/pandas/core/groupby/categorical.py index db734bb2f0c07..4d5acf527a867 100644 --- a/pandas/core/groupby/categorical.py +++ b/pandas/core/groupby/categorical.py @@ -1,3 +1,5 @@ +from typing import Optional, Tuple + import numpy as np from pandas.core.algorithms import unique1d @@ -6,9 +8,12 @@ CategoricalDtype, recode_for_categories, ) +from pandas.core.indexes.api import CategoricalIndex -def recode_for_groupby(c: Categorical, sort: bool, observed: bool): +def recode_for_groupby( + c: Categorical, sort: bool, observed: bool +) -> Tuple[Categorical, Optional[Categorical]]: """ Code the categories to ensure we can groupby for categoricals. @@ -73,7 +78,9 @@ def recode_for_groupby(c: Categorical, sort: bool, observed: bool): return c.reorder_categories(cat.categories), None -def recode_from_groupby(c: Categorical, sort: bool, ci): +def recode_from_groupby( + c: Categorical, sort: bool, ci: CategoricalIndex +) -> CategoricalIndex: """ Reverse the codes_to_groupby to account for sort / observed. 
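The annotations above cover the pair of helpers that let groupby work on a Categorical whose categories are not all observed. A short illustration of the behaviour they support, using only public API (this is user-level code, not the internal recode path):

import pandas as pd

cat = pd.Categorical(["a", "a", "b"], categories=["a", "b", "c"])
ser = pd.Series([1, 2, 3])

# observed=False keeps the unobserved category "c" in the result index ...
print(ser.groupby(cat, observed=False).sum())
# ... while observed=True drops it; recode_for_groupby/recode_from_groupby
# manage the round trip between the recoded codes and original categories.
print(ser.groupby(cat, observed=True).sum())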
@@ -91,7 +98,8 @@ def recode_from_groupby(c: Categorical, sort: bool, ci): """ # we re-order to the original category orderings if sort: - return ci.set_categories(c.categories) + return ci.set_categories(c.categories) # type: ignore [attr-defined] # we are not sorting, so add unobserved to the end - return ci.add_categories(c.categories[~c.categories.isin(ci.categories)]) + new_cats = c.categories[~c.categories.isin(ci.categories)] + return ci.add_categories(new_cats) # type: ignore [attr-defined] diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index 3172fb4e0e853..e39464628ccaa 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -23,6 +23,7 @@ Type, TypeVar, Union, + cast, ) import warnings @@ -83,7 +84,7 @@ from pandas.plotting import boxplot_frame_groupby if TYPE_CHECKING: - from pandas.core.internals import Block + from pandas.core.internals import Block # noqa:F401 NamedAgg = namedtuple("NamedAgg", ["column", "aggfunc"]) @@ -1591,7 +1592,7 @@ def _gotitem(self, key, ndim: int, subset=None): Parameters ---------- key : string / list of selections - ndim : 1,2 + ndim : {1, 2} requested ndim of result subset : object, default None subset to act on @@ -1617,7 +1618,7 @@ def _gotitem(self, key, ndim: int, subset=None): raise AssertionError("invalid ndim for _gotitem") - def _wrap_frame_output(self, result, obj) -> DataFrame: + def _wrap_frame_output(self, result, obj: DataFrame) -> DataFrame: result_index = self.grouper.levels[0] if self.axis == 0: @@ -1634,20 +1635,14 @@ def _get_data_to_aggregate(self) -> BlockManager: else: return obj._mgr - def _insert_inaxis_grouper_inplace(self, result): + def _insert_inaxis_grouper_inplace(self, result: DataFrame) -> None: # zip in reverse so we can always insert at loc 0 - izip = zip( - *map( - reversed, - ( - self.grouper.names, - self.grouper.get_group_levels(), - [grp.in_axis for grp in self.grouper.groupings], - ), - ) - ) columns = result.columns - for name, lev, in_axis in izip: + for name, lev, in_axis in zip( + reversed(self.grouper.names), + reversed(self.grouper.get_group_levels()), + reversed([grp.in_axis for grp in self.grouper.groupings]), + ): # GH #28549 # When using .apply(-), name will be in columns already if in_axis and name not in columns: @@ -1712,7 +1707,7 @@ def _wrap_transformed_output( return result - def _wrap_agged_blocks(self, blocks: "Sequence[Block]", items: Index) -> DataFrame: + def _wrap_agged_blocks(self, blocks: Sequence["Block"], items: Index) -> DataFrame: if not self.as_index: index = np.arange(blocks[0].values.shape[-1]) mgr = BlockManager(blocks, axes=[items, index]) @@ -1739,7 +1734,7 @@ def _iterate_column_groupbys(self): exclusions=self.exclusions, ) - def _apply_to_column_groupbys(self, func): + def _apply_to_column_groupbys(self, func) -> DataFrame: from pandas.core.reshape.concat import concat return concat( @@ -1748,7 +1743,7 @@ def _apply_to_column_groupbys(self, func): axis=1, ) - def count(self): + def count(self) -> DataFrame: """ Compute count of group, excluding missing values. @@ -1778,7 +1773,7 @@ def count(self): return self._reindex_output(result, fill_value=0) - def nunique(self, dropna: bool = True): + def nunique(self, dropna: bool = True) -> DataFrame: """ Return DataFrame with counts of unique elements in each position. 
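The `_insert_inaxis_grouper_inplace` hunk above trades a `zip(*map(reversed, ...))` construction for direct `zip(reversed(...), ...)` calls. The two spellings are equivalent; a quick standalone check with made-up data:

# Same triples come out of both constructions, in the same (reversed) order.
names = ["x", "y", "z"]
levels = [1, 2, 3]
in_axis = [True, False, True]

old = list(zip(*map(reversed, (names, levels, in_axis))))
new = list(zip(reversed(names), reversed(levels), reversed(in_axis)))
assert old == new  # [('z', 3, True), ('y', 2, False), ('x', 1, True)]

The direct form is easier to read and more straightforward for a type checker to infer than the starred version.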
@@ -1844,6 +1839,7 @@ def nunique(self, dropna: bool = True): ], axis=1, ) + results = cast(DataFrame, results) if axis_number == 1: results = results.T diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index a91366af61d0d..651af2d314251 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -459,7 +459,7 @@ def f(self): @contextmanager -def _group_selection_context(groupby): +def _group_selection_context(groupby: "_GroupBy"): """ Set / reset the _group_selection_context. """ @@ -489,7 +489,7 @@ def __init__( keys: Optional[_KeysArgType] = None, axis: int = 0, level=None, - grouper: "Optional[ops.BaseGrouper]" = None, + grouper: Optional["ops.BaseGrouper"] = None, exclusions=None, selection=None, as_index: bool = True, @@ -734,7 +734,7 @@ def pipe(self, func, *args, **kwargs): plot = property(GroupByPlot) - def _make_wrapper(self, name): + def _make_wrapper(self, name: str) -> Callable: assert name in self._apply_allowlist with _group_selection_context(self): diff --git a/pandas/core/groupby/grouper.py b/pandas/core/groupby/grouper.py index 8239a792c65dd..18970ea0544e4 100644 --- a/pandas/core/groupby/grouper.py +++ b/pandas/core/groupby/grouper.py @@ -568,7 +568,9 @@ def codes(self) -> np.ndarray: @cache_readonly def result_index(self) -> Index: if self.all_grouper is not None: - return recode_from_groupby(self.all_grouper, self.sort, self.group_index) + group_idx = self.group_index + assert isinstance(group_idx, CategoricalIndex) # set in __init__ + return recode_from_groupby(self.all_grouper, self.sort, group_idx) return self.group_index @property @@ -607,7 +609,7 @@ def get_grouper( mutated: bool = False, validate: bool = True, dropna: bool = True, -) -> "Tuple[ops.BaseGrouper, List[Hashable], FrameOrSeries]": +) -> Tuple["ops.BaseGrouper", List[Hashable], FrameOrSeries]: """ Create and return a BaseGrouper, which is an internal mapping of how to create the grouper indexers. diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py index 290680f380f5f..4dd5b7f30e7f0 100644 --- a/pandas/core/groupby/ops.py +++ b/pandas/core/groupby/ops.py @@ -82,7 +82,7 @@ class BaseGrouper: def __init__( self, axis: Index, - groupings: "Sequence[grouper.Grouping]", + groupings: Sequence["grouper.Grouping"], sort: bool = True, group_keys: bool = True, mutated: bool = False, From b45327f5fd262e835a1007587c2882c3d77b3158 Mon Sep 17 00:00:00 2001 From: Joris Van den Bossche Date: Mon, 31 Aug 2020 14:36:12 +0200 Subject: [PATCH 361/632] REGR: Fix inplace updates on column to set correct values (#35936) --- doc/source/whatsnew/v1.1.2.rst | 1 + pandas/core/internals/managers.py | 1 + pandas/tests/extension/test_numpy.py | 6 ++++++ pandas/tests/frame/test_block_internals.py | 14 ++++++++++++++ 4 files changed, 22 insertions(+) diff --git a/doc/source/whatsnew/v1.1.2.rst b/doc/source/whatsnew/v1.1.2.rst index 9747a8ef3e71f..b4c196f548147 100644 --- a/doc/source/whatsnew/v1.1.2.rst +++ b/doc/source/whatsnew/v1.1.2.rst @@ -15,6 +15,7 @@ including other versions of pandas. Fixed regressions ~~~~~~~~~~~~~~~~~ - Regression in :meth:`DatetimeIndex.intersection` incorrectly raising ``AssertionError`` when intersecting against a list (:issue:`35876`) +- Fix regression in updating a column inplace (e.g. 
using ``df['col'].fillna(.., inplace=True)``) (:issue:`35731`) - Performance regression for :meth:`RangeIndex.format` (:issue:`35712`) - diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py index bade891939c84..00321b76cb6bf 100644 --- a/pandas/core/internals/managers.py +++ b/pandas/core/internals/managers.py @@ -1025,6 +1025,7 @@ def iset(self, loc: Union[int, slice, np.ndarray], value): Set new item in-place. Does not consolidate. Adds new Block if not contained in the current set of items """ + value = extract_array(value, extract_numpy=True) # FIXME: refactor, clearly separate broadcasting & zip-like assignment # can prob also fix the various if tests for sparse/categorical if self._blklocs is None and self.ndim > 1: diff --git a/pandas/tests/extension/test_numpy.py b/pandas/tests/extension/test_numpy.py index b9219f9f833de..bbfaacae1b444 100644 --- a/pandas/tests/extension/test_numpy.py +++ b/pandas/tests/extension/test_numpy.py @@ -348,6 +348,12 @@ def test_fillna_frame(self, data_missing): # Non-scalar "scalar" values. super().test_fillna_frame(data_missing) + @pytest.mark.skip("Invalid test") + def test_fillna_fill_other(self, data): + # inplace update doesn't work correctly with patched extension arrays + # extract_array returns PandasArray, while dtype is a numpy dtype + super().test_fillna_fill_other(data_missing) + class TestReshaping(BaseNumPyTests, base.BaseReshapingTests): @pytest.mark.skip("Incorrect parent test") diff --git a/pandas/tests/frame/test_block_internals.py b/pandas/tests/frame/test_block_internals.py index 8ecd9066ceff0..00cfa6265934f 100644 --- a/pandas/tests/frame/test_block_internals.py +++ b/pandas/tests/frame/test_block_internals.py @@ -644,3 +644,17 @@ def test_to_dict_of_blocks_item_cache(): assert df.loc[0, "b"] == "foo" assert df["b"] is ser + + +def test_update_inplace_sets_valid_block_values(): + # https://github.com/pandas-dev/pandas/issues/33457 + df = pd.DataFrame({"a": pd.Series([1, 2, None], dtype="category")}) + + # inplace update of a single column + df["a"].fillna(1, inplace=True) + + # check we havent put a Series into any block.values + assert isinstance(df._mgr.blocks[0].values, pd.Categorical) + + # smoketest for OP bug from GH#35731 + assert df.isnull().sum().sum() == 0 From f46ebe895da4e08e203b1fb8bcf06a7346b86b76 Mon Sep 17 00:00:00 2001 From: Simon Hawkins Date: Mon, 31 Aug 2020 16:06:35 +0100 Subject: [PATCH 362/632] TYP: misc typing fixes for pandas\core\frame.py (#35990) * TYP: misc typing fixes for pandas\core\frame.py * correct issue number --- pandas/core/frame.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 4668f264000e7..fde83a8393241 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -1014,7 +1014,7 @@ def iterrows(self) -> Iterable[Tuple[Label, Series]]: s = klass(v, index=columns, name=k) yield k, s - def itertuples(self, index=True, name="Pandas"): + def itertuples(self, index: bool = True, name: Optional[str] = "Pandas"): """ Iterate over DataFrame rows as namedtuples. 
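The hunk above only shows the start of `itertuples`; for reference, the annotated signature (`index: bool = True`, `name: Optional[str] = "Pandas"`) covers both the namedtuple case and the `name=None` plain-tuple fallback handled later in this diff. A public-API illustration:

import pandas as pd

df = pd.DataFrame({"a": [1, 2], "b": [3.0, 4.0]})

for row in df.itertuples(index=True, name="Pandas"):
    # Each row is a namedtuple, so fields are accessible by name.
    print(row.Index, row.a, row.b)

for row in df.itertuples(name=None):
    # With name=None the method falls back to plain tuples.
    print(row)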
@@ -1088,7 +1088,11 @@ def itertuples(self, index=True, name="Pandas"): arrays.extend(self.iloc[:, k] for k in range(len(self.columns))) if name is not None: - itertuple = collections.namedtuple(name, fields, rename=True) + # https://github.com/python/mypy/issues/9046 + # error: namedtuple() expects a string literal as the first argument + itertuple = collections.namedtuple( # type: ignore[misc] + name, fields, rename=True + ) return map(itertuple._make, zip(*arrays)) # fallback to regular tuples @@ -4591,7 +4595,7 @@ def set_index( frame = self.copy() arrays = [] - names = [] + names: List[Label] = [] if append: names = list(self.index.names) if isinstance(self.index, MultiIndex): From 6043ba54bea90c16217abcfcd64a25d025381fa9 Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Mon, 31 Aug 2020 09:27:27 -0700 Subject: [PATCH 363/632] CI: suppress another setuptools warning (#36011) --- pandas/tests/util/test_show_versions.py | 1 + 1 file changed, 1 insertion(+) diff --git a/pandas/tests/util/test_show_versions.py b/pandas/tests/util/test_show_versions.py index 04e841c05e44a..fe5fc3e21d960 100644 --- a/pandas/tests/util/test_show_versions.py +++ b/pandas/tests/util/test_show_versions.py @@ -25,6 +25,7 @@ # https://github.com/pandas-dev/pandas/issues/35252 "ignore:Distutils:UserWarning" ) +@pytest.mark.filterwarnings("ignore:Setuptools is replacing distutils:UserWarning") def test_show_versions(capsys): # gh-32041 pd.show_versions() From b8981f411c6d1b4cab08be2272e3be6a59c00d14 Mon Sep 17 00:00:00 2001 From: Fangchen Li Date: Mon, 31 Aug 2020 13:22:34 -0500 Subject: [PATCH 364/632] TYP: typing errors in _xlsxwriter.py #35994 (#35995) * TYP: typing errors in _xlsxwriter.py #35994 * TYP: add param type * TYP: remove book=None in base class --- pandas/io/excel/_base.py | 1 - pandas/io/excel/_odswriter.py | 2 +- pandas/io/excel/_xlsxwriter.py | 8 +++++--- 3 files changed, 6 insertions(+), 5 deletions(-) diff --git a/pandas/io/excel/_base.py b/pandas/io/excel/_base.py index 3cd0d721bbdc6..ead36c95556b1 100644 --- a/pandas/io/excel/_base.py +++ b/pandas/io/excel/_base.py @@ -653,7 +653,6 @@ def __new__(cls, path, engine=None, **kwargs): return object.__new__(cls) # declare external properties you can count on - book = None curr_sheet = None path = None diff --git a/pandas/io/excel/_odswriter.py b/pandas/io/excel/_odswriter.py index 72f3d81b1c662..f39391ae1fe7f 100644 --- a/pandas/io/excel/_odswriter.py +++ b/pandas/io/excel/_odswriter.py @@ -25,7 +25,7 @@ def __init__( super().__init__(path, mode=mode, **engine_kwargs) - self.book: OpenDocumentSpreadsheet = OpenDocumentSpreadsheet() + self.book = OpenDocumentSpreadsheet() self._style_dict: Dict[str, str] = {} def save(self) -> None: diff --git a/pandas/io/excel/_xlsxwriter.py b/pandas/io/excel/_xlsxwriter.py index 85a1bb031f457..bdbb006ae93dc 100644 --- a/pandas/io/excel/_xlsxwriter.py +++ b/pandas/io/excel/_xlsxwriter.py @@ -1,3 +1,5 @@ +from typing import Dict, List, Tuple + import pandas._libs.json as json from pandas.io.excel._base import ExcelWriter @@ -8,7 +10,7 @@ class _XlsxStyler: # Map from openpyxl-oriented styles to flatter xlsxwriter representation # Ordering necessary for both determinism and because some are keyed by # prefixes of others. - STYLE_MAPPING = { + STYLE_MAPPING: Dict[str, List[Tuple[Tuple[str, ...], str]]] = { "font": [ (("name",), "font_name"), (("sz",), "font_size"), @@ -170,7 +172,7 @@ def __init__( **engine_kwargs, ): # Use the xlsxwriter module as the Excel writer. 
- import xlsxwriter + from xlsxwriter import Workbook if mode == "a": raise ValueError("Append mode is not supported with xlsxwriter!") @@ -184,7 +186,7 @@ def __init__( **engine_kwargs, ) - self.book = xlsxwriter.Workbook(path, **engine_kwargs) + self.book = Workbook(path, **engine_kwargs) def save(self): """ From 72547697a9a06de47309e896ccce5ef7bc81e1c1 Mon Sep 17 00:00:00 2001 From: Pranjal Bhardwaj <50989807+Bhard27@users.noreply.github.com> Date: Mon, 31 Aug 2020 23:54:04 +0530 Subject: [PATCH 365/632] DOC clean up doc/source/getting_started/overview.rst (#35981) * improved the documentation * Update doc/source/getting_started/overview.rst Co-authored-by: Marco Gorelli * new commit * content changed * new commit Co-authored-by: Marco Gorelli --- doc/source/getting_started/overview.rst | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/doc/source/getting_started/overview.rst b/doc/source/getting_started/overview.rst index d8a40c5406dee..032ba73a7293d 100644 --- a/doc/source/getting_started/overview.rst +++ b/doc/source/getting_started/overview.rst @@ -9,9 +9,9 @@ Package overview **pandas** is a `Python `__ package providing fast, flexible, and expressive data structures designed to make working with "relational" or "labeled" data both easy and intuitive. It aims to be the -fundamental high-level building block for doing practical, **real world** data +fundamental high-level building block for doing practical, **real-world** data analysis in Python. Additionally, it has the broader goal of becoming **the -most powerful and flexible open source data analysis / manipulation tool +most powerful and flexible open source data analysis/manipulation tool available in any language**. It is already well on its way toward this goal. pandas is well suited for many different kinds of data: @@ -21,7 +21,7 @@ pandas is well suited for many different kinds of data: - Ordered and unordered (not necessarily fixed-frequency) time series data. - Arbitrary matrix data (homogeneously typed or heterogeneous) with row and column labels - - Any other form of observational / statistical data sets. The data actually + - Any other form of observational / statistical data sets. The data need not be labeled at all to be placed into a pandas data structure The two primary data structures of pandas, :class:`Series` (1-dimensional) @@ -57,7 +57,7 @@ Here are just a few of the things that pandas does well: Excel files, databases, and saving / loading data from the ultrafast **HDF5 format** - **Time series**-specific functionality: date range generation and frequency - conversion, moving window statistics, date shifting and lagging. + conversion, moving window statistics, date shifting, and lagging. Many of these principles are here to address the shortcomings frequently experienced using other languages / scientific research environments. For data @@ -101,12 +101,12 @@ fashion. Also, we would like sensible default behaviors for the common API functions which take into account the typical orientation of time series and -cross-sectional data sets. When using ndarrays to store 2- and 3-dimensional +cross-sectional data sets. When using the N-dimensional array (ndarrays) to store 2- and 3-dimensional data, a burden is placed on the user to consider the orientation of the data set when writing functions; axes are considered more or less equivalent (except when C- or Fortran-contiguousness matters for performance). 
In pandas, the axes are intended to lend more semantic meaning to the data; i.e., for a particular -data set there is likely to be a "right" way to orient the data. The goal, +data set, there is likely to be a "right" way to orient the data. The goal, then, is to reduce the amount of mental effort required to code up data transformations in downstream functions. @@ -148,8 +148,8 @@ pandas possible. Thanks to `all of our contributors `. pandas is a `NumFOCUS `__ sponsored project. -This will help ensure the success of development of pandas as a world-class open-source -project, and makes it possible to `donate `__ to the project. +This will help ensure the success of the development of pandas as a world-class open-source +project and makes it possible to `donate `__ to the project. Project governance ------------------ From 8f33d7380e2792c83d089ce12859b202cdd3999b Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Mon, 31 Aug 2020 11:28:24 -0700 Subject: [PATCH 366/632] REF: use BlockManager.apply for Rolling.count (#35883) * REF: remove unnecesary try/except * TST: add test for agg on ordered categorical cols (#35630) * TST: resample does not yield empty groups (#10603) (#35799) * revert accidental rebase * REF: use BlockManager.apply for Rolling.count Co-authored-by: Karthik Mathur <22126205+mathurk1@users.noreply.github.com> Co-authored-by: tkmz-n <60312218+tkmz-n@users.noreply.github.com> --- pandas/core/window/rolling.py | 59 ++++++++++------------------------- 1 file changed, 17 insertions(+), 42 deletions(-) diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py index 04509a40b98df..246bf8e6f71b7 100644 --- a/pandas/core/window/rolling.py +++ b/pandas/core/window/rolling.py @@ -22,7 +22,7 @@ from pandas._libs.tslibs import BaseOffset, to_offset import pandas._libs.window.aggregations as window_aggregations -from pandas._typing import ArrayLike, Axis, FrameOrSeries, FrameOrSeriesUnion, Label +from pandas._typing import ArrayLike, Axis, FrameOrSeries, FrameOrSeriesUnion from pandas.compat._optional import import_optional_dependency from pandas.compat.numpy import function as nv from pandas.util._decorators import Appender, Substitution, cache_readonly, doc @@ -44,6 +44,7 @@ ABCSeries, ABCTimedeltaIndex, ) +from pandas.core.dtypes.missing import notna from pandas.core.base import DataError, PandasObject, SelectionMixin, ShallowMixin import pandas.core.common as com @@ -395,40 +396,6 @@ def _wrap_result(self, result, block=None, obj=None): return type(obj)(result, index=index, columns=block.columns) return result - def _wrap_results(self, results, obj, skipped: List[int]) -> FrameOrSeriesUnion: - """ - Wrap the results. - - Parameters - ---------- - results : list of ndarrays - obj : conformed data (may be resampled) - skipped: List[int] - Indices of blocks that are skipped. 
- """ - from pandas import Series, concat - - if obj.ndim == 1: - if not results: - raise DataError("No numeric types to aggregate") - assert len(results) == 1 - return Series(results[0], index=obj.index, name=obj.name) - - exclude: List[Label] = [] - orig_blocks = list(obj._to_dict_of_blocks(copy=False).values()) - for i in skipped: - exclude.extend(orig_blocks[i].columns) - - columns = [c for c in self._selected_obj.columns if c not in exclude] - if not columns and not len(results) and exclude: - raise DataError("No numeric types to aggregate") - elif not len(results): - return obj.astype("float64") - - df = concat(results, axis=1).reindex(columns=columns, copy=False) - self._insert_on_column(df, obj) - return df - def _insert_on_column(self, result: "DataFrame", obj: "DataFrame"): # if we have an 'on' column we want to put it back into # the results in the same location @@ -1325,21 +1292,29 @@ def count(self): # implementations shouldn't end up here assert not isinstance(self.window, BaseIndexer) - blocks, obj = self._create_blocks(self._selected_obj) - results = [] - for b in blocks: - result = b.notna().astype(int) + _, obj = self._create_blocks(self._selected_obj) + + def hfunc(values: np.ndarray) -> np.ndarray: + result = notna(values) + result = result.astype(int) + frame = type(obj)(result.T) result = self._constructor( - result, + frame, window=self._get_window(), min_periods=self.min_periods or 0, center=self.center, axis=self.axis, closed=self.closed, ).sum() - results.append(result) + return result.values.T - return self._wrap_results(results, obj, skipped=[]) + new_mgr = obj._mgr.apply(hfunc) + out = obj._constructor(new_mgr) + if obj.ndim == 1: + out.name = obj.name + else: + self._insert_on_column(out, obj) + return out _shared_docs["apply"] = dedent( r""" From 2fd8021ad40d715ae2ce177a482eb4c6c2b8a7f5 Mon Sep 17 00:00:00 2001 From: Honfung Wong Date: Tue, 1 Sep 2020 02:40:11 +0800 Subject: [PATCH 367/632] DOC: complement the documentation for pandas.DataFrame.agg #35912 (#35941) * DOC: complement the documentation for pandas.DataFrame.agg * DOC: complement the documentation for pandas.DataFrame.agg reformat the documentation according to PEP 8 * DOC: complement the documentation for pandas.DataFrame.agg reformat the documentation according to PEP 8 --- pandas/core/frame.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/pandas/core/frame.py b/pandas/core/frame.py index fde83a8393241..312d449e36022 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -7380,6 +7380,15 @@ def _gotitem( min 1.0 2.0 sum 12.0 NaN + Aggregate different functions over the columns and rename the index of the resulting + DataFrame. + + >>> df.agg(x=('A', max), y=('B', 'min'), z=('C', np.mean)) + A B C + x 7.0 NaN NaN + y NaN 2.0 NaN + z NaN NaN 6.0 + Aggregate over the columns. 
>>> df.agg("mean", axis="columns") From 54ce729278fcbfc05497e9cf661e8ea66669ba3c Mon Sep 17 00:00:00 2001 From: Daniel Saxton <2658661+dsaxton@users.noreply.github.com> Date: Mon, 31 Aug 2020 13:45:11 -0500 Subject: [PATCH 368/632] Update SparseDtype user guide doc (#35837) * Update SparseDtype user guide doc * Reword --- doc/source/user_guide/sparse.rst | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/doc/source/user_guide/sparse.rst b/doc/source/user_guide/sparse.rst index ca8e9a2f313f6..35e0e0fb86472 100644 --- a/doc/source/user_guide/sparse.rst +++ b/doc/source/user_guide/sparse.rst @@ -87,14 +87,15 @@ The :attr:`SparseArray.dtype` property stores two pieces of information sparr.dtype -A :class:`SparseDtype` may be constructed by passing each of these +A :class:`SparseDtype` may be constructed by passing only a dtype .. ipython:: python pd.SparseDtype(np.dtype('datetime64[ns]')) -The default fill value for a given NumPy dtype is the "missing" value for that dtype, -though it may be overridden. +in which case a default fill value will be used (for NumPy dtypes this is often the +"missing" value for that dtype). To override this default an explicit fill value may be +passed instead .. ipython:: python From 094c7ca008643dae47fe64345c6cd04e3fc50d6d Mon Sep 17 00:00:00 2001 From: Ben Forbes Date: Tue, 1 Sep 2020 04:47:25 +1000 Subject: [PATCH 369/632] clarify VS installer instructions (#35640) --- doc/source/development/contributing.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/source/development/contributing.rst b/doc/source/development/contributing.rst index 4ffd1d586a99a..e5c6f77eea3ef 100644 --- a/doc/source/development/contributing.rst +++ b/doc/source/development/contributing.rst @@ -204,6 +204,7 @@ You will need `Build Tools for Visual Studio 2017 You DO NOT need to install Visual Studio 2019. You only need "Build Tools for Visual Studio 2019" found by scrolling down to "All downloads" -> "Tools for Visual Studio 2019". + In the installer, select the "C++ build tools" workload. **Mac OS** From a7a7f6c1eac79de7e253eba261a9421740b98cef Mon Sep 17 00:00:00 2001 From: Simon Hawkins Date: Mon, 31 Aug 2020 21:26:34 +0100 Subject: [PATCH 370/632] TYP: check_untyped_defs core.internals.concat (#36008) --- pandas/core/internals/concat.py | 19 ++++++++++--------- setup.cfg | 3 --- 2 files changed, 10 insertions(+), 12 deletions(-) diff --git a/pandas/core/internals/concat.py b/pandas/core/internals/concat.py index 99a586f056b12..88839d2211f81 100644 --- a/pandas/core/internals/concat.py +++ b/pandas/core/internals/concat.py @@ -1,10 +1,11 @@ from collections import defaultdict import copy -from typing import List +from typing import Dict, List import numpy as np from pandas._libs import NaT, internals as libinternals +from pandas._typing import DtypeObj from pandas.util._decorators import cache_readonly from pandas.core.dtypes.cast import maybe_promote @@ -100,10 +101,10 @@ def _get_mgr_concatenation_plan(mgr, indexers): """ # Calculate post-reindex shape , save for item axis which will be separate # for each block anyway. 
- mgr_shape = list(mgr.shape) + mgr_shape_list = list(mgr.shape) for ax, indexer in indexers.items(): - mgr_shape[ax] = len(indexer) - mgr_shape = tuple(mgr_shape) + mgr_shape_list[ax] = len(indexer) + mgr_shape = tuple(mgr_shape_list) if 0 in indexers: ax0_indexer = indexers.pop(0) @@ -126,9 +127,9 @@ def _get_mgr_concatenation_plan(mgr, indexers): join_unit_indexers = indexers.copy() - shape = list(mgr_shape) - shape[0] = len(placements) - shape = tuple(shape) + shape_list = list(mgr_shape) + shape_list[0] = len(placements) + shape = tuple(shape_list) if blkno == -1: unit = JoinUnit(None, shape) @@ -374,8 +375,8 @@ def _get_empty_dtype_and_na(join_units): else: dtypes[i] = unit.dtype - upcast_classes = defaultdict(list) - null_upcast_classes = defaultdict(list) + upcast_classes: Dict[str, List[DtypeObj]] = defaultdict(list) + null_upcast_classes: Dict[str, List[DtypeObj]] = defaultdict(list) for dtype, unit in zip(dtypes, join_units): if dtype is None: continue diff --git a/setup.cfg b/setup.cfg index 2ba22e5aad3c7..c10624d60aaff 100644 --- a/setup.cfg +++ b/setup.cfg @@ -184,9 +184,6 @@ check_untyped_defs=False [mypy-pandas.core.internals.blocks] check_untyped_defs=False -[mypy-pandas.core.internals.concat] -check_untyped_defs=False - [mypy-pandas.core.internals.construction] check_untyped_defs=False From 945a543451236e3e032ba9cb6550ff47c413919c Mon Sep 17 00:00:00 2001 From: Matthew Roeschke Date: Mon, 31 Aug 2020 13:42:05 -0700 Subject: [PATCH 371/632] CLN: window/rolling.py (#35982) * CLN: rolling.py * Use obj._constructor instead Co-authored-by: Matt Roeschke --- pandas/core/window/rolling.py | 23 ++++++----------------- 1 file changed, 6 insertions(+), 17 deletions(-) diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py index 246bf8e6f71b7..a3f60c0bc5098 100644 --- a/pandas/core/window/rolling.py +++ b/pandas/core/window/rolling.py @@ -378,23 +378,13 @@ def _prep_values(self, values: Optional[np.ndarray] = None) -> np.ndarray: return values - def _wrap_result(self, result, block=None, obj=None): + def _wrap_result(self, result: np.ndarray) -> "Series": """ - Wrap a single result. + Wrap a single 1D result. """ - if obj is None: - obj = self._selected_obj - index = obj.index + obj = self._selected_obj - if isinstance(result, np.ndarray): - - if result.ndim == 1: - from pandas import Series - - return Series(result, index, name=obj.name) - - return type(obj)(result, index=index, columns=block.columns) - return result + return obj._constructor(result, obj.index, name=obj.name) def _insert_on_column(self, result: "DataFrame", obj: "DataFrame"): # if we have an 'on' column we want to put it back into @@ -421,7 +411,7 @@ def _insert_on_column(self, result: "DataFrame", obj: "DataFrame"): # insert at the end result[name] = extra_col - def _center_window(self, result, window) -> np.ndarray: + def _center_window(self, result: np.ndarray, window) -> np.ndarray: """ Center the result in the window. """ @@ -480,7 +470,6 @@ def _apply_series(self, homogeneous_func: Callable[..., ArrayLike]) -> "Series": Series version of _apply_blockwise """ _, obj = self._create_blocks(self._selected_obj) - values = obj.values try: values = self._prep_values(obj.values) @@ -502,7 +491,7 @@ def _apply_blockwise( # This isn't quite blockwise, since `blocks` is actually a collection # of homogenenous DataFrames. 
- blocks, obj = self._create_blocks(self._selected_obj) + _, obj = self._create_blocks(self._selected_obj) mgr = obj._mgr def hfunc(bvalues: ArrayLike) -> ArrayLike: From 668724bbbde70fb13ef43475a293175580927f57 Mon Sep 17 00:00:00 2001 From: Simon Hawkins Date: Mon, 31 Aug 2020 21:45:17 +0100 Subject: [PATCH 372/632] TYP: misc typing cleanup for core/computation/expressions.py (#36005) --- pandas/core/computation/expressions.py | 22 +++++++++++++--------- 1 file changed, 13 insertions(+), 9 deletions(-) diff --git a/pandas/core/computation/expressions.py b/pandas/core/computation/expressions.py index 05a5538a88772..a9c0cb0571446 100644 --- a/pandas/core/computation/expressions.py +++ b/pandas/core/computation/expressions.py @@ -6,6 +6,7 @@ """ import operator +from typing import List, Set import warnings import numpy as np @@ -21,7 +22,7 @@ import numexpr as ne _TEST_MODE = None -_TEST_RESULT = None +_TEST_RESULT: List[bool] = list() _USE_NUMEXPR = _NUMEXPR_INSTALLED _evaluate = None _where = None @@ -75,7 +76,7 @@ def _can_use_numexpr(op, op_str, a, b, dtype_check): # required min elements (otherwise we are adding overhead) if np.prod(a.shape) > _MIN_ELEMENTS: # check for dtype compatibility - dtypes = set() + dtypes: Set[str] = set() for o in [a, b]: # Series implements dtypes, check for dimension count as well if hasattr(o, "dtypes") and o.ndim > 1: @@ -247,25 +248,28 @@ def where(cond, a, b, use_numexpr=True): return _where(cond, a, b) if use_numexpr else _where_standard(cond, a, b) -def set_test_mode(v=True): +def set_test_mode(v: bool = True) -> None: """ - Keeps track of whether numexpr was used. Stores an additional ``True`` - for every successful use of evaluate with numexpr since the last - ``get_test_result`` + Keeps track of whether numexpr was used. + + Stores an additional ``True`` for every successful use of evaluate with + numexpr since the last ``get_test_result``. """ global _TEST_MODE, _TEST_RESULT _TEST_MODE = v _TEST_RESULT = [] -def _store_test_result(used_numexpr): +def _store_test_result(used_numexpr: bool) -> None: global _TEST_RESULT if used_numexpr: _TEST_RESULT.append(used_numexpr) -def get_test_result(): - """get test result and reset test_results""" +def get_test_result() -> List[bool]: + """ + Get test result and reset test_results. + """ global _TEST_RESULT res = _TEST_RESULT _TEST_RESULT = [] From bcb9e1b52d0dbfff573d390cd226ac132c8650fd Mon Sep 17 00:00:00 2001 From: Richard Shadrach <45562402+rhshadrach@users.noreply.github.com> Date: Mon, 31 Aug 2020 18:31:07 -0400 Subject: [PATCH 373/632] BUG: Attributes are lost when subsetting columns in groupby (#35444) --- doc/source/whatsnew/v1.2.0.rst | 2 ++ pandas/core/groupby/generic.py | 19 +++++++++++-- pandas/tests/groupby/test_groupby.py | 42 ++++++++++++++++++++++++++++ 3 files changed, 61 insertions(+), 2 deletions(-) diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index 55570341cf4e8..1617bf66c4f04 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -255,6 +255,8 @@ Groupby/resample/rolling - Bug when combining methods :meth:`DataFrame.groupby` with :meth:`DataFrame.resample` and :meth:`DataFrame.interpolate` raising an ``TypeError`` (:issue:`35325`) - Bug in :meth:`DataFrameGroupBy.apply` where a non-nuisance grouping column would be dropped from the output columns if another groupby method was called before ``.apply()`` (:issue:`34656`) - Bug in :meth:`DataFrameGroupby.apply` would drop a :class:`CategoricalIndex` when grouped on. 
(:issue:`35792`) +- Bug when subsetting columns on a :class:`~pandas.core.groupby.DataFrameGroupBy` (e.g. ``df.groupby('a')[['b']])``) would reset the attributes ``axis``, ``dropna``, ``group_keys``, ``level``, ``mutated``, ``sort``, and ``squeeze`` to their default values. (:issue:`9959`) +- Reshaping ^^^^^^^^^ diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index e39464628ccaa..7b45a114e548b 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -1603,17 +1603,32 @@ def _gotitem(self, key, ndim: int, subset=None): return DataFrameGroupBy( subset, self.grouper, - selection=key, + axis=self.axis, + level=self.level, grouper=self.grouper, exclusions=self.exclusions, + selection=key, as_index=self.as_index, + sort=self.sort, + group_keys=self.group_keys, + squeeze=self.squeeze, observed=self.observed, + mutated=self.mutated, + dropna=self.dropna, ) elif ndim == 1: if subset is None: subset = self.obj[key] return SeriesGroupBy( - subset, selection=key, grouper=self.grouper, observed=self.observed + subset, + level=self.level, + grouper=self.grouper, + selection=key, + sort=self.sort, + group_keys=self.group_keys, + squeeze=self.squeeze, + observed=self.observed, + dropna=self.dropna, ) raise AssertionError("invalid ndim for _gotitem") diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py index 8c51ebf89f5c0..c743058c988b4 100644 --- a/pandas/tests/groupby/test_groupby.py +++ b/pandas/tests/groupby/test_groupby.py @@ -2069,3 +2069,45 @@ def test_group_on_two_row_multiindex_returns_one_tuple_key(): assert len(result) == 1 key = (1, 2) assert (result[key] == expected[key]).all() + + +@pytest.mark.parametrize( + "klass, attr, value", + [ + (DataFrame, "axis", 1), + (DataFrame, "level", "a"), + (DataFrame, "as_index", False), + (DataFrame, "sort", False), + (DataFrame, "group_keys", False), + (DataFrame, "squeeze", True), + (DataFrame, "observed", True), + (DataFrame, "dropna", False), + pytest.param( + Series, + "axis", + 1, + marks=pytest.mark.xfail( + reason="GH 35443: Attribute currently not passed on to series" + ), + ), + (Series, "level", "a"), + (Series, "as_index", False), + (Series, "sort", False), + (Series, "group_keys", False), + (Series, "squeeze", True), + (Series, "observed", True), + (Series, "dropna", False), + ], +) +@pytest.mark.filterwarnings( + "ignore:The `squeeze` parameter is deprecated:FutureWarning" +) +def test_subsetting_columns_keeps_attrs(klass, attr, value): + # GH 9959 - When subsetting columns, don't drop attributes + df = pd.DataFrame({"a": [1], "b": [2], "c": [3]}) + if attr != "axis": + df = df.set_index("a") + + expected = df.groupby("a", **{attr: value}) + result = expected[["b"]] if klass is DataFrame else expected["b"] + assert getattr(result, attr) == getattr(expected, attr) From d7f30b48e48480da875a1240c0888308f13cc51e Mon Sep 17 00:00:00 2001 From: Daniel Saxton <2658661+dsaxton@users.noreply.github.com> Date: Mon, 31 Aug 2020 17:32:33 -0500 Subject: [PATCH 374/632] REGR: Fix comparison broadcasting over array of Intervals (#35938) --- doc/source/whatsnew/v1.1.2.rst | 1 + pandas/_libs/interval.pyx | 5 +++++ pandas/tests/frame/methods/test_replace.py | 7 +++++++ pandas/tests/scalar/interval/test_arithmetic.py | 12 ++++++++++++ pandas/tests/scalar/interval/test_interval.py | 9 +++++++++ 5 files changed, 34 insertions(+) diff --git a/doc/source/whatsnew/v1.1.2.rst b/doc/source/whatsnew/v1.1.2.rst index b4c196f548147..c6917d1b50619 100644 --- 
From d7f30b48e48480da875a1240c0888308f13cc51e Mon Sep 17 00:00:00 2001
From: Daniel Saxton <2658661+dsaxton@users.noreply.github.com>
Date: Mon, 31 Aug 2020 17:32:33 -0500
Subject: [PATCH 374/632] REGR: Fix comparison broadcasting over array of Intervals (#35938)

---
 doc/source/whatsnew/v1.1.2.rst                  |  1 +
 pandas/_libs/interval.pyx                       |  5 +++++
 pandas/tests/frame/methods/test_replace.py      |  7 +++++++
 pandas/tests/scalar/interval/test_arithmetic.py | 12 ++++++++++++
 pandas/tests/scalar/interval/test_interval.py   |  9 +++++++++
 5 files changed, 34 insertions(+)

diff --git a/doc/source/whatsnew/v1.1.2.rst b/doc/source/whatsnew/v1.1.2.rst
index b4c196f548147..c6917d1b50619 100644
--- a/doc/source/whatsnew/v1.1.2.rst
+++ b/doc/source/whatsnew/v1.1.2.rst
@@ -17,6 +17,7 @@ Fixed regressions
 - Regression in :meth:`DatetimeIndex.intersection` incorrectly raising ``AssertionError`` when intersecting against a list (:issue:`35876`)
 - Fix regression in updating a column inplace (e.g. using ``df['col'].fillna(.., inplace=True)``) (:issue:`35731`)
 - Performance regression for :meth:`RangeIndex.format` (:issue:`35712`)
+- Regression in :meth:`DataFrame.replace` where a ``TypeError`` would be raised when attempting to replace elements of type :class:`Interval` (:issue:`35931`)
 -
 
diff --git a/pandas/_libs/interval.pyx b/pandas/_libs/interval.pyx
index 6867e8aba7411..40bd5ad8f5a1f 100644
--- a/pandas/_libs/interval.pyx
+++ b/pandas/_libs/interval.pyx
@@ -358,6 +358,11 @@ cdef class Interval(IntervalMixin):
             self_tuple = (self.left, self.right, self.closed)
             other_tuple = (other.left, other.right, other.closed)
             return PyObject_RichCompare(self_tuple, other_tuple, op)
+        elif util.is_array(other):
+            return np.array(
+                [PyObject_RichCompare(self, x, op) for x in other],
+                dtype=bool,
+            )
 
         return NotImplemented
 
diff --git a/pandas/tests/frame/methods/test_replace.py b/pandas/tests/frame/methods/test_replace.py
index 8603bff0587b6..83dfd42ae2a6e 100644
--- a/pandas/tests/frame/methods/test_replace.py
+++ b/pandas/tests/frame/methods/test_replace.py
@@ -1581,3 +1581,10 @@ def test_replace_with_compiled_regex(self):
         result = df.replace({regex: "z"}, regex=True)
         expected = pd.DataFrame(["z", "b", "c"])
         tm.assert_frame_equal(result, expected)
+
+    def test_replace_intervals(self):
+        # https://github.com/pandas-dev/pandas/issues/35931
+        df = pd.DataFrame({"a": [pd.Interval(0, 1), pd.Interval(0, 1)]})
+        result = df.replace({"a": {pd.Interval(0, 1): "x"}})
+        expected = pd.DataFrame({"a": ["x", "x"]})
+        tm.assert_frame_equal(result, expected)
diff --git a/pandas/tests/scalar/interval/test_arithmetic.py b/pandas/tests/scalar/interval/test_arithmetic.py
index 5252f1a4d5a24..b4c2b448e252a 100644
--- a/pandas/tests/scalar/interval/test_arithmetic.py
+++ b/pandas/tests/scalar/interval/test_arithmetic.py
@@ -45,3 +45,15 @@ def test_numeric_interval_add_timedelta_raises(interval, delta):
 
     with pytest.raises((TypeError, ValueError), match=msg):
         delta + interval
+
+
+@pytest.mark.parametrize("klass", [timedelta, np.timedelta64, Timedelta])
+def test_timedelta_add_timestamp_interval(klass):
+    delta = klass(0)
+    expected = Interval(Timestamp("2020-01-01"), Timestamp("2020-02-01"))
+
+    result = delta + expected
+    assert result == expected
+
+    result = expected + delta
+    assert result == expected
diff --git a/pandas/tests/scalar/interval/test_interval.py b/pandas/tests/scalar/interval/test_interval.py
index a0151bb9ac7bf..8ad9a2c7a9c70 100644
--- a/pandas/tests/scalar/interval/test_interval.py
+++ b/pandas/tests/scalar/interval/test_interval.py
@@ -2,6 +2,7 @@
 import pytest
 
 from pandas import Interval, Period, Timedelta, Timestamp
+import pandas._testing as tm
 import pandas.core.common as com
 
 
@@ -267,3 +268,11 @@ def test_constructor_errors_tz(self, tz_left, tz_right):
         msg = "left and right must have the same time zone"
         with pytest.raises(error, match=msg):
             Interval(left, right)
+
+    def test_equality_comparison_broadcasts_over_array(self):
+        # https://github.com/pandas-dev/pandas/issues/35931
+        interval = Interval(0, 1)
+        arr = np.array([interval, interval])
+        result = interval == arr
+        expected = np.array([True, True])
+        tm.assert_numpy_array_equal(result, expected)
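The regression fix above restores elementwise broadcasting when a scalar ``Interval`` is compared against a NumPy object array, which is also what lets ``DataFrame.replace`` handle ``Interval`` keys again (see ``test_replace_intervals``). A short illustrative session (assumes a build that includes this patch):

>>> import numpy as np
>>> import pandas as pd
>>> iv = pd.Interval(0, 1)
>>> iv == np.array([iv, pd.Interval(1, 2)])
array([ True, False])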
From b528be68fdfe5400cf53e4b6f2ecade6b01208f6 Mon Sep 17 00:00:00 2001
From: jbrockmendel
Date: Mon, 31 Aug 2020 18:20:40 -0700
Subject: [PATCH 375/632] BUG: None in Float64Index raising TypeError, should return False (#35999)

---
 doc/source/whatsnew/v1.1.2.rst                |  2 +-
 pandas/_libs/index.pyx                        |  6 +++++-
 pandas/tests/indexes/numeric/test_indexing.py |  6 ++++++
 3 files changed, 12 insertions(+), 2 deletions(-)

diff --git a/doc/source/whatsnew/v1.1.2.rst b/doc/source/whatsnew/v1.1.2.rst
index c6917d1b50619..9b1ad658d4666 100644
--- a/doc/source/whatsnew/v1.1.2.rst
+++ b/doc/source/whatsnew/v1.1.2.rst
@@ -31,7 +31,7 @@ Bug fixes
 - Bug in :class:`Series` constructor raising a ``TypeError`` when constructing sparse datetime64 dtypes (:issue:`35762`)
 - Bug in :meth:`DataFrame.apply` with ``result_type="reduce"`` returning with incorrect index (:issue:`35683`)
 - Bug in :meth:`DateTimeIndex.format` and :meth:`PeriodIndex.format` with ``name=True`` setting the first item to ``"None"`` where it should be ``""`` (:issue:`35712`)
--
+- Bug in :meth:`Float64Index.__contains__` incorrectly raising ``TypeError`` instead of returning ``False`` (:issue:`35788`)
 
 .. ---------------------------------------------------------------------------
 
diff --git a/pandas/_libs/index.pyx b/pandas/_libs/index.pyx
index d6659cc1895b1..569562f5b5037 100644
--- a/pandas/_libs/index.pyx
+++ b/pandas/_libs/index.pyx
@@ -80,7 +80,11 @@ cdef class IndexEngine:
 
         values = self._get_index_values()
         self._check_type(val)
-        loc = _bin_search(values, val)  # .searchsorted(val, side='left')
+        try:
+            loc = _bin_search(values, val)  # .searchsorted(val, side='left')
+        except TypeError:
+            # GH#35788 e.g. val=None with float64 values
+            raise KeyError(val)
         if loc >= len(values):
             raise KeyError(val)
         if values[loc] != val:
diff --git a/pandas/tests/indexes/numeric/test_indexing.py b/pandas/tests/indexes/numeric/test_indexing.py
index 473e370c76f8b..508bd2f566507 100644
--- a/pandas/tests/indexes/numeric/test_indexing.py
+++ b/pandas/tests/indexes/numeric/test_indexing.py
@@ -228,6 +228,12 @@ def test_take_fill_value_ints(self, klass):
 
 
 class TestContains:
+    @pytest.mark.parametrize("klass", [Float64Index, Int64Index, UInt64Index])
+    def test_contains_none(self, klass):
+        # GH#35788 should return False, not raise TypeError
+        index = klass([0, 1, 2, 3, 4])
+        assert None not in index
+
     def test_contains_float64_nans(self):
         index = Float64Index([1.0, 2.0, np.nan])
         assert np.nan in index
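After the fix above, a key that ``_bin_search`` cannot compare now surfaces as ``KeyError`` internally, so membership checks degrade to ``False`` instead of leaking a ``TypeError``. Illustrative sketch (``Float64Index`` shown; the test also covers ``Int64Index`` and ``UInt64Index``):

>>> import pandas as pd
>>> idx = pd.Float64Index([1.0, 2.0, 3.0])
>>> None in idx
False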
From 08bcea6360a28af4538ced463024cae11df0cc39 Mon Sep 17 00:00:00 2001
From: Erfan Nariman <34067903+erfannariman@users.noreply.github.com>
Date: Tue, 1 Sep 2020 13:42:03 +0200
Subject: [PATCH 376/632] Removed mypy from pre commit (#35066)

---
 .pre-commit-config.yaml | 12 ------------
 1 file changed, 12 deletions(-)

diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index b7fd797fb7230..fcd0ecdc9fcd2 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -30,15 +30,3 @@ repos:
   - id: isort
     language: python_venv
     exclude: ^pandas/__init__\.py$|^pandas/core/api\.py$
-- repo: https://github.com/pre-commit/mirrors-mypy
-  rev: v0.730
-  hooks:
-  - id: mypy
-    args:
-    # As long as a some files are excluded from check-untyped-defs
-    # we have to exclude it from the pre-commit hook as the configuration
-    # is based on modules but the hook runs on files.
-    - --no-check-untyped-defs
-    - --follow-imports
-    - skip
-    files: pandas/
From e288b2b0518200cacb4a1aa6244aa43da24f6c0e Mon Sep 17 00:00:00 2001
From: Jonathan Shreckengost
Date: Tue, 1 Sep 2020 08:38:27 -0400
Subject: [PATCH 377/632] Comma cleanup for #35925 (#36023)

---
 pandas/tests/frame/test_analytics.py    | 8 ++------
 pandas/tests/frame/test_constructors.py | 2 +-
 pandas/tests/frame/test_reshape.py      | 4 ++--
 3 files changed, 5 insertions(+), 9 deletions(-)

diff --git a/pandas/tests/frame/test_analytics.py b/pandas/tests/frame/test_analytics.py
index 52a1e3aae9058..b0ba0d991c9b0 100644
--- a/pandas/tests/frame/test_analytics.py
+++ b/pandas/tests/frame/test_analytics.py
@@ -86,11 +86,7 @@ def wrapper(x):
         result0 = f(axis=0, skipna=False)
         result1 = f(axis=1, skipna=False)
         tm.assert_series_equal(
-            result0,
-            frame.apply(wrapper),
-            check_dtype=check_dtype,
-            rtol=rtol,
-            atol=atol,
+            result0, frame.apply(wrapper), check_dtype=check_dtype, rtol=rtol, atol=atol
         )
         # HACK: win32
         tm.assert_series_equal(
@@ -116,7 +112,7 @@ def wrapper(x):
         if opname in ["sum", "prod"]:
             expected = frame.apply(skipna_wrapper, axis=1)
             tm.assert_series_equal(
-                result1, expected, check_dtype=False, rtol=rtol, atol=atol,
+                result1, expected, check_dtype=False, rtol=rtol, atol=atol
             )
 
     # check dtypes
diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py
index c8f5b2b0f6364..0d1004809f7f1 100644
--- a/pandas/tests/frame/test_constructors.py
+++ b/pandas/tests/frame/test_constructors.py
@@ -932,7 +932,7 @@ def test_constructor_mrecarray(self):
 
         # from GH3479
         assert_fr_equal = functools.partial(
-            tm.assert_frame_equal, check_index_type=True, check_column_type=True,
+            tm.assert_frame_equal, check_index_type=True, check_column_type=True
         )
         arrays = [
             ("float", np.array([1.5, 2.0])),
diff --git a/pandas/tests/frame/test_reshape.py b/pandas/tests/frame/test_reshape.py
index 6a8f1e7c1aca2..d80ebaa09b6a8 100644
--- a/pandas/tests/frame/test_reshape.py
+++ b/pandas/tests/frame/test_reshape.py
@@ -417,7 +417,7 @@ def test_unstack_mixed_type_name_in_multiindex(
 
         result = df.unstack(unstack_idx)
         expected = pd.DataFrame(
-            expected_values, columns=expected_columns, index=expected_index,
+            expected_values, columns=expected_columns, index=expected_index
         )
         tm.assert_frame_equal(result, expected)
 
@@ -807,7 +807,7 @@ def test_unstack_multi_level_cols(self):
                 [["B", "C"], ["B", "D"]], names=["c1", "c2"]
             ),
             index=pd.MultiIndex.from_tuples(
-                [[10, 20, 30], [10, 20, 40]], names=["i1", "i2", "i3"],
+                [[10, 20, 30], [10, 20, 40]], names=["i1", "i2", "i3"]
             ),
         )
         assert df.unstack(["i2", "i1"]).columns.names[-2:] == ["i2", "i1"]
From e6ebb5cfff57c08753a5606566bc9cdef4a2d91e Mon Sep 17 00:00:00 2001
From: Fangchen Li
Date: Tue, 1 Sep 2020 11:06:10 -0500
Subject: [PATCH 378/632] TYP: add type annotation to `_xlwt.py` #36024 (#36025)

---
 pandas/io/excel/_xlwt.py | 14 +++++++++-----
 setup.cfg                |  3 ---
 2 files changed, 9 insertions(+), 8 deletions(-)

diff --git a/pandas/io/excel/_xlwt.py b/pandas/io/excel/_xlwt.py
index 78efe77e9fe2d..e1f72eb533c51 100644
--- a/pandas/io/excel/_xlwt.py
+++ b/pandas/io/excel/_xlwt.py
@@ -1,8 +1,13 @@
+from typing import TYPE_CHECKING, Dict
+
 import pandas._libs.json as json
 
 from pandas.io.excel._base import ExcelWriter
 from pandas.io.excel._util import _validate_freeze_panes
 
+if TYPE_CHECKING:
+    from xlwt import XFStyle
+
 
 class _XlwtWriter(ExcelWriter):
     engine = "xlwt"
@@ -29,12 +34,11 @@ def save(self):
         """
         Save workbook to disk.
""" - return self.book.save(self.path) + self.book.save(self.path) def write_cells( self, cells, sheet_name=None, startrow=0, startcol=0, freeze_panes=None ): - # Write the frame cells using xlwt. sheet_name = self._get_sheet_name(sheet_name) @@ -49,7 +53,7 @@ def write_cells( wks.set_horz_split_pos(freeze_panes[0]) wks.set_vert_split_pos(freeze_panes[1]) - style_dict = {} + style_dict: Dict[str, XFStyle] = {} for cell in cells: val, fmt = self._value_with_fmt(cell.val) @@ -101,14 +105,14 @@ def _style_to_xlwt( f"{key}: {cls._style_to_xlwt(value, False)}" for key, value in item.items() ] - out = f"{(line_sep).join(it)} " + out = f"{line_sep.join(it)} " return out else: it = [ f"{key} {cls._style_to_xlwt(value, False)}" for key, value in item.items() ] - out = f"{(field_sep).join(it)} " + out = f"{field_sep.join(it)} " return out else: item = f"{item}" diff --git a/setup.cfg b/setup.cfg index c10624d60aaff..2447a91f88f4e 100644 --- a/setup.cfg +++ b/setup.cfg @@ -223,9 +223,6 @@ check_untyped_defs=False [mypy-pandas.io.excel._util] check_untyped_defs=False -[mypy-pandas.io.excel._xlwt] -check_untyped_defs=False - [mypy-pandas.io.formats.console] check_untyped_defs=False From 767f2ab92a10e97f56575c05a8096b6290d8a516 Mon Sep 17 00:00:00 2001 From: Souris Ash Date: Tue, 1 Sep 2020 21:45:25 +0530 Subject: [PATCH 379/632] Removed outdated examples for pd.Interval (pandas-dev#36002) (#36026) --- pandas/_libs/interval.pyx | 6 ------ 1 file changed, 6 deletions(-) diff --git a/pandas/_libs/interval.pyx b/pandas/_libs/interval.pyx index 40bd5ad8f5a1f..931ad8326c371 100644 --- a/pandas/_libs/interval.pyx +++ b/pandas/_libs/interval.pyx @@ -291,12 +291,6 @@ cdef class Interval(IntervalMixin): True >>> year_2017.length Timedelta('365 days 00:00:00') - - And also you can create string intervals - - >>> volume_1 = pd.Interval('Ant', 'Dog', closed='both') - >>> 'Bee' in volume_1 - True """ _typ = "interval" __array_priority__ = 1000 From fd6a55f0689a5e62b61e5e673619a0d754183f81 Mon Sep 17 00:00:00 2001 From: Simon Hawkins Date: Tue, 1 Sep 2020 17:26:45 +0100 Subject: [PATCH 380/632] TYP: misc typing cleanup in core/indexes/multi.py (#36007) * TYP: misc typing cleanup in core/indexes/multi.py * update per comments --- pandas/core/indexes/multi.py | 46 +++++++++++++++++++++++++----------- 1 file changed, 32 insertions(+), 14 deletions(-) diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index b29c27982f087..f66b009e6d505 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -19,7 +19,7 @@ from pandas._libs import algos as libalgos, index as libindex, lib from pandas._libs.hashtable import duplicated_int64 -from pandas._typing import AnyArrayLike, Scalar +from pandas._typing import AnyArrayLike, Label, Scalar from pandas.compat.numpy import function as nv from pandas.errors import InvalidIndexError, PerformanceWarning, UnsortedIndexError from pandas.util._decorators import Appender, cache_readonly, doc @@ -449,7 +449,12 @@ def from_arrays(cls, arrays, sortorder=None, names=lib.no_default) -> "MultiInde ) @classmethod - def from_tuples(cls, tuples, sortorder=None, names=None): + def from_tuples( + cls, + tuples, + sortorder: Optional[int] = None, + names: Optional[Sequence[Label]] = None, + ): """ Convert list of tuples to MultiIndex. 
@@ -490,6 +495,7 @@ def from_tuples(cls, tuples, sortorder=None, names=None): elif is_iterator(tuples): tuples = list(tuples) + arrays: List[Sequence[Label]] if len(tuples) == 0: if names is None: raise TypeError("Cannot infer number of levels from empty list") @@ -700,8 +706,13 @@ def levels(self): return FrozenList(result) def _set_levels( - self, levels, level=None, copy=False, validate=True, verify_integrity=False - ): + self, + levels, + level=None, + copy: bool = False, + validate: bool = True, + verify_integrity: bool = False, + ) -> None: # This is NOT part of the levels property because it should be # externally not allowed to set levels. User beware if you change # _levels directly @@ -719,10 +730,10 @@ def _set_levels( ) else: level_numbers = [self._get_level_number(lev) for lev in level] - new_levels = list(self._levels) + new_levels_list = list(self._levels) for lev_num, lev in zip(level_numbers, levels): - new_levels[lev_num] = ensure_index(lev, copy=copy)._shallow_copy() - new_levels = FrozenList(new_levels) + new_levels_list[lev_num] = ensure_index(lev, copy=copy)._shallow_copy() + new_levels = FrozenList(new_levels_list) if verify_integrity: new_codes = self._verify_integrity(levels=new_levels) @@ -875,8 +886,13 @@ def codes(self): return self._codes def _set_codes( - self, codes, level=None, copy=False, validate=True, verify_integrity=False - ): + self, + codes, + level=None, + copy: bool = False, + validate: bool = True, + verify_integrity: bool = False, + ) -> None: if validate: if level is None and len(codes) != self.nlevels: raise ValueError("Length of codes must match number of levels") @@ -890,11 +906,13 @@ def _set_codes( ) else: level_numbers = [self._get_level_number(lev) for lev in level] - new_codes = list(self._codes) + new_codes_list = list(self._codes) for lev_num, level_codes in zip(level_numbers, codes): lev = self.levels[lev_num] - new_codes[lev_num] = _coerce_indexer_frozen(level_codes, lev, copy=copy) - new_codes = FrozenList(new_codes) + new_codes_list[lev_num] = _coerce_indexer_frozen( + level_codes, lev, copy=copy + ) + new_codes = FrozenList(new_codes_list) if verify_integrity: new_codes = self._verify_integrity(codes=new_codes) @@ -2435,7 +2453,7 @@ def _get_partial_string_timestamp_match_key(self, key): if isinstance(key, str) and self.levels[0]._supports_partial_string_indexing: # Convert key '2016-01-01' to # ('2016-01-01'[, slice(None, None, None)]+) - key = tuple([key] + [slice(None)] * (len(self.levels) - 1)) + key = (key,) + (slice(None),) * (len(self.levels) - 1) if isinstance(key, tuple): # Convert (..., '2016-01-01', ...) 
in tuple to @@ -3086,7 +3104,7 @@ def _update_indexer(idxr, indexer=indexer): elif is_list_like(k): # a collection of labels to include from this level (these # are or'd) - indexers = None + indexers: Optional[Int64Index] = None for x in k: try: idxrs = _convert_to_indexer( From 329e1c7d3333931222dc84241d8b2bc84ff8a6c8 Mon Sep 17 00:00:00 2001 From: Anshoo Rajput <57529264+rajanshoo25@users.noreply.github.com> Date: Tue, 1 Sep 2020 22:06:50 +0530 Subject: [PATCH 381/632] remove trailing commas for #35925 (#36029) --- pandas/io/parquet.py | 4 ++-- pandas/io/parsers.py | 4 ---- pandas/io/pytables.py | 12 ++++++------ 3 files changed, 8 insertions(+), 12 deletions(-) diff --git a/pandas/io/parquet.py b/pandas/io/parquet.py index 7f0eef039a1e8..f2ce2f056ce82 100644 --- a/pandas/io/parquet.py +++ b/pandas/io/parquet.py @@ -128,7 +128,7 @@ def write( self.api.parquet.write_table(table, path, compression=compression, **kwargs) def read( - self, path, columns=None, storage_options: StorageOptions = None, **kwargs, + self, path, columns=None, storage_options: StorageOptions = None, **kwargs ): if is_fsspec_url(path) and "filesystem" not in kwargs: import_optional_dependency("fsspec") @@ -218,7 +218,7 @@ def write( ) def read( - self, path, columns=None, storage_options: StorageOptions = None, **kwargs, + self, path, columns=None, storage_options: StorageOptions = None, **kwargs ): if is_fsspec_url(path): fsspec = import_optional_dependency("fsspec") diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py index 983aa56324083..9ad527684120e 100644 --- a/pandas/io/parsers.py +++ b/pandas/io/parsers.py @@ -1967,10 +1967,6 @@ def _do_date_conversions(self, names, data): class CParserWrapper(ParserBase): - """ - - """ - def __init__(self, src, **kwds): self.kwds = kwds kwds = kwds.copy() diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index f08e0514a68e1..0913627324c48 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -2931,7 +2931,7 @@ def read_index_node( # If the index was an empty array write_array_empty() will # have written a sentinel. Here we replace it with the original. 
if "shape" in node._v_attrs and np.prod(node._v_attrs.shape) == 0: - data = np.empty(node._v_attrs.shape, dtype=node._v_attrs.value_type,) + data = np.empty(node._v_attrs.shape, dtype=node._v_attrs.value_type) kind = _ensure_decoded(node._v_attrs.kind) name = None @@ -4103,7 +4103,7 @@ def create_description( return d def read_coordinates( - self, where=None, start: Optional[int] = None, stop: Optional[int] = None, + self, where=None, start: Optional[int] = None, stop: Optional[int] = None ): """ select coordinates (row numbers) from a table; return the @@ -4374,7 +4374,7 @@ def write_data_chunk( self.table.flush() def delete( - self, where=None, start: Optional[int] = None, stop: Optional[int] = None, + self, where=None, start: Optional[int] = None, stop: Optional[int] = None ): # delete all rows (and return the nrows) @@ -4805,7 +4805,7 @@ def _convert_index(name: str, index: Index, encoding: str, errors: str) -> Index if inferred_type == "date": converted = np.asarray([v.toordinal() for v in values], dtype=np.int32) return IndexCol( - name, converted, "date", _tables().Time32Col(), index_name=index_name, + name, converted, "date", _tables().Time32Col(), index_name=index_name ) elif inferred_type == "string": @@ -4821,13 +4821,13 @@ def _convert_index(name: str, index: Index, encoding: str, errors: str) -> Index elif inferred_type in ["integer", "floating"]: return IndexCol( - name, values=converted, kind=kind, typ=atom, index_name=index_name, + name, values=converted, kind=kind, typ=atom, index_name=index_name ) else: assert isinstance(converted, np.ndarray) and converted.dtype == object assert kind == "object", kind atom = _tables().ObjectAtom() - return IndexCol(name, converted, kind, atom, index_name=index_name,) + return IndexCol(name, converted, kind, atom, index_name=index_name) def _unconvert_index( From 303e40b378b0b3e264a9f55c7f25747cb50485ef Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Tue, 1 Sep 2020 09:39:54 -0700 Subject: [PATCH 382/632] TYP: annotate plotting._matplotlib.misc (#36017) --- pandas/plotting/_matplotlib/misc.py | 62 +++++++++++++++++++++------- pandas/plotting/_matplotlib/style.py | 2 +- 2 files changed, 48 insertions(+), 16 deletions(-) diff --git a/pandas/plotting/_matplotlib/misc.py b/pandas/plotting/_matplotlib/misc.py index bb6530b0f6412..c5e7c55970c3e 100644 --- a/pandas/plotting/_matplotlib/misc.py +++ b/pandas/plotting/_matplotlib/misc.py @@ -1,18 +1,27 @@ import random +from typing import TYPE_CHECKING, Dict, List, Optional, Set import matplotlib.lines as mlines import matplotlib.patches as patches import numpy as np +from pandas._typing import Label + from pandas.core.dtypes.missing import notna from pandas.io.formats.printing import pprint_thing from pandas.plotting._matplotlib.style import _get_standard_colors from pandas.plotting._matplotlib.tools import _set_ticks_props, _subplots +if TYPE_CHECKING: + from matplotlib.axes import Axes + from matplotlib.figure import Figure + + from pandas import DataFrame, Series + def scatter_matrix( - frame, + frame: "DataFrame", alpha=0.5, figsize=None, ax=None, @@ -114,7 +123,14 @@ def _get_marker_compat(marker): return marker -def radviz(frame, class_column, ax=None, color=None, colormap=None, **kwds): +def radviz( + frame: "DataFrame", + class_column, + ax: Optional["Axes"] = None, + color=None, + colormap=None, + **kwds, +) -> "Axes": import matplotlib.pyplot as plt def normalize(series): @@ -130,7 +146,7 @@ def normalize(series): if ax is None: ax = plt.gca(xlim=[-1, 1], ylim=[-1, 1]) - to_plot = {} + 
to_plot: Dict[Label, List[List]] = {} colors = _get_standard_colors( num_colors=len(classes), colormap=colormap, color_type="random", color=color ) @@ -197,8 +213,14 @@ def normalize(series): def andrews_curves( - frame, class_column, ax=None, samples=200, color=None, colormap=None, **kwds -): + frame: "DataFrame", + class_column, + ax: Optional["Axes"] = None, + samples: int = 200, + color=None, + colormap=None, + **kwds, +) -> "Axes": import matplotlib.pyplot as plt def function(amplitudes): @@ -231,7 +253,7 @@ def f(t): classes = frame[class_column].drop_duplicates() df = frame.drop(class_column, axis=1) t = np.linspace(-np.pi, np.pi, samples) - used_legends = set() + used_legends: Set[str] = set() color_values = _get_standard_colors( num_colors=len(classes), colormap=colormap, color_type="random", color=color @@ -256,7 +278,13 @@ def f(t): return ax -def bootstrap_plot(series, fig=None, size=50, samples=500, **kwds): +def bootstrap_plot( + series: "Series", + fig: Optional["Figure"] = None, + size: int = 50, + samples: int = 500, + **kwds, +) -> "Figure": import matplotlib.pyplot as plt @@ -306,19 +334,19 @@ def bootstrap_plot(series, fig=None, size=50, samples=500, **kwds): def parallel_coordinates( - frame, + frame: "DataFrame", class_column, cols=None, - ax=None, + ax: Optional["Axes"] = None, color=None, use_columns=False, xticks=None, colormap=None, - axvlines=True, + axvlines: bool = True, axvlines_kwds=None, - sort_labels=False, + sort_labels: bool = False, **kwds, -): +) -> "Axes": import matplotlib.pyplot as plt if axvlines_kwds is None: @@ -333,7 +361,7 @@ def parallel_coordinates( else: df = frame[cols] - used_legends = set() + used_legends: Set[str] = set() ncols = len(df.columns) @@ -385,7 +413,9 @@ def parallel_coordinates( return ax -def lag_plot(series, lag=1, ax=None, **kwds): +def lag_plot( + series: "Series", lag: int = 1, ax: Optional["Axes"] = None, **kwds +) -> "Axes": # workaround because `c='b'` is hardcoded in matplotlib's scatter method import matplotlib.pyplot as plt @@ -402,7 +432,9 @@ def lag_plot(series, lag=1, ax=None, **kwds): return ax -def autocorrelation_plot(series, ax=None, **kwds): +def autocorrelation_plot( + series: "Series", ax: Optional["Axes"] = None, **kwds +) -> "Axes": import matplotlib.pyplot as plt n = len(series) diff --git a/pandas/plotting/_matplotlib/style.py b/pandas/plotting/_matplotlib/style.py index 7990bff4f517c..5f1105f0e4233 100644 --- a/pandas/plotting/_matplotlib/style.py +++ b/pandas/plotting/_matplotlib/style.py @@ -11,7 +11,7 @@ def _get_standard_colors( - num_colors=None, colormap=None, color_type="default", color=None + num_colors=None, colormap=None, color_type: str = "default", color=None ): import matplotlib.pyplot as plt From 6c3c695664f8567477686a610e85cd0367929208 Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Tue, 1 Sep 2020 10:28:02 -0700 Subject: [PATCH 383/632] TYP: Annotate plotting stacker (#36016) --- pandas/plotting/_matplotlib/boxplot.py | 8 +++- pandas/plotting/_matplotlib/core.py | 66 +++++++++++++------------- pandas/plotting/_matplotlib/hist.py | 7 ++- 3 files changed, 45 insertions(+), 36 deletions(-) diff --git a/pandas/plotting/_matplotlib/boxplot.py b/pandas/plotting/_matplotlib/boxplot.py index b33daf39de37c..01fe98a6f5403 100644 --- a/pandas/plotting/_matplotlib/boxplot.py +++ b/pandas/plotting/_matplotlib/boxplot.py @@ -1,4 +1,5 @@ from collections import namedtuple +from typing import TYPE_CHECKING import warnings from matplotlib.artist import setp @@ -14,6 +15,9 @@ from 
pandas.plotting._matplotlib.style import _get_standard_colors from pandas.plotting._matplotlib.tools import _flatten, _subplots +if TYPE_CHECKING: + from matplotlib.axes import Axes + class BoxPlot(LinePlot): _kind = "box" @@ -150,7 +154,7 @@ def _make_plot(self): labels = [pprint_thing(key) for key in range(len(labels))] self._set_ticklabels(ax, labels) - def _set_ticklabels(self, ax, labels): + def _set_ticklabels(self, ax: "Axes", labels): if self.orientation == "vertical": ax.set_xticklabels(labels) else: @@ -292,7 +296,7 @@ def maybe_color_bp(bp, **kwds): if not kwds.get("capprops"): setp(bp["caps"], color=colors[3], alpha=1) - def plot_group(keys, values, ax): + def plot_group(keys, values, ax: "Axes"): keys = [pprint_thing(x) for x in keys] values = [np.asarray(remove_na_arraylike(v), dtype=object) for v in values] bp = ax.boxplot(values, **kwds) diff --git a/pandas/plotting/_matplotlib/core.py b/pandas/plotting/_matplotlib/core.py index 4d23a5e5fc249..93ba9bd26630b 100644 --- a/pandas/plotting/_matplotlib/core.py +++ b/pandas/plotting/_matplotlib/core.py @@ -1,5 +1,5 @@ import re -from typing import TYPE_CHECKING, List, Optional +from typing import TYPE_CHECKING, List, Optional, Tuple import warnings from matplotlib.artist import Artist @@ -45,6 +45,7 @@ if TYPE_CHECKING: from matplotlib.axes import Axes + from matplotlib.axis import Axis class MPLPlot: @@ -68,16 +69,10 @@ def _kind(self): _pop_attributes = [ "label", "style", - "logy", - "logx", - "loglog", "mark_right", "stacked", ] _attr_defaults = { - "logy": False, - "logx": False, - "loglog": False, "mark_right": True, "stacked": False, } @@ -167,6 +162,9 @@ def __init__( self.legend_handles: List[Artist] = [] self.legend_labels: List[Label] = [] + self.logx = kwds.pop("logx", False) + self.logy = kwds.pop("logy", False) + self.loglog = kwds.pop("loglog", False) for attr in self._pop_attributes: value = kwds.pop(attr, self._attr_defaults.get(attr, None)) setattr(self, attr, value) @@ -283,11 +281,11 @@ def generate(self): def _args_adjust(self): pass - def _has_plotted_object(self, ax): + def _has_plotted_object(self, ax: "Axes") -> bool: """check whether ax has data""" return len(ax.lines) != 0 or len(ax.artists) != 0 or len(ax.containers) != 0 - def _maybe_right_yaxis(self, ax, axes_num): + def _maybe_right_yaxis(self, ax: "Axes", axes_num): if not self.on_right(axes_num): # secondary axes may be passed via ax kw return self._get_ax_layer(ax) @@ -523,7 +521,7 @@ def _adorn_subplots(self): raise ValueError(msg) self.axes[0].set_title(self.title) - def _apply_axis_properties(self, axis, rot=None, fontsize=None): + def _apply_axis_properties(self, axis: "Axis", rot=None, fontsize=None): """ Tick creation within matplotlib is reasonably expensive and is internally deferred until accessed as Ticks are created/destroyed @@ -540,7 +538,7 @@ def _apply_axis_properties(self, axis, rot=None, fontsize=None): label.set_fontsize(fontsize) @property - def legend_title(self): + def legend_title(self) -> Optional[str]: if not isinstance(self.data.columns, ABCMultiIndex): name = self.data.columns.name if name is not None: @@ -591,7 +589,7 @@ def _make_legend(self): if ax.get_visible(): ax.legend(loc="best") - def _get_ax_legend_handle(self, ax): + def _get_ax_legend_handle(self, ax: "Axes"): """ Take in axes and return ax, legend and handle under different scenarios """ @@ -616,7 +614,7 @@ def plt(self): _need_to_set_index = False - def _get_xticks(self, convert_period=False): + def _get_xticks(self, convert_period: bool = False): index = 
self.data.index is_datetype = index.inferred_type in ("datetime", "date", "datetime64", "time") @@ -646,7 +644,7 @@ def _get_xticks(self, convert_period=False): @classmethod @register_pandas_matplotlib_converters - def _plot(cls, ax, x, y, style=None, is_errorbar=False, **kwds): + def _plot(cls, ax: "Axes", x, y, style=None, is_errorbar: bool = False, **kwds): mask = isna(y) if mask.any(): y = np.ma.array(y) @@ -667,10 +665,10 @@ def _plot(cls, ax, x, y, style=None, is_errorbar=False, **kwds): if style is not None: args = (x, y, style) else: - args = (x, y) + args = (x, y) # type:ignore[assignment] return ax.plot(*args, **kwds) - def _get_index_name(self): + def _get_index_name(self) -> Optional[str]: if isinstance(self.data.index, ABCMultiIndex): name = self.data.index.names if com.any_not_none(*name): @@ -877,7 +875,7 @@ def _get_subplots(self): ax for ax in self.axes[0].get_figure().get_axes() if isinstance(ax, Subplot) ] - def _get_axes_layout(self): + def _get_axes_layout(self) -> Tuple[int, int]: axes = self._get_subplots() x_set = set() y_set = set() @@ -916,15 +914,15 @@ def __init__(self, data, x, y, **kwargs): self.y = y @property - def nseries(self): + def nseries(self) -> int: return 1 - def _post_plot_logic(self, ax, data): + def _post_plot_logic(self, ax: "Axes", data): x, y = self.x, self.y ax.set_ylabel(pprint_thing(y)) ax.set_xlabel(pprint_thing(x)) - def _plot_colorbar(self, ax, **kwds): + def _plot_colorbar(self, ax: "Axes", **kwds): # Addresses issues #10611 and #10678: # When plotting scatterplots and hexbinplots in IPython # inline backend the colorbar axis height tends not to @@ -1080,7 +1078,7 @@ def __init__(self, data, **kwargs): if "x_compat" in self.kwds: self.x_compat = bool(self.kwds.pop("x_compat")) - def _is_ts_plot(self): + def _is_ts_plot(self) -> bool: # this is slightly deceptive return not self.x_compat and self.use_index and self._use_dynamic_x() @@ -1139,7 +1137,9 @@ def _make_plot(self): ax.set_xlim(left, right) @classmethod - def _plot(cls, ax, x, y, style=None, column_num=None, stacking_id=None, **kwds): + def _plot( + cls, ax: "Axes", x, y, style=None, column_num=None, stacking_id=None, **kwds + ): # column_num is used to get the target column from plotf in line and # area plots if column_num == 0: @@ -1183,7 +1183,7 @@ def _get_stacking_id(self): return None @classmethod - def _initialize_stacker(cls, ax, stacking_id, n): + def _initialize_stacker(cls, ax: "Axes", stacking_id, n: int): if stacking_id is None: return if not hasattr(ax, "_stacker_pos_prior"): @@ -1194,7 +1194,7 @@ def _initialize_stacker(cls, ax, stacking_id, n): ax._stacker_neg_prior[stacking_id] = np.zeros(n) @classmethod - def _get_stacked_values(cls, ax, stacking_id, values, label): + def _get_stacked_values(cls, ax: "Axes", stacking_id, values, label): if stacking_id is None: return values if not hasattr(ax, "_stacker_pos_prior"): @@ -1213,7 +1213,7 @@ def _get_stacked_values(cls, ax, stacking_id, values, label): ) @classmethod - def _update_stacker(cls, ax, stacking_id, values): + def _update_stacker(cls, ax: "Axes", stacking_id, values): if stacking_id is None: return if (values >= 0).all(): @@ -1221,7 +1221,7 @@ def _update_stacker(cls, ax, stacking_id, values): elif (values <= 0).all(): ax._stacker_neg_prior[stacking_id] += values - def _post_plot_logic(self, ax, data): + def _post_plot_logic(self, ax: "Axes", data): from matplotlib.ticker import FixedLocator def get_label(i): @@ -1276,7 +1276,7 @@ def __init__(self, data, **kwargs): @classmethod def _plot( cls, - ax, + 
ax: "Axes", x, y, style=None, @@ -1318,7 +1318,7 @@ def _plot( res = [rect] return res - def _post_plot_logic(self, ax, data): + def _post_plot_logic(self, ax: "Axes", data): LinePlot._post_plot_logic(self, ax, data) if self.ylim is None: @@ -1372,7 +1372,7 @@ def _args_adjust(self): self.left = np.array(self.left) @classmethod - def _plot(cls, ax, x, y, w, start=0, log=False, **kwds): + def _plot(cls, ax: "Axes", x, y, w, start=0, log=False, **kwds): return ax.bar(x, y, w, bottom=start, log=log, **kwds) @property @@ -1454,7 +1454,7 @@ def _make_plot(self): ) self._add_legend_handle(rect, label, index=i) - def _post_plot_logic(self, ax, data): + def _post_plot_logic(self, ax: "Axes", data): if self.use_index: str_index = [pprint_thing(key) for key in data.index] else: @@ -1466,7 +1466,7 @@ def _post_plot_logic(self, ax, data): self._decorate_ticks(ax, name, str_index, s_edge, e_edge) - def _decorate_ticks(self, ax, name, ticklabels, start_edge, end_edge): + def _decorate_ticks(self, ax: "Axes", name, ticklabels, start_edge, end_edge): ax.set_xlim((start_edge, end_edge)) if self.xticks is not None: @@ -1489,10 +1489,10 @@ def _start_base(self): return self.left @classmethod - def _plot(cls, ax, x, y, w, start=0, log=False, **kwds): + def _plot(cls, ax: "Axes", x, y, w, start=0, log=False, **kwds): return ax.barh(x, y, w, left=start, log=log, **kwds) - def _decorate_ticks(self, ax, name, ticklabels, start_edge, end_edge): + def _decorate_ticks(self, ax: "Axes", name, ticklabels, start_edge, end_edge): # horizontal bars ax.set_ylim((start_edge, end_edge)) ax.set_yticks(self.tick_pos) diff --git a/pandas/plotting/_matplotlib/hist.py b/pandas/plotting/_matplotlib/hist.py index ee41479b3c7c9..ffd46d1b191db 100644 --- a/pandas/plotting/_matplotlib/hist.py +++ b/pandas/plotting/_matplotlib/hist.py @@ -1,3 +1,5 @@ +from typing import TYPE_CHECKING + import numpy as np from pandas.core.dtypes.common import is_integer, is_list_like @@ -8,6 +10,9 @@ from pandas.plotting._matplotlib.core import LinePlot, MPLPlot from pandas.plotting._matplotlib.tools import _flatten, _set_ticks_props, _subplots +if TYPE_CHECKING: + from matplotlib.axes import Axes + class HistPlot(LinePlot): _kind = "hist" @@ -90,7 +95,7 @@ def _make_plot_keywords(self, kwds, y): kwds["bins"] = self.bins return kwds - def _post_plot_logic(self, ax, data): + def _post_plot_logic(self, ax: "Axes", data): if self.orientation == "horizontal": ax.set_xlabel("Frequency") else: From ecc5015cd2c6e7461a031d08f3672803c181ae70 Mon Sep 17 00:00:00 2001 From: Fangchen Li Date: Tue, 1 Sep 2020 14:36:20 -0500 Subject: [PATCH 384/632] TYP/CLN: cleanup `_openpyxl.py`, add type annotation #36021 (#36022) --- ci/deps/azure-37-locale_slow.yaml | 2 +- ci/deps/azure-37-minimum_versions.yaml | 2 +- doc/source/getting_started/install.rst | 2 +- doc/source/whatsnew/v1.2.0.rst | 2 +- pandas/io/excel/_openpyxl.py | 59 +++++++------------------- setup.cfg | 3 -- 6 files changed, 20 insertions(+), 50 deletions(-) diff --git a/ci/deps/azure-37-locale_slow.yaml b/ci/deps/azure-37-locale_slow.yaml index 8000f3e6b9a9c..fbb1ea671d696 100644 --- a/ci/deps/azure-37-locale_slow.yaml +++ b/ci/deps/azure-37-locale_slow.yaml @@ -18,7 +18,7 @@ dependencies: - lxml - matplotlib=3.0.0 - numpy=1.16.* - - openpyxl=2.5.7 + - openpyxl=2.6.0 - python-dateutil - python-blosc - pytz=2017.3 diff --git a/ci/deps/azure-37-minimum_versions.yaml b/ci/deps/azure-37-minimum_versions.yaml index 05b1957198bc4..31f82f3304db3 100644 --- a/ci/deps/azure-37-minimum_versions.yaml +++ 
b/ci/deps/azure-37-minimum_versions.yaml @@ -19,7 +19,7 @@ dependencies: - numba=0.46.0 - numexpr=2.6.8 - numpy=1.16.5 - - openpyxl=2.5.7 + - openpyxl=2.6.0 - pytables=3.4.4 - python-dateutil=2.7.3 - pytz=2017.3 diff --git a/doc/source/getting_started/install.rst b/doc/source/getting_started/install.rst index 4c270117e079e..c9ac1b0d284a3 100644 --- a/doc/source/getting_started/install.rst +++ b/doc/source/getting_started/install.rst @@ -274,7 +274,7 @@ html5lib 1.0.1 HTML parser for read_html (see :ref lxml 4.3.0 HTML parser for read_html (see :ref:`note `) matplotlib 2.2.3 Visualization numba 0.46.0 Alternative execution engine for rolling operations -openpyxl 2.5.7 Reading / writing for xlsx files +openpyxl 2.6.0 Reading / writing for xlsx files pandas-gbq 0.12.0 Google Big Query access psycopg2 2.7 PostgreSQL engine for sqlalchemy pyarrow 0.15.0 Parquet, ORC, and feather reading / writing diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index 1617bf66c4f04..76bebd4a9a1cb 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -109,7 +109,7 @@ Optional libraries below the lowest tested version may still work, but are not c +-----------------+-----------------+---------+ | numba | 0.46.0 | | +-----------------+-----------------+---------+ -| openpyxl | 2.5.7 | | +| openpyxl | 2.6.0 | X | +-----------------+-----------------+---------+ | pyarrow | 0.15.0 | X | +-----------------+-----------------+---------+ diff --git a/pandas/io/excel/_openpyxl.py b/pandas/io/excel/_openpyxl.py index c2730536af8a3..3c67902d41baa 100644 --- a/pandas/io/excel/_openpyxl.py +++ b/pandas/io/excel/_openpyxl.py @@ -1,4 +1,4 @@ -from typing import List +from typing import TYPE_CHECKING, Dict, List, Optional import numpy as np @@ -8,6 +8,9 @@ from pandas.io.excel._base import ExcelWriter, _BaseExcelReader from pandas.io.excel._util import _validate_freeze_panes +if TYPE_CHECKING: + from openpyxl.descriptors.serialisable import Serialisable + class _OpenpyxlWriter(ExcelWriter): engine = "openpyxl" @@ -22,53 +25,22 @@ def __init__(self, path, engine=None, mode="w", **engine_kwargs): if self.mode == "a": # Load from existing workbook from openpyxl import load_workbook - book = load_workbook(self.path) - self.book = book + self.book = load_workbook(self.path) else: # Create workbook object with default optimized_write=True. self.book = Workbook() if self.book.worksheets: - try: - self.book.remove(self.book.worksheets[0]) - except AttributeError: - - # compat - for openpyxl <= 2.4 - self.book.remove_sheet(self.book.worksheets[0]) + self.book.remove(self.book.worksheets[0]) def save(self): """ Save workbook to disk. """ - return self.book.save(self.path) - - @classmethod - def _convert_to_style(cls, style_dict): - """ - Converts a style_dict to an openpyxl style object. - - Parameters - ---------- - style_dict : style dictionary to convert - """ - from openpyxl.style import Style - - xls_style = Style() - for key, value in style_dict.items(): - for nk, nv in value.items(): - if key == "borders": - ( - xls_style.borders.__getattribute__(nk).__setattr__( - "border_style", nv - ) - ) - else: - xls_style.__getattribute__(key).__setattr__(nk, nv) - - return xls_style + self.book.save(self.path) @classmethod - def _convert_to_style_kwargs(cls, style_dict): + def _convert_to_style_kwargs(cls, style_dict: dict) -> Dict[str, "Serialisable"]: """ Convert a style_dict to a set of kwargs suitable for initializing or updating-on-copy an openpyxl v2 style object. 
@@ -93,7 +65,7 @@ def _convert_to_style_kwargs(cls, style_dict): """ _style_key_map = {"borders": "border"} - style_kwargs = {} + style_kwargs: Dict[str, Serialisable] = {} for k, v in style_dict.items(): if k in _style_key_map: k = _style_key_map[k] @@ -404,7 +376,7 @@ def write_cells( # Write the frame cells using openpyxl. sheet_name = self._get_sheet_name(sheet_name) - _style_cache = {} + _style_cache: Dict[str, Dict[str, Serialisable]] = {} if sheet_name in self.sheets: wks = self.sheets[sheet_name] @@ -426,7 +398,7 @@ def write_cells( if fmt: xcell.number_format = fmt - style_kwargs = {} + style_kwargs: Optional[Dict[str, Serialisable]] = {} if cell.style: key = str(cell.style) style_kwargs = _style_cache.get(key) @@ -515,16 +487,17 @@ def get_sheet_by_index(self, index: int): def _convert_cell(self, cell, convert_float: bool) -> Scalar: - # TODO: replace with openpyxl constants + from openpyxl.cell.cell import TYPE_BOOL, TYPE_ERROR, TYPE_NUMERIC + if cell.is_date: return cell.value - elif cell.data_type == "e": + elif cell.data_type == TYPE_ERROR: return np.nan - elif cell.data_type == "b": + elif cell.data_type == TYPE_BOOL: return bool(cell.value) elif cell.value is None: return "" # compat with xlrd - elif cell.data_type == "n": + elif cell.data_type == TYPE_NUMERIC: # GH5394 if convert_float: val = int(cell.value) diff --git a/setup.cfg b/setup.cfg index 2447a91f88f4e..c952e01c14bea 100644 --- a/setup.cfg +++ b/setup.cfg @@ -217,9 +217,6 @@ check_untyped_defs=False [mypy-pandas.io.excel._base] check_untyped_defs=False -[mypy-pandas.io.excel._openpyxl] -check_untyped_defs=False - [mypy-pandas.io.excel._util] check_untyped_defs=False From 2a624dc674166607eea14c62ffff198148318f9a Mon Sep 17 00:00:00 2001 From: Richard Shadrach <45562402+rhshadrach@users.noreply.github.com> Date: Tue, 1 Sep 2020 19:17:23 -0400 Subject: [PATCH 385/632] CLN: _wrap_applied_output (#36053) --- pandas/core/groupby/generic.py | 36 ++++++++++++---------------------- 1 file changed, 13 insertions(+), 23 deletions(-) diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index 7b45a114e548b..a92e3af0764a7 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -1206,7 +1206,6 @@ def _wrap_applied_output(self, keys, values, not_indexed_same=False): key_index = self.grouper.result_index if self.as_index else None if isinstance(first_not_none, Series): - # this is to silence a DeprecationWarning # TODO: Remove when default dtype of empty Series is object kwargs = first_not_none._construct_axes_dict() @@ -1218,16 +1217,26 @@ def _wrap_applied_output(self, keys, values, not_indexed_same=False): v = values[0] - if isinstance(v, (np.ndarray, Index, Series)) or not self.as_index: + if not isinstance(v, (np.ndarray, Index, Series)) and self.as_index: + # values are not series or array-like but scalars + # self._selection_name not passed through to Series as the + # result should not take the name of original selection + # of columns + return self.obj._constructor_sliced(values, index=key_index) + + else: if isinstance(v, Series): - applied_index = self._selected_obj._get_axis(self.axis) all_indexed_same = all_indexes_same((x.index for x in values)) - singular_series = len(values) == 1 and applied_index.nlevels == 1 # GH3596 # provide a reduction (Frame -> Series) if groups are # unique if self.squeeze: + applied_index = self._selected_obj._get_axis(self.axis) + singular_series = ( + len(values) == 1 and applied_index.nlevels == 1 + ) + # assign the name to this series 
if singular_series: values[0].name = keys[0] @@ -1253,18 +1262,6 @@ def _wrap_applied_output(self, keys, values, not_indexed_same=False): # GH 8467 return self._concat_objects(keys, values, not_indexed_same=True) - # GH6124 if the list of Series have a consistent name, - # then propagate that name to the result. - index = v.index.copy() - if index.name is None: - # Only propagate the series name to the result - # if all series have a consistent name. If the - # series do not have a consistent name, do - # nothing. - names = {v.name for v in values} - if len(names) == 1: - index.name = list(names)[0] - # Combine values # vstack+constructor is faster than concat and handles MI-columns stacked_values = np.vstack([np.asarray(v) for v in values]) @@ -1313,13 +1310,6 @@ def _wrap_applied_output(self, keys, values, not_indexed_same=False): return self._reindex_output(result) - # values are not series or array-like but scalars - else: - # self._selection_name not passed through to Series as the - # result should not take the name of original selection - # of columns - return self.obj._constructor_sliced(values, index=key_index) - def _transform_general( self, func, *args, engine="cython", engine_kwargs=None, **kwargs ): From 4f2251c6e6f5fb71a330d0318f96ce338671f746 Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Tue, 1 Sep 2020 16:31:09 -0700 Subject: [PATCH 386/632] REF: implement Block._replace_list (#36020) --- pandas/core/internals/blocks.py | 37 +++++++++++++++++++++++++ pandas/core/internals/managers.py | 45 +++++++++++-------------------- 2 files changed, 52 insertions(+), 30 deletions(-) diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index 1b42df1b0147c..ad388ef3f53b0 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -788,6 +788,43 @@ def _replace_single(self, *args, **kwargs): """ no-op on a non-ObjectBlock """ return self if kwargs["inplace"] else self.copy() + def _replace_list( + self, + src_list: List[Any], + dest_list: List[Any], + masks: List[np.ndarray], + inplace: bool = False, + regex: bool = False, + ) -> List["Block"]: + """ + See BlockManager._replace_list docstring. + """ + src_len = len(src_list) - 1 + + rb = [self if inplace else self.copy()] + for i, (src, dest) in enumerate(zip(src_list, dest_list)): + new_rb: List["Block"] = [] + for blk in rb: + m = masks[i][blk.mgr_locs.indexer] + convert = i == src_len # only convert once at the end + result = blk._replace_coerce( + mask=m, + to_replace=src, + value=dest, + inplace=inplace, + convert=convert, + regex=regex, + ) + if m.any() or convert: + if isinstance(result, list): + new_rb.extend(result) + else: + new_rb.append(result) + else: + new_rb.append(blk) + rb = new_rb + return rb + def setitem(self, indexer, value): """ Attempt self.values[indexer] = value, possibly creating a new array. 
diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py
index 00321b76cb6bf..389252e7ef0f2 100644
--- a/pandas/core/internals/managers.py
+++ b/pandas/core/internals/managers.py
@@ -3,6 +3,7 @@
 import operator
 import re
 from typing import (
+    Any,
     DefaultDict,
     Dict,
     List,
@@ -600,8 +601,12 @@ def replace(self, value, **kwargs) -> "BlockManager":
         return self.apply("replace", value=value, **kwargs)
 
     def replace_list(
-        self, src_list, dest_list, inplace: bool = False, regex: bool = False
-    ) -> "BlockManager":
+        self: T,
+        src_list: List[Any],
+        dest_list: List[Any],
+        inplace: bool = False,
+        regex: bool = False,
+    ) -> T:
         """ do a list replace """
         inplace = validate_bool_kwarg(inplace, "inplace")
 
@@ -625,34 +630,14 @@ def comp(s: Scalar, mask: np.ndarray, regex: bool = False):
 
         masks = [comp(s, mask, regex) for s in src_list]
 
-        result_blocks = []
-        src_len = len(src_list) - 1
-        for blk in self.blocks:
-
-            # its possible to get multiple result blocks here
-            # replace ALWAYS will return a list
-            rb = [blk if inplace else blk.copy()]
-            for i, (s, d) in enumerate(zip(src_list, dest_list)):
-                new_rb: List[Block] = []
-                for b in rb:
-                    m = masks[i][b.mgr_locs.indexer]
-                    convert = i == src_len  # only convert once at the end
-                    result = b._replace_coerce(
-                        mask=m,
-                        to_replace=s,
-                        value=d,
-                        inplace=inplace,
-                        convert=convert,
-                        regex=regex,
-                    )
-                    if m.any() or convert:
-                        new_rb = _extend_blocks(result, new_rb)
-                    else:
-                        new_rb.append(b)
-                rb = new_rb
-            result_blocks.extend(rb)
-
-        bm = type(self).from_blocks(result_blocks, self.axes)
+        bm = self.apply(
+            "_replace_list",
+            src_list=src_list,
+            dest_list=dest_list,
+            masks=masks,
+            inplace=inplace,
+            regex=regex,
+        )
         bm._consolidate_inplace()
         return bm
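The refactor above is intended to be behaviour-preserving: list replacement still applies the same precomputed masks, only now dispatched block by block through ``Block._replace_list``. A quick sketch of the user-facing code path it services (output illustrative):

>>> import pandas as pd
>>> pd.DataFrame({"a": [0, 1, 2]}).replace([0, 1], [10, 11])
    a
0  10
1  11
2   2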
From bdd5d4c13bccc8fe2c4e4444136119ad69c2efcc Mon Sep 17 00:00:00 2001
From: jbrockmendel
Date: Tue, 1 Sep 2020 16:32:00 -0700
Subject: [PATCH 387/632] BUG: PeriodIndex.get_loc incorrectly raising ValueError instead of KeyError (#36015)

---
 doc/source/whatsnew/v1.2.0.rst               |  2 +-
 pandas/core/groupby/grouper.py               |  6 +++---
 pandas/core/indexes/period.py                |  2 +-
 pandas/tests/indexes/period/test_indexing.py | 16 ++++++++++++++++
 4 files changed, 21 insertions(+), 5 deletions(-)

diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index 76bebd4a9a1cb..407e8ba029ada 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -217,7 +217,7 @@ Interval
 Indexing
 ^^^^^^^^
-
+- Bug in :meth:`PeriodIndex.get_loc` incorrectly raising ``ValueError`` on non-datelike strings instead of ``KeyError``, causing similar errors in :meth:`Series.__getitem__`, :meth:`Series.__contains__`, and :meth:`Series.loc.__getitem__` (:issue:`34240`)
 -
 -
 
diff --git a/pandas/core/groupby/grouper.py b/pandas/core/groupby/grouper.py
index 18970ea0544e4..3017521c6a065 100644
--- a/pandas/core/groupby/grouper.py
+++ b/pandas/core/groupby/grouper.py
@@ -755,9 +755,9 @@ def is_in_obj(gpr) -> bool:
             return False
         try:
             return gpr is obj[gpr.name]
-        except (KeyError, IndexError, ValueError):
-            # TODO: ValueError: Given date string not likely a datetime.
-            # should be KeyError?
+        except (KeyError, IndexError):
+            # IndexError reached in e.g. test_skip_group_keys when we pass
+            # lambda here
             return False
 
     for i, (gpr, level) in enumerate(zip(keys, levels)):
diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py
index 11334803d4583..cdb502199c6f1 100644
--- a/pandas/core/indexes/period.py
+++ b/pandas/core/indexes/period.py
@@ -504,7 +504,7 @@ def get_loc(self, key, method=None, tolerance=None):
 
             try:
                 asdt, reso = parse_time_string(key, self.freq)
-            except DateParseError as err:
+            except (ValueError, DateParseError) as err:
                 # A string with invalid format
                 raise KeyError(f"Cannot interpret '{key}' as period") from err
 
diff --git a/pandas/tests/indexes/period/test_indexing.py b/pandas/tests/indexes/period/test_indexing.py
index b61d1d903f89a..d2499b85ad181 100644
--- a/pandas/tests/indexes/period/test_indexing.py
+++ b/pandas/tests/indexes/period/test_indexing.py
@@ -359,6 +359,22 @@ def test_get_loc2(self):
             ],
         )
 
+    def test_get_loc_invalid_string_raises_keyerror(self):
+        # GH#34240
+        pi = pd.period_range("2000", periods=3, name="A")
+        with pytest.raises(KeyError, match="A"):
+            pi.get_loc("A")
+
+        ser = pd.Series([1, 2, 3], index=pi)
+        with pytest.raises(KeyError, match="A"):
+            ser.loc["A"]
+
+        with pytest.raises(KeyError, match="A"):
+            ser["A"]
+
+        assert "A" not in ser
+        assert "A" not in pi
 
 class TestGetIndexer:
     def test_get_indexer(self):
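With the patch above, an uninterpretable string key now surfaces as ``KeyError`` from ``PeriodIndex.get_loc``, so ``in`` checks and label lookups fail the same way they do for other index types. Illustrative session (assumes a build that includes this patch):

>>> import pandas as pd
>>> pi = pd.period_range("2000", periods=3, name="A")
>>> "A" in pd.Series([1, 2, 3], index=pi)
False
>>> pi.get_loc("A")
Traceback (most recent call last):
...
KeyError: "Cannot interpret 'A' as period"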
From 67429d4acf08f6355888879591dcffeaf9958004 Mon Sep 17 00:00:00 2001
From: Simon Hawkins
Date: Wed, 2 Sep 2020 00:34:15 +0100
Subject: [PATCH 388/632] CI: Unpin MyPy (#36012)

---
 environment.yml                 |  2 +-
 pandas/_config/config.py        |  4 +-
 pandas/core/common.py           |  7 +++-
 pandas/core/computation/expr.py |  2 +-
 pandas/core/indexes/frozen.py   |  6 ++-
 pandas/core/resample.py         |  3 +-
 requirements-dev.txt            |  2 +-
 setup.cfg                       | 71 ++++++++++++++++++++++++++++++---
 8 files changed, 81 insertions(+), 16 deletions(-)

diff --git a/environment.yml b/environment.yml
index 96f2c8d2086c7..4622aac1dc6f8 100644
--- a/environment.yml
+++ b/environment.yml
@@ -21,7 +21,7 @@ dependencies:
   - flake8-comprehensions>=3.1.0  # used by flake8, linting of unnecessary comprehensions
   - flake8-rst>=0.6.0,<=0.7.0  # linting of code blocks in rst files
   - isort>=5.2.1  # check that imports are in the right order
-  - mypy=0.730
+  - mypy=0.782
   - pycodestyle  # used by flake8
 
   # documentation
diff --git a/pandas/_config/config.py b/pandas/_config/config.py
index fb41b37980b2e..0b802f2cc9e69 100644
--- a/pandas/_config/config.py
+++ b/pandas/_config/config.py
@@ -460,9 +460,7 @@ def register_option(
     path = key.split(".")
 
     for k in path:
-        # NOTE: tokenize.Name is not a public constant
-        # error: Module has no attribute "Name" [attr-defined]
-        if not re.match("^" + tokenize.Name + "$", k):  # type: ignore[attr-defined]
+        if not re.match("^" + tokenize.Name + "$", k):
             raise ValueError(f"{k} is not a valid identifier")
         if keyword.iskeyword(k):
             raise ValueError(f"{k} is a python keyword")
diff --git a/pandas/core/common.py b/pandas/core/common.py
index e7260a9923ee0..6fd4700ab7f3f 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -9,7 +9,7 @@
 from datetime import datetime, timedelta
 from functools import partial
 import inspect
-from typing import Any, Collection, Iterable, Iterator, List, Union
+from typing import Any, Collection, Iterable, Iterator, List, Union, cast
 import warnings
 
 import numpy as np
@@ -277,6 +277,11 @@ def maybe_iterable_to_list(obj: Union[Iterable[T], T]) -> Union[Collection[T], T]:
     """
     if isinstance(obj, abc.Iterable) and not isinstance(obj, abc.Sized):
         return list(obj)
+    # error: Incompatible return value type (got
+    # "Union[pandas.core.common.<subclass of "Iterable" and "Sized">,
+    # pandas.core.common.<subclass of "Iterable" and "Sized">1, T]", expected
+    # "Union[Collection[T], T]") [return-value]
+    obj = cast(Collection, obj)
     return obj
diff --git a/pandas/core/computation/expr.py b/pandas/core/computation/expr.py
index 125ecb0d88036..df71b4fe415f8 100644
--- a/pandas/core/computation/expr.py
+++ b/pandas/core/computation/expr.py
@@ -364,7 +364,7 @@ class BaseExprVisitor(ast.NodeVisitor):
     unary_ops = _unary_ops_syms
     unary_op_nodes = "UAdd", "USub", "Invert", "Not"
-    unary_op_nodes_map = dict(zip(unary_ops, unary_op_nodes))
+    unary_op_nodes_map = {k: v for k, v in zip(unary_ops, unary_op_nodes)}
 
     rewrite_map = {
         ast.Eq: ast.In,
diff --git a/pandas/core/indexes/frozen.py b/pandas/core/indexes/frozen.py
index 909643d50e9d7..8c4437f2cdeb9 100644
--- a/pandas/core/indexes/frozen.py
+++ b/pandas/core/indexes/frozen.py
@@ -103,5 +103,7 @@ def __str__(self) -> str:
     def __repr__(self) -> str:
         return f"{type(self).__name__}({str(self)})"
 
-    __setitem__ = __setslice__ = __delitem__ = __delslice__ = _disabled
-    pop = append = extend = remove = sort = insert = _disabled
+    __setitem__ = __setslice__ = _disabled  # type: ignore[assignment]
+    __delitem__ = __delslice__ = _disabled  # type: ignore[assignment]
+    pop = append = extend = _disabled  # type: ignore[assignment]
+    remove = sort = insert = _disabled  # type: ignore[assignment]
diff --git a/pandas/core/resample.py b/pandas/core/resample.py
index fc54128ae5aa6..7b5154756e613 100644
--- a/pandas/core/resample.py
+++ b/pandas/core/resample.py
@@ -966,8 +966,7 @@ def __init__(self, obj, *args, **kwargs):
         for attr in self._attributes:
             setattr(self, attr, kwargs.get(attr, getattr(parent, attr)))
 
-        # error: Too many arguments for "__init__" of "object"
-        super().__init__(None)  # type: ignore[call-arg]
+        super().__init__(None)
         self._groupby = groupby
         self._groupby.mutated = True
         self._groupby.grouper.mutated = True
diff --git a/requirements-dev.txt b/requirements-dev.txt
index 1fca25c9fecd9..cc3775de3a4ba 100644
--- a/requirements-dev.txt
+++ b/requirements-dev.txt
@@ -12,7 +12,7 @@ flake8<3.8.0
 flake8-comprehensions>=3.1.0
 flake8-rst>=0.6.0,<=0.7.0
 isort>=5.2.1
-mypy==0.730
+mypy==0.782
 pycodestyle
 gitpython
 gitdb
diff --git a/setup.cfg b/setup.cfg
index c952e01c14bea..a47bc88d282ab 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -127,10 +127,7 @@ show_error_codes = True
 [mypy-pandas.tests.*]
 check_untyped_defs=False
 
-[mypy-pandas.conftest]
-ignore_errors=True
-
-[mypy-pandas.tests.tools.test_to_datetime]
+[mypy-pandas.conftest,pandas.tests.window.conftest]
 ignore_errors=True
 
 [mypy-pandas._testing]
@@ -139,7 +136,22 @@ check_untyped_defs=False
 [mypy-pandas._version]
 check_untyped_defs=False
 
-[mypy-pandas.core.arrays.interval]
+[mypy-pandas.compat.pickle_compat]
+check_untyped_defs=False
+
+[mypy-pandas.core.apply]
+check_untyped_defs=False
+
+[mypy-pandas.core.arrays.base]
+check_untyped_defs=False
+
+[mypy-pandas.core.arrays.datetimelike]
+check_untyped_defs=False
+
+[mypy-pandas.core.arrays.sparse.array]
+check_untyped_defs=False
+
+[mypy-pandas.core.arrays.string_]
 check_untyped_defs=False
 
 [mypy-pandas.core.base]
@@ -151,6 +163,9 @@ check_untyped_defs=False
 [mypy-pandas.core.computation.expressions]
 check_untyped_defs=False
 
+[mypy-pandas.core.computation.ops]
+check_untyped_defs=False
+
 [mypy-pandas.core.computation.pytables]
 check_untyped_defs=False
 
@@ -163,6 +178,9 @@ check_untyped_defs=False
 [mypy-pandas.core.generic]
 check_untyped_defs=False
 
+[mypy-pandas.core.groupby.base]
+check_untyped_defs=False
+
[mypy-pandas.core.groupby.generic] check_untyped_defs=False @@ -172,15 +190,33 @@ check_untyped_defs=False [mypy-pandas.core.groupby.ops] check_untyped_defs=False +[mypy-pandas.core.indexes.base] +check_untyped_defs=False + +[mypy-pandas.core.indexes.category] +check_untyped_defs=False + +[mypy-pandas.core.indexes.datetimelike] +check_untyped_defs=False + [mypy-pandas.core.indexes.datetimes] check_untyped_defs=False +[mypy-pandas.core.indexes.extension] +check_untyped_defs=False + [mypy-pandas.core.indexes.interval] check_untyped_defs=False [mypy-pandas.core.indexes.multi] check_untyped_defs=False +[mypy-pandas.core.indexes.period] +check_untyped_defs=False + +[mypy-pandas.core.indexes.range] +check_untyped_defs=False + [mypy-pandas.core.internals.blocks] check_untyped_defs=False @@ -190,15 +226,27 @@ check_untyped_defs=False [mypy-pandas.core.internals.managers] check_untyped_defs=False +[mypy-pandas.core.internals.ops] +check_untyped_defs=False + [mypy-pandas.core.missing] check_untyped_defs=False [mypy-pandas.core.ops.docstrings] check_untyped_defs=False +[mypy-pandas.core.resample] +check_untyped_defs=False + +[mypy-pandas.core.reshape.concat] +check_untyped_defs=False + [mypy-pandas.core.reshape.merge] check_untyped_defs=False +[mypy-pandas.core.series] +check_untyped_defs=False + [mypy-pandas.core.strings] check_untyped_defs=False @@ -214,6 +262,9 @@ check_untyped_defs=False [mypy-pandas.io.clipboard] check_untyped_defs=False +[mypy-pandas.io.common] +check_untyped_defs=False + [mypy-pandas.io.excel._base] check_untyped_defs=False @@ -226,6 +277,9 @@ check_untyped_defs=False [mypy-pandas.io.formats.css] check_untyped_defs=False +[mypy-pandas.io.formats.csvs] +check_untyped_defs=False + [mypy-pandas.io.formats.excel] check_untyped_defs=False @@ -264,3 +318,10 @@ check_untyped_defs=False [mypy-pandas.plotting._matplotlib.misc] check_untyped_defs=False + +[mypy-pandas.plotting._misc] +check_untyped_defs=False + +[mypy-pandas.util._decorators] +check_untyped_defs=False + From 79919db18f2569151a68fa414059e0f6cbd35617 Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Tue, 1 Sep 2020 16:37:08 -0700 Subject: [PATCH 389/632] ENH: vendor typing_extensions (#36000) --- pandas/_vendored/__init__.py | 0 pandas/_vendored/typing_extensions.py | 2466 +++++++++++++++++++++++++ setup.cfg | 9 +- 3 files changed, 2473 insertions(+), 2 deletions(-) create mode 100644 pandas/_vendored/__init__.py create mode 100644 pandas/_vendored/typing_extensions.py diff --git a/pandas/_vendored/__init__.py b/pandas/_vendored/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/pandas/_vendored/typing_extensions.py b/pandas/_vendored/typing_extensions.py new file mode 100644 index 0000000000000..53df8da175a56 --- /dev/null +++ b/pandas/_vendored/typing_extensions.py @@ -0,0 +1,2466 @@ +""" +vendored copy of typing_extensions, copied from +https://raw.githubusercontent.com/python/typing/master/typing_extensions/src_py3/typing_extensions.py + +on 2020-08-30. + +typing_extensions is distributed under the Python Software Foundation License. + +This is not a direct copy/paste of the original file. 
Changes are:
+ - this docstring
+ - ran `black`
+ - ran `isort`
+ - edited strings split by black to adhere to pandas style conventions
+ - AsyncContextManager is defined without `exec`
+ - python2-style super usages are updated
+ - replace foo.__class__ with type(foo)
+ - Change a comment-syntax annotation in a docstring to newer syntax
+"""
+
+# These are used by Protocol implementation
+# We use internal typing helpers here, but this significantly reduces
+# code duplication. (Also this is only until Protocol is in typing.)
+import abc
+import collections
+import collections.abc as collections_abc
+import contextlib
+import operator
+import sys
+import typing
+from typing import Callable, Generic, Tuple, TypeVar
+
+# After PEP 560, internal typing API was substantially reworked.
+# This is especially important for Protocol class which uses internal APIs
+# quite extensively.
+PEP_560 = sys.version_info[:3] >= (3, 7, 0)
+
+if PEP_560:
+    GenericMeta = TypingMeta = type
+else:
+    from typing import GenericMeta, TypingMeta
+OLD_GENERICS = False
+try:
+    from typing import _next_in_mro, _type_check, _type_vars
+except ImportError:
+    OLD_GENERICS = True
+try:
+    from typing import _subs_tree  # noqa
+
+    SUBS_TREE = True
+except ImportError:
+    SUBS_TREE = False
+try:
+    from typing import _tp_cache
+except ImportError:
+
+    def _tp_cache(x):
+        return x
+
+
+try:
+    from typing import _TypingEllipsis, _TypingEmpty
+except ImportError:
+
+    class _TypingEllipsis:
+        pass
+
+    class _TypingEmpty:
+        pass
+
+
+# The two functions below are copies of typing internal helpers.
+# They are needed by _ProtocolMeta
+
+
+def _no_slots_copy(dct):
+    dict_copy = dict(dct)
+    if "__slots__" in dict_copy:
+        for slot in dict_copy["__slots__"]:
+            dict_copy.pop(slot, None)
+    return dict_copy
+
+
+def _check_generic(cls, parameters):
+    if not cls.__parameters__:
+        raise TypeError("%s is not a generic class" % repr(cls))
+    alen = len(parameters)
+    elen = len(cls.__parameters__)
+    if alen != elen:
+        raise TypeError(
+            "Too %s parameters for %s; actual %s, expected %s"
+            % ("many" if alen > elen else "few", repr(cls), alen, elen)
+        )
+
+
+if hasattr(typing, "_generic_new"):
+    _generic_new = typing._generic_new
+else:
+    # Note: The '_generic_new(...)' function is used as a part of the
+    # process of creating a generic type and was added to the typing module
+    # as of Python 3.5.3.
+    #
+    # We've defined '_generic_new(...)' below to exactly match the behavior
+    # implemented in older versions of 'typing' bundled with Python 3.5.0 to
+    # 3.5.2. This helps eliminate redundancy when defining collection types
+    # like 'Deque' later.
+    #
+    # See https://github.com/python/typing/pull/308 for more details -- in
+    # particular, compare and contrast the definition of types like
+    # 'typing.List' before and after the merge.
+
+
+    def _generic_new(base_cls, cls, *args, **kwargs):
+        return base_cls.__new__(cls, *args, **kwargs)
+
+
+# See https://github.com/python/typing/pull/439
+if hasattr(typing, "_geqv"):
+    from typing import _geqv
+
+    _geqv_defined = True
+else:
+    _geqv = None
+    _geqv_defined = False
+
+if sys.version_info[:2] >= (3, 6):
+    import _collections_abc
+
+    _check_methods_in_mro = _collections_abc._check_methods
+else:
+
+    def _check_methods_in_mro(C, *methods):
+        mro = C.__mro__
+        for method in methods:
+            for B in mro:
+                if method in B.__dict__:
+                    if B.__dict__[method] is None:
+                        return NotImplemented
+                    break
+            else:
+                return NotImplemented
+        return True
+
+
+# Please keep __all__ alphabetized within each category.
+__all__ = [
+    # Super-special typing primitives.
+    "ClassVar",
+    "Final",
+    "Type",
+    # ABCs (from collections.abc).
+    # The following are added depending on presence
+    # of their non-generic counterparts in stdlib:
+    # 'Awaitable',
+    # 'AsyncIterator',
+    # 'AsyncIterable',
+    # 'Coroutine',
+    # 'AsyncGenerator',
+    # 'AsyncContextManager',
+    # 'ChainMap',
+    # Concrete collection types.
+    "ContextManager",
+    "Counter",
+    "Deque",
+    "DefaultDict",
+    "TypedDict",
+    # Structural checks, a.k.a. protocols.
+    "SupportsIndex",
+    # One-off things.
+    "final",
+    "IntVar",
+    "Literal",
+    "NewType",
+    "overload",
+    "Text",
+    "TYPE_CHECKING",
+]
+
+# Annotated relies on substitution trees of pep 560. It will not work for
+# versions of typing older than 3.5.3
+HAVE_ANNOTATED = PEP_560 or SUBS_TREE
+
+if PEP_560:
+    __all__.extend(["get_args", "get_origin", "get_type_hints"])
+
+if HAVE_ANNOTATED:
+    __all__.append("Annotated")
+
+# Protocols are hard to backport to the original version of typing 3.5.0
+HAVE_PROTOCOLS = sys.version_info[:3] != (3, 5, 0)
+
+if HAVE_PROTOCOLS:
+    __all__.extend(["Protocol", "runtime", "runtime_checkable"])
+
+
+# TODO
+if hasattr(typing, "NoReturn"):
+    NoReturn = typing.NoReturn
+elif hasattr(typing, "_FinalTypingBase"):
+
+    class _NoReturn(typing._FinalTypingBase, _root=True):
+        """Special type indicating functions that never return.
+        Example::
+
+            from typing import NoReturn
+
+            def stop() -> NoReturn:
+                raise Exception('no way')
+
+        This type is invalid in other positions, e.g., ``List[NoReturn]``
+        will fail in static type checkers.
+        """
+
+        __slots__ = ()
+
+        def __instancecheck__(self, obj):
+            raise TypeError("NoReturn cannot be used with isinstance().")
+
+        def __subclasscheck__(self, cls):
+            raise TypeError("NoReturn cannot be used with issubclass().")
+
+    NoReturn = _NoReturn(_root=True)
+else:
+
+    class _NoReturnMeta(typing.TypingMeta):
+        """Metaclass for NoReturn"""
+
+        def __new__(cls, name, bases, namespace, _root=False):
+            return super().__new__(cls, name, bases, namespace, _root=_root)
+
+        def __instancecheck__(self, obj):
+            raise TypeError("NoReturn cannot be used with isinstance().")
+
+        def __subclasscheck__(self, cls):
+            raise TypeError("NoReturn cannot be used with issubclass().")
+
+    class NoReturn(typing.Final, metaclass=_NoReturnMeta, _root=True):
+        """Special type indicating functions that never return.
+        Example::
+
+            from typing import NoReturn
+
+            def stop() -> NoReturn:
+                raise Exception('no way')
+
+        This type is invalid in other positions, e.g., ``List[NoReturn]``
+        will fail in static type checkers.
+        """
+
+        __slots__ = ()
+
+
+# Some unconstrained type variables. These are used by the container types.
+# (These are not for export.)
+T = typing.TypeVar("T")  # Any type.
+KT = typing.TypeVar("KT")  # Key type.
+VT = typing.TypeVar("VT") # Value type. +T_co = typing.TypeVar("T_co", covariant=True) # Any type covariant containers. +V_co = typing.TypeVar("V_co", covariant=True) # Any type covariant containers. +VT_co = typing.TypeVar("VT_co", covariant=True) # Value type covariant containers. +T_contra = typing.TypeVar("T_contra", contravariant=True) # Ditto contravariant. + + +if hasattr(typing, "ClassVar"): + ClassVar = typing.ClassVar +elif hasattr(typing, "_FinalTypingBase"): + + class _ClassVar(typing._FinalTypingBase, _root=True): + """Special type construct to mark class variables. + + An annotation wrapped in ClassVar indicates that a given + attribute is intended to be used as a class variable and + should not be set on instances of that class. Usage:: + + class Starship: + stats: ClassVar[Dict[str, int]] = {} # class variable + damage: int = 10 # instance variable + + ClassVar accepts only types and cannot be further subscribed. + + Note that ClassVar is not a class itself, and should not + be used with isinstance() or issubclass(). + """ + + __slots__ = ("__type__",) + + def __init__(self, tp=None, **kwds): + self.__type__ = tp + + def __getitem__(self, item): + cls = type(self) + if self.__type__ is None: + return cls( + typing._type_check( + item, "{} accepts only single type.".format(cls.__name__[1:]) + ), + _root=True, + ) + raise TypeError("{} cannot be further subscripted".format(cls.__name__[1:])) + + def _eval_type(self, globalns, localns): + new_tp = typing._eval_type(self.__type__, globalns, localns) + if new_tp == self.__type__: + return self + return type(self)(new_tp, _root=True) + + def __repr__(self): + r = super().__repr__() + if self.__type__ is not None: + r += "[{}]".format(typing._type_repr(self.__type__)) + return r + + def __hash__(self): + return hash((type(self).__name__, self.__type__)) + + def __eq__(self, other): + if not isinstance(other, _ClassVar): + return NotImplemented + if self.__type__ is not None: + return self.__type__ == other.__type__ + return self is other + + ClassVar = _ClassVar(_root=True) +else: + + class _ClassVarMeta(typing.TypingMeta): + """Metaclass for ClassVar""" + + def __new__(cls, name, bases, namespace, tp=None, _root=False): + self = super().__new__(cls, name, bases, namespace, _root=_root) + if tp is not None: + self.__type__ = tp + return self + + def __instancecheck__(self, obj): + raise TypeError("ClassVar cannot be used with isinstance().") + + def __subclasscheck__(self, cls): + raise TypeError("ClassVar cannot be used with issubclass().") + + def __getitem__(self, item): + cls = type(self) + if self.__type__ is not None: + raise TypeError( + "{} cannot be further subscripted".format(cls.__name__[1:]) + ) + + param = typing._type_check( + item, "{} accepts only single type.".format(cls.__name__[1:]) + ) + return cls( + self.__name__, self.__bases__, dict(self.__dict__), tp=param, _root=True + ) + + def _eval_type(self, globalns, localns): + new_tp = typing._eval_type(self.__type__, globalns, localns) + if new_tp == self.__type__: + return self + return type(self)( + self.__name__, + self.__bases__, + dict(self.__dict__), + tp=self.__type__, + _root=True, + ) + + def __repr__(self): + r = super().__repr__() + if self.__type__ is not None: + r += "[{}]".format(typing._type_repr(self.__type__)) + return r + + def __hash__(self): + return hash((type(self).__name__, self.__type__)) + + def __eq__(self, other): + if not isinstance(other, ClassVar): + return NotImplemented + if self.__type__ is not None: + return self.__type__ == 
other.__type__ + return self is other + + class ClassVar(typing.Final, metaclass=_ClassVarMeta, _root=True): + """Special type construct to mark class variables. + + An annotation wrapped in ClassVar indicates that a given + attribute is intended to be used as a class variable and + should not be set on instances of that class. Usage:: + + class Starship: + stats: ClassVar[Dict[str, int]] = {} # class variable + damage: int = 10 # instance variable + + ClassVar accepts only types and cannot be further subscribed. + + Note that ClassVar is not a class itself, and should not + be used with isinstance() or issubclass(). + """ + + __type__ = None + + +# On older versions of typing there is an internal class named "Final". +if hasattr(typing, "Final") and sys.version_info[:2] >= (3, 7): + Final = typing.Final +elif sys.version_info[:2] >= (3, 7): + + class _FinalForm(typing._SpecialForm, _root=True): + def __repr__(self): + return "typing_extensions." + self._name + + def __getitem__(self, parameters): + item = typing._type_check( + parameters, "{} accepts only single type".format(self._name) + ) + return _GenericAlias(self, (item,)) + + Final = _FinalForm( + "Final", + doc="""A special typing construct to indicate that a name + cannot be re-assigned or overridden in a subclass. + For example: + + MAX_SIZE: Final = 9000 + MAX_SIZE += 1 # Error reported by type checker + + class Connection: + TIMEOUT: Final[int] = 10 + class FastConnector(Connection): + TIMEOUT = 1 # Error reported by type checker + + There is no runtime checking of these properties.""", + ) +elif hasattr(typing, "_FinalTypingBase"): + + class _Final(typing._FinalTypingBase, _root=True): + """A special typing construct to indicate that a name + cannot be re-assigned or overridden in a subclass. + For example: + + MAX_SIZE: Final = 9000 + MAX_SIZE += 1 # Error reported by type checker + + class Connection: + TIMEOUT: Final[int] = 10 + class FastConnector(Connection): + TIMEOUT = 1 # Error reported by type checker + + There is no runtime checking of these properties. 
+ """ + + __slots__ = ("__type__",) + + def __init__(self, tp=None, **kwds): + self.__type__ = tp + + def __getitem__(self, item): + cls = type(self) + if self.__type__ is None: + return cls( + typing._type_check( + item, "{} accepts only single type.".format(cls.__name__[1:]) + ), + _root=True, + ) + raise TypeError("{} cannot be further subscripted".format(cls.__name__[1:])) + + def _eval_type(self, globalns, localns): + new_tp = typing._eval_type(self.__type__, globalns, localns) + if new_tp == self.__type__: + return self + return type(self)(new_tp, _root=True) + + def __repr__(self): + r = super().__repr__() + if self.__type__ is not None: + r += "[{}]".format(typing._type_repr(self.__type__)) + return r + + def __hash__(self): + return hash((type(self).__name__, self.__type__)) + + def __eq__(self, other): + if not isinstance(other, _Final): + return NotImplemented + if self.__type__ is not None: + return self.__type__ == other.__type__ + return self is other + + Final = _Final(_root=True) +else: + + class _FinalMeta(typing.TypingMeta): + """Metaclass for Final""" + + def __new__(cls, name, bases, namespace, tp=None, _root=False): + self = super().__new__(cls, name, bases, namespace, _root=_root) + if tp is not None: + self.__type__ = tp + return self + + def __instancecheck__(self, obj): + raise TypeError("Final cannot be used with isinstance().") + + def __subclasscheck__(self, cls): + raise TypeError("Final cannot be used with issubclass().") + + def __getitem__(self, item): + cls = type(self) + if self.__type__ is not None: + raise TypeError( + "{} cannot be further subscripted".format(cls.__name__[1:]) + ) + + param = typing._type_check( + item, "{} accepts only single type.".format(cls.__name__[1:]) + ) + return cls( + self.__name__, self.__bases__, dict(self.__dict__), tp=param, _root=True + ) + + def _eval_type(self, globalns, localns): + new_tp = typing._eval_type(self.__type__, globalns, localns) + if new_tp == self.__type__: + return self + return type(self)( + self.__name__, + self.__bases__, + dict(self.__dict__), + tp=self.__type__, + _root=True, + ) + + def __repr__(self): + r = super().__repr__() + if self.__type__ is not None: + r += "[{}]".format(typing._type_repr(self.__type__)) + return r + + def __hash__(self): + return hash((type(self).__name__, self.__type__)) + + def __eq__(self, other): + if not isinstance(other, Final): + return NotImplemented + if self.__type__ is not None: + return self.__type__ == other.__type__ + return self is other + + class Final(typing.Final, metaclass=_FinalMeta, _root=True): + """A special typing construct to indicate that a name + cannot be re-assigned or overridden in a subclass. + For example: + + MAX_SIZE: Final = 9000 + MAX_SIZE += 1 # Error reported by type checker + + class Connection: + TIMEOUT: Final[int] = 10 + class FastConnector(Connection): + TIMEOUT = 1 # Error reported by type checker + + There is no runtime checking of these properties. + """ + + __type__ = None + + +if hasattr(typing, "final"): + final = typing.final +else: + + def final(f): + """This decorator can be used to indicate to type checkers that + the decorated method cannot be overridden, and decorated class + cannot be subclassed. For example: + + class Base: + @final + def done(self) -> None: + ... + class Sub(Base): + def done(self) -> None: # Error reported by type checker + ... + @final + class Leaf: + ... + class Other(Leaf): # Error reported by type checker + ... + + There is no runtime checking of these properties. 
+ """ + return f + + +def IntVar(name): + return TypeVar(name) + + +if hasattr(typing, "Literal"): + Literal = typing.Literal +elif sys.version_info[:2] >= (3, 7): + + class _LiteralForm(typing._SpecialForm, _root=True): + def __repr__(self): + return "typing_extensions." + self._name + + def __getitem__(self, parameters): + return _GenericAlias(self, parameters) + + Literal = _LiteralForm( + "Literal", + doc="""A type that can be used to indicate to type checkers + that the corresponding value has a value literally equivalent + to the provided parameter. For example: + + var: Literal[4] = 4 + + The type checker understands that 'var' is literally equal to + the value 4 and no other value. + + Literal[...] cannot be subclassed. There is no runtime + checking verifying that the parameter is actually a value + instead of a type.""", + ) +elif hasattr(typing, "_FinalTypingBase"): + + class _Literal(typing._FinalTypingBase, _root=True): + """A type that can be used to indicate to type checkers that the + corresponding value has a value literally equivalent to the + provided parameter. For example: + + var: Literal[4] = 4 + + The type checker understands that 'var' is literally equal to the + value 4 and no other value. + + Literal[...] cannot be subclassed. There is no runtime checking + verifying that the parameter is actually a value instead of a type. + """ + + __slots__ = ("__values__",) + + def __init__(self, values=None, **kwds): + self.__values__ = values + + def __getitem__(self, values): + cls = type(self) + if self.__values__ is None: + if not isinstance(values, tuple): + values = (values,) + return cls(values, _root=True) + raise TypeError("{} cannot be further subscripted".format(cls.__name__[1:])) + + def _eval_type(self, globalns, localns): + return self + + def __repr__(self): + r = super().__repr__() + if self.__values__ is not None: + r += "[{}]".format(", ".join(map(typing._type_repr, self.__values__))) + return r + + def __hash__(self): + return hash((type(self).__name__, self.__values__)) + + def __eq__(self, other): + if not isinstance(other, _Literal): + return NotImplemented + if self.__values__ is not None: + return self.__values__ == other.__values__ + return self is other + + Literal = _Literal(_root=True) +else: + + class _LiteralMeta(typing.TypingMeta): + """Metaclass for Literal""" + + def __new__(cls, name, bases, namespace, values=None, _root=False): + self = super().__new__(cls, name, bases, namespace, _root=_root) + if values is not None: + self.__values__ = values + return self + + def __instancecheck__(self, obj): + raise TypeError("Literal cannot be used with isinstance().") + + def __subclasscheck__(self, cls): + raise TypeError("Literal cannot be used with issubclass().") + + def __getitem__(self, item): + cls = type(self) + if self.__values__ is not None: + raise TypeError( + "{} cannot be further subscripted".format(cls.__name__[1:]) + ) + + if not isinstance(item, tuple): + item = (item,) + return cls( + self.__name__, + self.__bases__, + dict(self.__dict__), + values=item, + _root=True, + ) + + def _eval_type(self, globalns, localns): + return self + + def __repr__(self): + r = super().__repr__() + if self.__values__ is not None: + r += "[{}]".format(", ".join(map(typing._type_repr, self.__values__))) + return r + + def __hash__(self): + return hash((type(self).__name__, self.__values__)) + + def __eq__(self, other): + if not isinstance(other, Literal): + return NotImplemented + if self.__values__ is not None: + return self.__values__ == 
other.__values__
+            return self is other
+
+    class Literal(typing.Final, metaclass=_LiteralMeta, _root=True):
+        """A type that can be used to indicate to type checkers that the
+        corresponding value has a value literally equivalent to the
+        provided parameter. For example:
+
+            var: Literal[4] = 4
+
+        The type checker understands that 'var' is literally equal to the
+        value 4 and no other value.
+
+        Literal[...] cannot be subclassed. There is no runtime checking
+        verifying that the parameter is actually a value instead of a type.
+        """
+
+        __values__ = None
+
+
+def _overload_dummy(*args, **kwds):
+    """Helper for @overload to raise when called."""
+    raise NotImplementedError(
+        "You should not call an overloaded function. "
+        "A series of @overload-decorated functions "
+        "outside a stub module should always be followed "
+        "by an implementation that is not @overload-ed."
+    )
+
+
+def overload(func):
+    """Decorator for overloaded functions/methods.
+
+    In a stub file, place two or more stub definitions for the same
+    function in a row, each decorated with @overload. For example:
+
+      @overload
+      def utf8(value: None) -> None: ...
+      @overload
+      def utf8(value: bytes) -> bytes: ...
+      @overload
+      def utf8(value: str) -> bytes: ...
+
+    In a non-stub file (i.e. a regular .py file), do the same but
+    follow it with an implementation. The implementation should *not*
+    be decorated with @overload. For example:
+
+      @overload
+      def utf8(value: None) -> None: ...
+      @overload
+      def utf8(value: bytes) -> bytes: ...
+      @overload
+      def utf8(value: str) -> bytes: ...
+      def utf8(value):
+          # implementation goes here
+    """
+    return _overload_dummy
+
+
+# This is not a real generic class. Don't use outside annotations.
+if hasattr(typing, "Type"):
+    Type = typing.Type
+else:
+    # Internal type variable used for Type[].
+    CT_co = typing.TypeVar("CT_co", covariant=True, bound=type)
+
+    class Type(typing.Generic[CT_co], extra=type):
+        """A special construct usable to annotate class objects.
+
+        For example, suppose we have the following classes::
+
+            class User: ...  # Abstract base for User classes
+            class BasicUser(User): ...
+            class ProUser(User): ...
+            class TeamUser(User): ...
+
+        And a function that takes a class argument that's a subclass of
+        User and returns an instance of the corresponding class::
+
+            U = TypeVar('U', bound=User)
+            def new_user(user_class: Type[U]) -> U:
+                user = user_class()
+                # (Here we could write the user object to a database)
+                return user
+            joe = new_user(BasicUser)
+
+        At this point the type checker knows that joe has type BasicUser.
+        """
+
+        __slots__ = ()
+
+
+# Various ABCs mimicking those in collections.abc.
+# A few are simply re-exported for completeness.
+
+
+def _define_guard(type_name):
+    """
+    Returns True if the given type isn't defined in typing but
+    is defined in collections_abc.
+
+    Adds the type to __all__ if the collection is found in either
+    typing or collections_abc.
+    """
+    if hasattr(typing, type_name):
+        __all__.append(type_name)
+        globals()[type_name] = getattr(typing, type_name)
+        return False
+    elif hasattr(collections_abc, type_name):
+        __all__.append(type_name)
+        return True
+    else:
+        return False
+
+
+class _ExtensionsGenericMeta(GenericMeta):
+    def __subclasscheck__(self, subclass):
+        """This mimics a more modern GenericMeta.__subclasscheck__() logic
+        (that does not have problems with recursion) to work around interactions
+        between collections, typing, and typing_extensions on older
+        versions of Python, see https://github.com/python/typing/issues/501.
+        """
+        if sys.version_info[:3] >= (3, 5, 3) or sys.version_info[:3] < (3, 5, 0):
+            if self.__origin__ is not None:
+                if sys._getframe(1).f_globals["__name__"] not in ["abc", "functools"]:
+                    raise TypeError(
+                        "Parameterized generics cannot be used with class "
+                        "or instance checks"
+                    )
+                return False
+        if not self.__extra__:
+            return super().__subclasscheck__(subclass)
+        res = self.__extra__.__subclasshook__(subclass)
+        if res is not NotImplemented:
+            return res
+        if self.__extra__ in subclass.__mro__:
+            return True
+        for scls in self.__extra__.__subclasses__():
+            if isinstance(scls, GenericMeta):
+                continue
+            if issubclass(subclass, scls):
+                return True
+        return False
+
+
+if _define_guard("Awaitable"):
+
+    class Awaitable(
+        typing.Generic[T_co],
+        metaclass=_ExtensionsGenericMeta,
+        extra=collections_abc.Awaitable,
+    ):
+        __slots__ = ()
+
+
+if _define_guard("Coroutine"):
+
+    class Coroutine(
+        Awaitable[V_co],
+        typing.Generic[T_co, T_contra, V_co],
+        metaclass=_ExtensionsGenericMeta,
+        extra=collections_abc.Coroutine,
+    ):
+        __slots__ = ()
+
+
+if _define_guard("AsyncIterable"):
+
+    class AsyncIterable(
+        typing.Generic[T_co],
+        metaclass=_ExtensionsGenericMeta,
+        extra=collections_abc.AsyncIterable,
+    ):
+        __slots__ = ()
+
+
+if _define_guard("AsyncIterator"):
+
+    class AsyncIterator(
+        AsyncIterable[T_co],
+        metaclass=_ExtensionsGenericMeta,
+        extra=collections_abc.AsyncIterator,
+    ):
+        __slots__ = ()
+
+
+if hasattr(typing, "Deque"):
+    Deque = typing.Deque
+elif _geqv_defined:
+
+    class Deque(
+        collections.deque,
+        typing.MutableSequence[T],
+        metaclass=_ExtensionsGenericMeta,
+        extra=collections.deque,
+    ):
+        __slots__ = ()
+
+        def __new__(cls, *args, **kwds):
+            if _geqv(cls, Deque):
+                return collections.deque(*args, **kwds)
+            return _generic_new(collections.deque, cls, *args, **kwds)
+
+
+else:
+
+    class Deque(
+        collections.deque,
+        typing.MutableSequence[T],
+        metaclass=_ExtensionsGenericMeta,
+        extra=collections.deque,
+    ):
+        __slots__ = ()
+
+        def __new__(cls, *args, **kwds):
+            if cls._gorg is Deque:
+                return collections.deque(*args, **kwds)
+            return _generic_new(collections.deque, cls, *args, **kwds)
+
+
+if hasattr(typing, "ContextManager"):
+    ContextManager = typing.ContextManager
+elif hasattr(contextlib, "AbstractContextManager"):
+
+    class ContextManager(
+        typing.Generic[T_co],
+        metaclass=_ExtensionsGenericMeta,
+        extra=contextlib.AbstractContextManager,
+    ):
+        __slots__ = ()
+
+
+else:
+
+    class ContextManager(typing.Generic[T_co]):
+        __slots__ = ()
+
+        def __enter__(self):
+            return self
+
+        @abc.abstractmethod
+        def __exit__(self, exc_type, exc_value, traceback):
+            return None
+
+        @classmethod
+        def __subclasshook__(cls, C):
+            if cls is ContextManager:
+                # In Python 3.6+, it is possible to set a method to None to
+                # explicitly indicate that the class does not implement an ABC
+                # (https://bugs.python.org/issue25958), but we do not support
+                # that pattern 
here because this fallback class is only used + # in Python 3.5 and earlier. + if any("__enter__" in B.__dict__ for B in C.__mro__) and any( + "__exit__" in B.__dict__ for B in C.__mro__ + ): + return True + return NotImplemented + + +if hasattr(typing, "AsyncContextManager"): + AsyncContextManager = typing.AsyncContextManager + __all__.append("AsyncContextManager") +elif hasattr(contextlib, "AbstractAsyncContextManager"): + + class AsyncContextManager( + typing.Generic[T_co], + metaclass=_ExtensionsGenericMeta, + extra=contextlib.AbstractAsyncContextManager, + ): + __slots__ = () + + __all__.append("AsyncContextManager") + +else: + + class AsyncContextManager(typing.Generic[T_co]): + __slots__ = () + + async def __aenter__(self): + return self + + @abc.abstractmethod + async def __aexit__(self, exc_type, exc_value, traceback): + return None + + @classmethod + def __subclasshook__(cls, C): + if cls is AsyncContextManager: + return _check_methods_in_mro(C, "__aenter__", "__aexit__") + return NotImplemented + + __all__.append("AsyncContextManager") + + +if hasattr(typing, "DefaultDict"): + DefaultDict = typing.DefaultDict +elif _geqv_defined: + + class DefaultDict( + collections.defaultdict, + typing.MutableMapping[KT, VT], + metaclass=_ExtensionsGenericMeta, + extra=collections.defaultdict, + ): + + __slots__ = () + + def __new__(cls, *args, **kwds): + if _geqv(cls, DefaultDict): + return collections.defaultdict(*args, **kwds) + return _generic_new(collections.defaultdict, cls, *args, **kwds) + + +else: + + class DefaultDict( + collections.defaultdict, + typing.MutableMapping[KT, VT], + metaclass=_ExtensionsGenericMeta, + extra=collections.defaultdict, + ): + + __slots__ = () + + def __new__(cls, *args, **kwds): + if cls._gorg is DefaultDict: + return collections.defaultdict(*args, **kwds) + return _generic_new(collections.defaultdict, cls, *args, **kwds) + + +if hasattr(typing, "Counter"): + Counter = typing.Counter +elif (3, 5, 0) <= sys.version_info[:3] <= (3, 5, 1): + assert _geqv_defined + _TInt = typing.TypeVar("_TInt") + + class _CounterMeta(typing.GenericMeta): + """Metaclass for Counter""" + + def __getitem__(self, item): + return super().__getitem__((item, int)) + + class Counter( + collections.Counter, + typing.Dict[T, int], + metaclass=_CounterMeta, + extra=collections.Counter, + ): + + __slots__ = () + + def __new__(cls, *args, **kwds): + if _geqv(cls, Counter): + return collections.Counter(*args, **kwds) + return _generic_new(collections.Counter, cls, *args, **kwds) + + +elif _geqv_defined: + + class Counter( + collections.Counter, + typing.Dict[T, int], + metaclass=_ExtensionsGenericMeta, + extra=collections.Counter, + ): + + __slots__ = () + + def __new__(cls, *args, **kwds): + if _geqv(cls, Counter): + return collections.Counter(*args, **kwds) + return _generic_new(collections.Counter, cls, *args, **kwds) + + +else: + + class Counter( + collections.Counter, + typing.Dict[T, int], + metaclass=_ExtensionsGenericMeta, + extra=collections.Counter, + ): + + __slots__ = () + + def __new__(cls, *args, **kwds): + if cls._gorg is Counter: + return collections.Counter(*args, **kwds) + return _generic_new(collections.Counter, cls, *args, **kwds) + + +if hasattr(typing, "ChainMap"): + ChainMap = typing.ChainMap + __all__.append("ChainMap") +elif hasattr(collections, "ChainMap"): + # ChainMap only exists in 3.3+ + if _geqv_defined: + + class ChainMap( + collections.ChainMap, + typing.MutableMapping[KT, VT], + metaclass=_ExtensionsGenericMeta, + extra=collections.ChainMap, + ): + + 
__slots__ = () + + def __new__(cls, *args, **kwds): + if _geqv(cls, ChainMap): + return collections.ChainMap(*args, **kwds) + return _generic_new(collections.ChainMap, cls, *args, **kwds) + + else: + + class ChainMap( + collections.ChainMap, + typing.MutableMapping[KT, VT], + metaclass=_ExtensionsGenericMeta, + extra=collections.ChainMap, + ): + + __slots__ = () + + def __new__(cls, *args, **kwds): + if cls._gorg is ChainMap: + return collections.ChainMap(*args, **kwds) + return _generic_new(collections.ChainMap, cls, *args, **kwds) + + __all__.append("ChainMap") + + +if _define_guard("AsyncGenerator"): + + class AsyncGenerator( + AsyncIterator[T_co], + typing.Generic[T_co, T_contra], + metaclass=_ExtensionsGenericMeta, + extra=collections_abc.AsyncGenerator, + ): + __slots__ = () + + +if hasattr(typing, "NewType"): + NewType = typing.NewType +else: + + def NewType(name, tp): + """NewType creates simple unique types with almost zero + runtime overhead. NewType(name, tp) is considered a subtype of tp + by static type checkers. At runtime, NewType(name, tp) returns + a dummy function that simply returns its argument. Usage:: + + UserId = NewType('UserId', int) + + def name_by_id(user_id: UserId) -> str: + ... + + UserId('user') # Fails type check + + name_by_id(42) # Fails type check + name_by_id(UserId(42)) # OK + + num: int = UserId(5) + 1 + """ + + def new_type(x): + return x + + new_type.__name__ = name + new_type.__supertype__ = tp + return new_type + + +if hasattr(typing, "Text"): + Text = typing.Text +else: + Text = str + + +if hasattr(typing, "TYPE_CHECKING"): + TYPE_CHECKING = typing.TYPE_CHECKING +else: + # Constant that's True when type checking, but False here. + TYPE_CHECKING = False + + +def _gorg(cls): + """This function exists for compatibility with old typing versions.""" + assert isinstance(cls, GenericMeta) + if hasattr(cls, "_gorg"): + return cls._gorg + while cls.__origin__ is not None: + cls = cls.__origin__ + return cls + + +if OLD_GENERICS: + + def _next_in_mro(cls): # noqa + """This function exists for compatibility with old typing versions.""" + next_in_mro = object + for i, c in enumerate(cls.__mro__[:-1]): + if isinstance(c, GenericMeta) and _gorg(c) is Generic: + next_in_mro = cls.__mro__[i + 1] + return next_in_mro + + +_PROTO_WHITELIST = [ + "Callable", + "Awaitable", + "Iterable", + "Iterator", + "AsyncIterable", + "AsyncIterator", + "Hashable", + "Sized", + "Container", + "Collection", + "Reversible", + "ContextManager", + "AsyncContextManager", +] + + +def _get_protocol_attrs(cls): + attrs = set() + for base in cls.__mro__[:-1]: # without object + if base.__name__ in ("Protocol", "Generic"): + continue + annotations = getattr(base, "__annotations__", {}) + for attr in list(base.__dict__.keys()) + list(annotations.keys()): + if not attr.startswith("_abc_") and attr not in ( + "__abstractmethods__", + "__annotations__", + "__weakref__", + "_is_protocol", + "_is_runtime_protocol", + "__dict__", + "__args__", + "__slots__", + "__next_in_mro__", + "__parameters__", + "__origin__", + "__orig_bases__", + "__extra__", + "__tree_hash__", + "__doc__", + "__subclasshook__", + "__init__", + "__new__", + "__module__", + "_MutableMapping__marker", + "_gorg", + ): + attrs.add(attr) + return attrs + + +def _is_callable_members_only(cls): + return all(callable(getattr(cls, attr, None)) for attr in _get_protocol_attrs(cls)) + + +if hasattr(typing, "Protocol"): + Protocol = typing.Protocol +elif HAVE_PROTOCOLS and not PEP_560: + + class _ProtocolMeta(GenericMeta): + 
"""Internal metaclass for Protocol. + + This exists so Protocol classes can be generic without deriving + from Generic. + """ + + if not OLD_GENERICS: + + def __new__( + cls, + name, + bases, + namespace, + tvars=None, + args=None, + origin=None, + extra=None, + orig_bases=None, + ): + # This is just a version copied from GenericMeta.__new__ that + # includes "Protocol" special treatment. (Comments removed for brevity.) + assert extra is None # Protocols should not have extra + if tvars is not None: + assert origin is not None + assert all(isinstance(t, TypeVar) for t in tvars), tvars + else: + tvars = _type_vars(bases) + gvars = None + for base in bases: + if base is Generic: + raise TypeError("Cannot inherit from plain Generic") + if isinstance(base, GenericMeta) and base.__origin__ in ( + Generic, + Protocol, + ): + if gvars is not None: + raise TypeError( + "Cannot inherit from Generic[...] or " + "Protocol[...] multiple times." + ) + gvars = base.__parameters__ + if gvars is None: + gvars = tvars + else: + tvarset = set(tvars) + gvarset = set(gvars) + if not tvarset <= gvarset: + raise TypeError( + "Some type variables (%s) " + "are not listed in %s[%s]" + % ( + ", ".join( + str(t) for t in tvars if t not in gvarset + ), + "Generic" + if any(b.__origin__ is Generic for b in bases) + else "Protocol", + ", ".join(str(g) for g in gvars), + ) + ) + tvars = gvars + + initial_bases = bases + if ( + extra is not None + and type(extra) is abc.ABCMeta + and extra not in bases + ): + bases = (extra,) + bases + bases = tuple( + _gorg(b) if isinstance(b, GenericMeta) else b for b in bases + ) + if any(isinstance(b, GenericMeta) and b is not Generic for b in bases): + bases = tuple(b for b in bases if b is not Generic) + namespace.update({"__origin__": origin, "__extra__": extra}) + self = super().__new__(cls, name, bases, namespace, _root=True) + super().__setattr__("_gorg", self if not origin else _gorg(origin)) + self.__parameters__ = tvars + self.__args__ = ( + tuple( + ... 
if a is _TypingEllipsis else () if a is _TypingEmpty else a + for a in args + ) + if args + else None + ) + self.__next_in_mro__ = _next_in_mro(self) + if orig_bases is None: + self.__orig_bases__ = initial_bases + elif origin is not None: + self._abc_registry = origin._abc_registry + self._abc_cache = origin._abc_cache + if hasattr(self, "_subs_tree"): + self.__tree_hash__ = ( + hash(self._subs_tree()) if origin else super().__hash__() + ) + return self + + def __init__(cls, *args, **kwargs): + super().__init__(*args, **kwargs) + if not cls.__dict__.get("_is_protocol", None): + cls._is_protocol = any( + b is Protocol + or isinstance(b, _ProtocolMeta) + and b.__origin__ is Protocol + for b in cls.__bases__ + ) + if cls._is_protocol: + for base in cls.__mro__[1:]: + if not ( + base in (object, Generic) + or base.__module__ == "collections.abc" + and base.__name__ in _PROTO_WHITELIST + or isinstance(base, TypingMeta) + and base._is_protocol + or isinstance(base, GenericMeta) + and base.__origin__ is Generic + ): + raise TypeError( + "Protocols can only inherit from other " + "protocols, got %r" % base + ) + + def _no_init(self, *args, **kwargs): + if type(self)._is_protocol: + raise TypeError("Protocols cannot be instantiated") + + cls.__init__ = _no_init + + def _proto_hook(other): + if not cls.__dict__.get("_is_protocol", None): + return NotImplemented + if not isinstance(other, type): + # Same error as for issubclass(1, int) + raise TypeError("issubclass() arg 1 must be a class") + for attr in _get_protocol_attrs(cls): + for base in other.__mro__: + if attr in base.__dict__: + if base.__dict__[attr] is None: + return NotImplemented + break + annotations = getattr(base, "__annotations__", {}) + if ( + isinstance(annotations, typing.Mapping) + and attr in annotations + and isinstance(other, _ProtocolMeta) + and other._is_protocol + ): + break + else: + return NotImplemented + return True + + if "__subclasshook__" not in cls.__dict__: + cls.__subclasshook__ = _proto_hook + + def __instancecheck__(self, instance): + # We need this method for situations where attributes are + # assigned in __init__. 
+ if ( + not getattr(self, "_is_protocol", False) + or _is_callable_members_only(self) + ) and issubclass(type(instance), self): + return True + if self._is_protocol: + if all( + hasattr(instance, attr) + and ( + not callable(getattr(self, attr, None)) + or getattr(instance, attr) is not None + ) + for attr in _get_protocol_attrs(self) + ): + return True + return super().__instancecheck__(instance) + + def __subclasscheck__(self, cls): + if self.__origin__ is not None: + if sys._getframe(1).f_globals["__name__"] not in ["abc", "functools"]: + raise TypeError( + "Parameterized generics cannot be used with class " + "or instance checks" + ) + return False + if self.__dict__.get("_is_protocol", None) and not self.__dict__.get( + "_is_runtime_protocol", None + ): + if sys._getframe(1).f_globals["__name__"] in [ + "abc", + "functools", + "typing", + ]: + return False + raise TypeError( + "Instance and class checks can only be used with " + "@runtime protocols" + ) + if self.__dict__.get( + "_is_runtime_protocol", None + ) and not _is_callable_members_only(self): + if sys._getframe(1).f_globals["__name__"] in [ + "abc", + "functools", + "typing", + ]: + return super().__subclasscheck__(cls) + raise TypeError( + "Protocols with non-method members don't support issubclass()" + ) + return super().__subclasscheck__(cls) + + if not OLD_GENERICS: + + @_tp_cache + def __getitem__(self, params): + # We also need to copy this from GenericMeta.__getitem__ to get + # special treatment of "Protocol". (Comments removed for brevity.) + if not isinstance(params, tuple): + params = (params,) + if not params and _gorg(self) is not Tuple: + raise TypeError( + "Parameter list to %s[...] cannot be empty" % self.__qualname__ + ) + msg = "Parameters to generic types must be types." + params = tuple(_type_check(p, msg) for p in params) + if self in (Generic, Protocol): + if not all(isinstance(p, TypeVar) for p in params): + raise TypeError( + "Parameters to %r[...] must all be type variables" % self + ) + if len(set(params)) != len(params): + raise TypeError( + "Parameters to %r[...] must all be unique" % self + ) + tvars = params + args = params + elif self in (Tuple, Callable): + tvars = _type_vars(params) + args = params + elif self.__origin__ in (Generic, Protocol): + raise TypeError( + "Cannot subscript already-subscripted %s" % repr(self) + ) + else: + _check_generic(self, params) + tvars = _type_vars(params) + args = params + + prepend = (self,) if self.__origin__ is None else () + return type(self)( + self.__name__, + prepend + self.__bases__, + _no_slots_copy(self.__dict__), + tvars=tvars, + args=args, + origin=self, + extra=self.__extra__, + orig_bases=self.__orig_bases__, + ) + + class Protocol(metaclass=_ProtocolMeta): + """Base class for protocol classes. Protocol classes are defined as:: + + class Proto(Protocol): + def meth(self) -> int: + ... + + Such classes are primarily used with static type checkers that recognize + structural subtyping (static duck-typing), for example:: + + class C: + def meth(self) -> int: + return 0 + + def func(x: Proto) -> int: + return x.meth() + + func(C()) # Passes static type check + + See PEP 544 for details. Protocol classes decorated with + @typing_extensions.runtime act as simple-minded runtime protocol that checks + only the presence of given attributes, ignoring their type signatures. + + Protocol classes can be generic, they are defined as:: + + class GenProto({bases}): + def meth(self) -> T: + ... 
+ """ + + __slots__ = () + _is_protocol = True + + def __new__(cls, *args, **kwds): + if _gorg(cls) is Protocol: + raise TypeError( + "Type Protocol cannot be instantiated; " + "it can be used only as a base class" + ) + if OLD_GENERICS: + return _generic_new(_next_in_mro(cls), cls, *args, **kwds) + return _generic_new(cls.__next_in_mro__, cls, *args, **kwds) + + if Protocol.__doc__ is not None: + Protocol.__doc__ = Protocol.__doc__.format( + bases="Protocol, Generic[T]" if OLD_GENERICS else "Protocol[T]" + ) + + +elif PEP_560: + from typing import _collect_type_vars, _GenericAlias, _type_check # noqa + + class _ProtocolMeta(abc.ABCMeta): + # This metaclass is a bit unfortunate and exists only because of the lack + # of __instancehook__. + def __instancecheck__(cls, instance): + # We need this method for situations where attributes are + # assigned in __init__. + if ( + not getattr(cls, "_is_protocol", False) + or _is_callable_members_only(cls) + ) and issubclass(type(instance), cls): + return True + if cls._is_protocol: + if all( + hasattr(instance, attr) + and ( + not callable(getattr(cls, attr, None)) + or getattr(instance, attr) is not None + ) + for attr in _get_protocol_attrs(cls) + ): + return True + return super().__instancecheck__(instance) + + class Protocol(metaclass=_ProtocolMeta): + # There is quite a lot of overlapping code with typing.Generic. + # Unfortunately it is hard to avoid this while these live in two different + # modules. The duplicated code will be removed when Protocol is moved to typing. + """Base class for protocol classes. Protocol classes are defined as:: + + class Proto(Protocol): + def meth(self) -> int: + ... + + Such classes are primarily used with static type checkers that recognize + structural subtyping (static duck-typing), for example:: + + class C: + def meth(self) -> int: + return 0 + + def func(x: Proto) -> int: + return x.meth() + + func(C()) # Passes static type check + + See PEP 544 for details. Protocol classes decorated with + @typing_extensions.runtime act as simple-minded runtime protocol that checks + only the presence of given attributes, ignoring their type signatures. + + Protocol classes can be generic, they are defined as:: + + class GenProto(Protocol[T]): + def meth(self) -> T: + ... + """ + __slots__ = () + _is_protocol = True + + def __new__(cls, *args, **kwds): + if cls is Protocol: + raise TypeError( + "Type Protocol cannot be instantiated; " + "it can only be used as a base class" + ) + return super().__new__(cls) + + @_tp_cache + def __class_getitem__(cls, params): + if not isinstance(params, tuple): + params = (params,) + if not params and cls is not Tuple: + raise TypeError( + "Parameter list to {}[...] cannot be empty".format(cls.__qualname__) + ) + msg = "Parameters to generic types must be types." + params = tuple(_type_check(p, msg) for p in params) + if cls is Protocol: + # Generic can only be subscripted with unique type variables. + if not all(isinstance(p, TypeVar) for p in params): + i = 0 + while isinstance(params[i], TypeVar): + i += 1 + raise TypeError( + "Parameters to Protocol[...] must all be type variables. " + "Parameter {} is {}".format(i + 1, params[i]) + ) + if len(set(params)) != len(params): + raise TypeError("Parameters to Protocol[...] must all be unique") + else: + # Subscripting a regular Generic subclass. 
_check_generic(cls, params)
+            return _GenericAlias(cls, params)
+
+        def __init_subclass__(cls, *args, **kwargs):
+            tvars = []
+            if "__orig_bases__" in cls.__dict__:
+                error = Generic in cls.__orig_bases__
+            else:
+                error = Generic in cls.__bases__
+            if error:
+                raise TypeError("Cannot inherit from plain Generic")
+            if "__orig_bases__" in cls.__dict__:
+                tvars = _collect_type_vars(cls.__orig_bases__)
+                # Look for Generic[T1, ..., Tn] or Protocol[T1, ..., Tn].
+                # If found, tvars must be a subset of it.
+                # If not found, tvars is it.
+                # Also check for and reject plain Generic,
+                # and reject multiple Generic[...] and/or Protocol[...].
+                gvars = None
+                for base in cls.__orig_bases__:
+                    if isinstance(base, _GenericAlias) and base.__origin__ in (
+                        Generic,
+                        Protocol,
+                    ):
+                        # for error messages
+                        the_base = (
+                            "Generic" if base.__origin__ is Generic else "Protocol"
+                        )
+                        if gvars is not None:
+                            raise TypeError(
+                                "Cannot inherit from Generic[...] "
+                                "and/or Protocol[...] multiple times."
+                            )
+                        gvars = base.__parameters__
+                if gvars is None:
+                    gvars = tvars
+                else:
+                    tvarset = set(tvars)
+                    gvarset = set(gvars)
+                    if not tvarset <= gvarset:
+                        s_vars = ", ".join(str(t) for t in tvars if t not in gvarset)
+                        s_args = ", ".join(str(g) for g in gvars)
+                        raise TypeError(
+                            "Some type variables ({}) are "
+                            "not listed in {}[{}]".format(s_vars, the_base, s_args)
+                        )
+                    tvars = gvars
+            cls.__parameters__ = tuple(tvars)
+
+            # Determine if this is a protocol or a concrete subclass.
+            if not cls.__dict__.get("_is_protocol", None):
+                cls._is_protocol = any(b is Protocol for b in cls.__bases__)
+
+            # Set (or override) the protocol subclass hook.
+            def _proto_hook(other):
+                if not cls.__dict__.get("_is_protocol", None):
+                    return NotImplemented
+                if not getattr(cls, "_is_runtime_protocol", False):
+                    if sys._getframe(2).f_globals["__name__"] in ["abc", "functools"]:
+                        return NotImplemented
+                    raise TypeError(
+                        "Instance and class checks can only be used with "
+                        "@runtime protocols"
+                    )
+                if not _is_callable_members_only(cls):
+                    if sys._getframe(2).f_globals["__name__"] in ["abc", "functools"]:
+                        return NotImplemented
+                    raise TypeError(
+                        "Protocols with non-method members "
+                        "don't support issubclass()"
+                    )
+                if not isinstance(other, type):
+                    # Same error as for issubclass(1, int)
+                    raise TypeError("issubclass() arg 1 must be a class")
+                for attr in _get_protocol_attrs(cls):
+                    for base in other.__mro__:
+                        if attr in base.__dict__:
+                            if base.__dict__[attr] is None:
+                                return NotImplemented
+                            break
+                        annotations = getattr(base, "__annotations__", {})
+                        if (
+                            isinstance(annotations, typing.Mapping)
+                            and attr in annotations
+                            and isinstance(other, _ProtocolMeta)
+                            and other._is_protocol
+                        ):
+                            break
+                    else:
+                        return NotImplemented
+                return True
+
+            if "__subclasshook__" not in cls.__dict__:
+                cls.__subclasshook__ = _proto_hook
+
+            # We have nothing more to do for non-protocols.
+            if not cls._is_protocol:
+                return
+
+            # Check consistency of bases. 
+ for base in cls.__bases__: + if not ( + base in (object, Generic) + or base.__module__ == "collections.abc" + and base.__name__ in _PROTO_WHITELIST + or isinstance(base, _ProtocolMeta) + and base._is_protocol + ): + raise TypeError( + "Protocols can only inherit from other " + "protocols, got %r" % base + ) + + def _no_init(self, *args, **kwargs): + if type(self)._is_protocol: + raise TypeError("Protocols cannot be instantiated") + + cls.__init__ = _no_init + + +if hasattr(typing, "runtime_checkable"): + runtime_checkable = typing.runtime_checkable +elif HAVE_PROTOCOLS: + + def runtime_checkable(cls): + """Mark a protocol class as a runtime protocol, so that it + can be used with isinstance() and issubclass(). Raise TypeError + if applied to a non-protocol class. + + This allows a simple-minded structural check very similar to the + one-offs in collections.abc such as Hashable. + """ + if not isinstance(cls, _ProtocolMeta) or not cls._is_protocol: + raise TypeError( + "@runtime_checkable can be only applied to protocol classes, " + "got %r" % cls + ) + cls._is_runtime_protocol = True + return cls + + +if HAVE_PROTOCOLS: + # Exists for backwards compatibility. + runtime = runtime_checkable + + +if hasattr(typing, "SupportsIndex"): + SupportsIndex = typing.SupportsIndex +elif HAVE_PROTOCOLS: + + @runtime_checkable + class SupportsIndex(Protocol): + __slots__ = () + + @abc.abstractmethod + def __index__(self) -> int: + pass + + +if sys.version_info[:2] >= (3, 9): + # The standard library TypedDict in Python 3.8 does not store runtime information + # about which (if any) keys are optional. See https://bugs.python.org/issue38834 + TypedDict = typing.TypedDict +else: + + def _check_fails(cls, other): + try: + if sys._getframe(1).f_globals["__name__"] not in [ + "abc", + "functools", + "typing", + ]: + # Typed dicts are only for static structural subtyping. 
+ raise TypeError("TypedDict does not support instance and class checks") + except (AttributeError, ValueError): + pass + return False + + def _dict_new(*args, **kwargs): + if not args: + raise TypeError("TypedDict.__new__(): not enough arguments") + _, args = args[0], args[1:] # allow the "cls" keyword be passed + return dict(*args, **kwargs) + + _dict_new.__text_signature__ = "($cls, _typename, _fields=None, /, **kwargs)" + + def _typeddict_new(*args, total=True, **kwargs): + if not args: + raise TypeError("TypedDict.__new__(): not enough arguments") + _, args = args[0], args[1:] # allow the "cls" keyword be passed + if args: + typename, args = ( + args[0], + args[1:], + ) # allow the "_typename" keyword be passed + elif "_typename" in kwargs: + typename = kwargs.pop("_typename") + import warnings + + warnings.warn( + "Passing '_typename' as keyword argument is deprecated", + DeprecationWarning, + stacklevel=2, + ) + else: + raise TypeError( + "TypedDict.__new__() missing 1 required positional " + "argument: '_typename'" + ) + if args: + try: + (fields,) = args # allow the "_fields" keyword be passed + except ValueError: + raise TypeError( + "TypedDict.__new__() takes from 2 to 3 " + "positional arguments but {} " + "were given".format(len(args) + 2) + ) + elif "_fields" in kwargs and len(kwargs) == 1: + fields = kwargs.pop("_fields") + import warnings + + warnings.warn( + "Passing '_fields' as keyword argument is deprecated", + DeprecationWarning, + stacklevel=2, + ) + else: + fields = None + + if fields is None: + fields = kwargs + elif kwargs: + raise TypeError( + "TypedDict takes either a dict or keyword arguments, but not both" + ) + + ns = {"__annotations__": dict(fields), "__total__": total} + try: + # Setting correct module is necessary to make typed dict classes pickleable. + ns["__module__"] = sys._getframe(1).f_globals.get("__name__", "__main__") + except (AttributeError, ValueError): + pass + + return _TypedDictMeta(typename, (), ns) + + _typeddict_new.__text_signature__ = ( + "($cls, _typename, _fields=None, /, *, total=True, **kwargs)" + ) + + class _TypedDictMeta(type): + def __new__(cls, name, bases, ns, total=True): + # Create new typed dict class object. + # This method is called directly when TypedDict is subclassed, + # or via _typeddict_new when TypedDict is instantiated. This way + # TypedDict supports all three syntaxes described in its docstring. + # Subclasses and instances of TypedDict return actual dictionaries + # via _dict_new. 
+ ns["__new__"] = _typeddict_new if name == "TypedDict" else _dict_new + tp_dict = super().__new__(cls, name, (dict,), ns) + + annotations = {} + own_annotations = ns.get("__annotations__", {}) + own_annotation_keys = set(own_annotations.keys()) + msg = "TypedDict('Name', {f0: t0, f1: t1, ...}); each t must be a type" + own_annotations = { + n: typing._type_check(tp, msg) for n, tp in own_annotations.items() + } + required_keys = set() + optional_keys = set() + + for base in bases: + annotations.update(base.__dict__.get("__annotations__", {})) + required_keys.update(base.__dict__.get("__required_keys__", ())) + optional_keys.update(base.__dict__.get("__optional_keys__", ())) + + annotations.update(own_annotations) + if total: + required_keys.update(own_annotation_keys) + else: + optional_keys.update(own_annotation_keys) + + tp_dict.__annotations__ = annotations + tp_dict.__required_keys__ = frozenset(required_keys) + tp_dict.__optional_keys__ = frozenset(optional_keys) + if not hasattr(tp_dict, "__total__"): + tp_dict.__total__ = total + return tp_dict + + __instancecheck__ = __subclasscheck__ = _check_fails + + TypedDict = _TypedDictMeta("TypedDict", (dict,), {}) + TypedDict.__module__ = __name__ + TypedDict.__doc__ = """A simple typed name space. At runtime it is equivalent to a plain dict. + + TypedDict creates a dictionary type that expects all of its + instances to have a certain set of keys, with each key + associated with a value of a consistent type. This expectation + is not checked at runtime but is only enforced by type checkers. + Usage:: + + class Point2D(TypedDict): + x: int + y: int + label: str + + a: Point2D = {'x': 1, 'y': 2, 'label': 'good'} # OK + b: Point2D = {'z': 3, 'label': 'bad'} # Fails type check + + assert Point2D(x=1, y=2, label='first') == dict(x=1, y=2, label='first') + + The type info can be accessed via the Point2D.__annotations__ dict, and + the Point2D.__required_keys__ and Point2D.__optional_keys__ frozensets. + TypedDict supports two additional equivalent forms:: + + Point2D = TypedDict('Point2D', x=int, y=int, label=str) + Point2D = TypedDict('Point2D', {'x': int, 'y': int, 'label': str}) + + The class syntax is only supported in Python 3.6+, while two other + syntax forms work for Python 2.7 and 3.2+ + """ + + +# Python 3.9+ has PEP 593 (Annotated and modified get_type_hints) +if hasattr(typing, "Annotated"): + Annotated = typing.Annotated + get_type_hints = typing.get_type_hints + # Not exported and not a public API, but needed for get_origin() and get_args() + # to work. + _AnnotatedAlias = typing._AnnotatedAlias +elif PEP_560: + + class _AnnotatedAlias(typing._GenericAlias, _root=True): + """Runtime representation of an annotated type. + + At its core 'Annotated[t, dec1, dec2, ...]' is an alias for the type 't' + with extra annotations. The alias behaves like a normal typing alias, + instantiating is the same as instantiating the underlying type, binding + it to types is also the same. 
+ """ + + def __init__(self, origin, metadata): + if isinstance(origin, _AnnotatedAlias): + metadata = origin.__metadata__ + metadata + origin = origin.__origin__ + super().__init__(origin, origin) + self.__metadata__ = metadata + + def copy_with(self, params): + assert len(params) == 1 + new_type = params[0] + return _AnnotatedAlias(new_type, self.__metadata__) + + def __repr__(self): + return "typing_extensions.Annotated[{}, {}]".format( + typing._type_repr(self.__origin__), + ", ".join(repr(a) for a in self.__metadata__), + ) + + def __reduce__(self): + return operator.getitem, (Annotated, (self.__origin__,) + self.__metadata__) + + def __eq__(self, other): + if not isinstance(other, _AnnotatedAlias): + return NotImplemented + if self.__origin__ != other.__origin__: + return False + return self.__metadata__ == other.__metadata__ + + def __hash__(self): + return hash((self.__origin__, self.__metadata__)) + + class Annotated: + """Add context specific metadata to a type. + + Example: Annotated[int, runtime_check.Unsigned] indicates to the + hypothetical runtime_check module that this type is an unsigned int. + Every other consumer of this type can ignore this metadata and treat + this type as int. + + The first argument to Annotated must be a valid type (and will be in + the __origin__ field), the remaining arguments are kept as a tuple in + the __extra__ field. + + Details: + + - It's an error to call `Annotated` with less than two arguments. + - Nested Annotated are flattened:: + + Annotated[Annotated[T, Ann1, Ann2], Ann3] == Annotated[T, Ann1, Ann2, Ann3] + + - Instantiating an annotated type is equivalent to instantiating the + underlying type:: + + Annotated[C, Ann1](5) == C(5) + + - Annotated can be used as a generic type alias:: + + Optimized = Annotated[T, runtime.Optimize()] + Optimized[int] == Annotated[int, runtime.Optimize()] + + OptimizedList = Annotated[List[T], runtime.Optimize()] + OptimizedList[int] == Annotated[List[int], runtime.Optimize()] + """ + + __slots__ = () + + def __new__(cls, *args, **kwargs): + raise TypeError("Type Annotated cannot be instantiated.") + + @_tp_cache + def __class_getitem__(cls, params): + if not isinstance(params, tuple) or len(params) < 2: + raise TypeError( + "Annotated[...] should be used " + "with at least two arguments (a type and an " + "annotation)." + ) + msg = "Annotated[t, ...]: t must be a type." + origin = typing._type_check(params[0], msg) + metadata = tuple(params[1:]) + return _AnnotatedAlias(origin, metadata) + + def __init_subclass__(cls, *args, **kwargs): + raise TypeError("Cannot subclass {}.Annotated".format(cls.__module__)) + + def _strip_annotations(t): + """Strips the annotations from a given type. + """ + if isinstance(t, _AnnotatedAlias): + return _strip_annotations(t.__origin__) + if isinstance(t, typing._GenericAlias): + stripped_args = tuple(_strip_annotations(a) for a in t.__args__) + if stripped_args == t.__args__: + return t + res = t.copy_with(stripped_args) + res._special = t._special + return res + return t + + def get_type_hints(obj, globalns=None, localns=None, include_extras=False): + """Return type hints for an object. + + This is often the same as obj.__annotations__, but it handles + forward references encoded as string literals, adds Optional[t] if a + default value equal to None is set and recursively replaces all + 'Annotated[T, ...]' with 'T' (unless 'include_extras=True'). + + The argument may be a module, class, method, or function. The annotations + are returned as a dictionary. 
For classes, annotations include also + inherited members. + + TypeError is raised if the argument is not of a type that can contain + annotations, and an empty dictionary is returned if no annotations are + present. + + BEWARE -- the behavior of globalns and localns is counterintuitive + (unless you are familiar with how eval and exec work). The + search order is locals first, then globals. + + - If no dict arguments are passed, an attempt is made to use the + globals from obj (or the respective module's globals for classes), + and these are also used as the locals. If the object does not appear + to have globals, an empty dictionary is used. + + - If one dict argument is passed, it is used for both globals and + locals. + + - If two dict arguments are passed, they specify globals and + locals, respectively. + """ + hint = typing.get_type_hints(obj, globalns=globalns, localns=localns) + if include_extras: + return hint + return {k: _strip_annotations(t) for k, t in hint.items()} + + +elif HAVE_ANNOTATED: + + def _is_dunder(name): + """Returns True if name is a __dunder_variable_name__.""" + return len(name) > 4 and name.startswith("__") and name.endswith("__") + + # Prior to Python 3.7 types did not have `copy_with`. A lot of the equality + # checks, argument expansion etc. are done on the _subs_tre. As a result we + # can't provide a get_type_hints function that strips out annotations. + + class AnnotatedMeta(typing.GenericMeta): + """Metaclass for Annotated""" + + def __new__(cls, name, bases, namespace, **kwargs): + if any(b is not object for b in bases): + raise TypeError("Cannot subclass " + str(Annotated)) + return super().__new__(cls, name, bases, namespace, **kwargs) + + @property + def __metadata__(self): + return self._subs_tree()[2] + + def _tree_repr(self, tree): + cls, origin, metadata = tree + if not isinstance(origin, tuple): + tp_repr = typing._type_repr(origin) + else: + tp_repr = origin[0]._tree_repr(origin) + metadata_reprs = ", ".join(repr(arg) for arg in metadata) + return "%s[%s, %s]" % (cls, tp_repr, metadata_reprs) + + def _subs_tree(self, tvars=None, args=None): # noqa + if self is Annotated: + return Annotated + res = super()._subs_tree(tvars=tvars, args=args) + # Flatten nested Annotated + if isinstance(res[1], tuple) and res[1][0] is Annotated: + sub_tp = res[1][1] + sub_annot = res[1][2] + return (Annotated, sub_tp, sub_annot + res[2]) + return res + + def _get_cons(self): + """Return the class used to create instance of this type.""" + if self.__origin__ is None: + raise TypeError( + "Cannot get the underlying type of a " + "non-specialized Annotated type." + ) + tree = self._subs_tree() + while isinstance(tree, tuple) and tree[0] is Annotated: + tree = tree[1] + if isinstance(tree, tuple): + return tree[0] + else: + return tree + + @_tp_cache + def __getitem__(self, params): + if not isinstance(params, tuple): + params = (params,) + if self.__origin__ is not None: # specializing an instantiated type + return super().__getitem__(params) + elif not isinstance(params, tuple) or len(params) < 2: + raise TypeError( + "Annotated[...] should be instantiated " + "with at least two arguments (a type and an " + "annotation)." + ) + else: + msg = "Annotated[t, ...]: t must be a type." + tp = typing._type_check(params[0], msg) + metadata = tuple(params[1:]) + return type(self)( + self.__name__, + self.__bases__, + _no_slots_copy(self.__dict__), + tvars=_type_vars((tp,)), + # Metadata is a tuple so it won't be touched by _replace_args et al. 
+ args=(tp, metadata), + origin=self, + ) + + def __call__(self, *args, **kwargs): + cons = self._get_cons() + result = cons(*args, **kwargs) + try: + result.__orig_class__ = self + except AttributeError: + pass + return result + + def __getattr__(self, attr): + # For simplicity we just don't relay all dunder names + if self.__origin__ is not None and not _is_dunder(attr): + return getattr(self._get_cons(), attr) + raise AttributeError(attr) + + def __setattr__(self, attr, value): + if _is_dunder(attr) or attr.startswith("_abc_"): + super().__setattr__(attr, value) + elif self.__origin__ is None: + raise AttributeError(attr) + else: + setattr(self._get_cons(), attr, value) + + def __instancecheck__(self, obj): + raise TypeError("Annotated cannot be used with isinstance().") + + def __subclasscheck__(self, cls): + raise TypeError("Annotated cannot be used with issubclass().") + + class Annotated(metaclass=AnnotatedMeta): + """Add context specific metadata to a type. + + Example: Annotated[int, runtime_check.Unsigned] indicates to the + hypothetical runtime_check module that this type is an unsigned int. + Every other consumer of this type can ignore this metadata and treat + this type as int. + + The first argument to Annotated must be a valid type, the remaining + arguments are kept as a tuple in the __metadata__ field. + + Details: + + - It's an error to call `Annotated` with less than two arguments. + - Nested Annotated are flattened:: + + Annotated[Annotated[T, Ann1, Ann2], Ann3] == Annotated[T, Ann1, Ann2, Ann3] + + - Instantiating an annotated type is equivalent to instantiating the + underlying type:: + + Annotated[C, Ann1](5) == C(5) + + - Annotated can be used as a generic type alias:: + + Optimized = Annotated[T, runtime.Optimize()] + Optimized[int] == Annotated[int, runtime.Optimize()] + + OptimizedList = Annotated[List[T], runtime.Optimize()] + OptimizedList[int] == Annotated[List[int], runtime.Optimize()] + """ + + +# Python 3.8 has get_origin() and get_args() but those implementations aren't +# Annotated-aware, so we can't use those, only Python 3.9 versions will do. +if sys.version_info[:2] >= (3, 9): + get_origin = typing.get_origin + get_args = typing.get_args +elif PEP_560: + from typing import _GenericAlias # noqa + + def get_origin(tp): + """Get the unsubscripted version of a type. + + This supports generic types, Callable, Tuple, Union, Literal, Final, ClassVar + and Annotated. Return None for unsupported types. Examples:: + + get_origin(Literal[42]) is Literal + get_origin(int) is None + get_origin(ClassVar[int]) is ClassVar + get_origin(Generic) is Generic + get_origin(Generic[T]) is Generic + get_origin(Union[T, int]) is Union + get_origin(List[Tuple[T, T]][int]) == list + """ + if isinstance(tp, _AnnotatedAlias): + return Annotated + if isinstance(tp, _GenericAlias): + return tp.__origin__ + if tp is Generic: + return Generic + return None + + def get_args(tp): + """Get type arguments with all substitutions performed. + + For unions, basic simplifications used by Union constructor are performed. 
+ Examples:: + get_args(Dict[str, int]) == (str, int) + get_args(int) == () + get_args(Union[int, Union[T, int], str][int]) == (int, str) + get_args(Union[int, Tuple[T, int]][str]) == (int, Tuple[str, int]) + get_args(Callable[[], T][int]) == ([], int) + """ + if isinstance(tp, _AnnotatedAlias): + return (tp.__origin__,) + tp.__metadata__ + if isinstance(tp, _GenericAlias): + res = tp.__args__ + if get_origin(tp) is collections.abc.Callable and res[0] is not Ellipsis: + res = (list(res[:-1]), res[-1]) + return res + return () + + +if hasattr(typing, "TypeAlias"): + TypeAlias = typing.TypeAlias +elif sys.version_info[:2] >= (3, 9): + + class _TypeAliasForm(typing._SpecialForm, _root=True): + def __repr__(self): + return "typing_extensions." + self._name + + @_TypeAliasForm + def TypeAlias(self, parameters): + """Special marker indicating that an assignment should + be recognized as a proper type alias definition by type + checkers. + + For example:: + + Predicate: TypeAlias = Callable[..., bool] + + It's invalid when used anywhere except as in the example above. + """ + raise TypeError("{} is not subscriptable".format(self)) + + +elif sys.version_info[:2] >= (3, 7): + + class _TypeAliasForm(typing._SpecialForm, _root=True): + def __repr__(self): + return "typing_extensions." + self._name + + TypeAlias = _TypeAliasForm( + "TypeAlias", + doc="""Special marker indicating that an assignment should + be recognized as a proper type alias definition by type + checkers. + + For example:: + + Predicate: TypeAlias = Callable[..., bool] + + It's invalid when used anywhere except as in the example + above.""", + ) + +elif hasattr(typing, "_FinalTypingBase"): + + class _TypeAliasMeta(typing.TypingMeta): + """Metaclass for TypeAlias""" + + def __repr__(self): + return "typing_extensions.TypeAlias" + + class _TypeAliasBase(typing._FinalTypingBase, metaclass=_TypeAliasMeta, _root=True): + """Special marker indicating that an assignment should + be recognized as a proper type alias definition by type + checkers. + + For example:: + + Predicate: TypeAlias = Callable[..., bool] + + It's invalid when used anywhere except as in the example above. + """ + + __slots__ = () + + def __instancecheck__(self, obj): + raise TypeError("TypeAlias cannot be used with isinstance().") + + def __subclasscheck__(self, cls): + raise TypeError("TypeAlias cannot be used with issubclass().") + + def __repr__(self): + return "typing_extensions.TypeAlias" + + TypeAlias = _TypeAliasBase(_root=True) +else: + + class _TypeAliasMeta(typing.TypingMeta): + """Metaclass for TypeAlias""" + + def __instancecheck__(self, obj): + raise TypeError("TypeAlias cannot be used with isinstance().") + + def __subclasscheck__(self, cls): + raise TypeError("TypeAlias cannot be used with issubclass().") + + def __call__(self, *args, **kwargs): + raise TypeError("Cannot instantiate TypeAlias") + + class TypeAlias(metaclass=_TypeAliasMeta, _root=True): + """Special marker indicating that an assignment should + be recognized as a proper type alias definition by type + checkers. + + For example:: + + Predicate: TypeAlias = Callable[..., bool] + + It's invalid when used anywhere except as in the example above. 
+ """ + + __slots__ = () diff --git a/setup.cfg b/setup.cfg index a47bc88d282ab..2d1c8037636de 100644 --- a/setup.cfg +++ b/setup.cfg @@ -68,6 +68,7 @@ omit = */tests/* pandas/_typing.py pandas/_version.py + pandas/_vendored/typing_extensions.py plugins = Cython.Coverage [coverage:report] @@ -99,7 +100,7 @@ directory = coverage_html_report # To be kept consistent with "Import Formatting" section in contributing.rst [isort] -known_pre_libs = pandas._config +known_pre_libs = pandas._config,pandas._vendored known_pre_core = pandas._libs,pandas._typing,pandas.util._*,pandas.compat,pandas.errors known_dtypes = pandas.core.dtypes known_post_core = pandas.tseries,pandas.io,pandas.plotting @@ -113,7 +114,7 @@ combine_as_imports = True line_length = 88 force_sort_within_sections = True skip_glob = env, -skip = pandas/__init__.py +skip = pandas/__init__.py,pandas/_vendored/typing_extensions.py [mypy] ignore_missing_imports=True @@ -124,6 +125,10 @@ warn_redundant_casts = True warn_unused_ignores = True show_error_codes = True +[mypy-pandas._vendored.*] +check_untyped_defs=False +ignore_errors=True + [mypy-pandas.tests.*] check_untyped_defs=False From 0bc407ab82f186a3f37512264570078779aa175f Mon Sep 17 00:00:00 2001 From: Erfan Nariman <34067903+erfannariman@users.noreply.github.com> Date: Wed, 2 Sep 2020 01:52:08 +0200 Subject: [PATCH 390/632] Added numba as an argument (#35778) --- doc/source/user_guide/computation.rst | 3 +++ doc/source/user_guide/enhancingperf.rst | 7 +++++++ 2 files changed, 10 insertions(+) diff --git a/doc/source/user_guide/computation.rst b/doc/source/user_guide/computation.rst index d7875e5b8d861..151ef36be7c98 100644 --- a/doc/source/user_guide/computation.rst +++ b/doc/source/user_guide/computation.rst @@ -361,6 +361,9 @@ compute the mean absolute deviation on a rolling basis: @savefig rolling_apply_ex.png s.rolling(window=60).apply(mad, raw=True).plot(style='k') +Using the Numba engine +~~~~~~~~~~~~~~~~~~~~~~ + .. versionadded:: 1.0 Additionally, :meth:`~Rolling.apply` can leverage `Numba `__ diff --git a/doc/source/user_guide/enhancingperf.rst b/doc/source/user_guide/enhancingperf.rst index 24fcb369804c6..9e101c1a20371 100644 --- a/doc/source/user_guide/enhancingperf.rst +++ b/doc/source/user_guide/enhancingperf.rst @@ -373,6 +373,13 @@ nicer interface by passing/returning pandas objects. In this example, using Numba was faster than Cython. +Numba as an argument +~~~~~~~~~~~~~~~~~~~~ + +Additionally, we can leverage the power of `Numba `__ +by calling it as an argument in :meth:`~Rolling.apply`. See :ref:`Computation tools +` for an extensive example. 
+ Vectorize ~~~~~~~~~ From 075ed8b35880a649f6d1297770c388bd380ff1a0 Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Tue, 1 Sep 2020 18:56:40 -0700 Subject: [PATCH 391/632] REF: handle axis=None case inside DataFrame.any/all to simplify _reduce (#35899) * REF: remove unnecesary try/except * TST: add test for agg on ordered categorical cols (#35630) * TST: resample does not yield empty groups (#10603) (#35799) * revert accidental rebase * REF: handle axis=None cases inside DataFrame.all/any * annotate * dummy commit to force Travis Co-authored-by: Karthik Mathur <22126205+mathurk1@users.noreply.github.com> Co-authored-by: tkmz-n <60312218+tkmz-n@users.noreply.github.com> --- pandas/core/frame.py | 61 +++++++++++++++--------------------------- pandas/core/generic.py | 8 ++++++ 2 files changed, 30 insertions(+), 39 deletions(-) diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 312d449e36022..e78c15d125e8d 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -8617,14 +8617,11 @@ def _reduce( cols = self.columns[~dtype_is_dt] self = self[cols] - if axis is None and filter_type == "bool": - labels = None - constructor = None - else: - # TODO: Make other agg func handle axis=None properly - axis = self._get_axis_number(axis) - labels = self._get_agg_axis(axis) - constructor = self._constructor + # TODO: Make other agg func handle axis=None properly + axis = self._get_axis_number(axis) + labels = self._get_agg_axis(axis) + constructor = self._constructor + assert axis in [0, 1] def func(values): if is_extension_array_dtype(values.dtype): @@ -8632,7 +8629,7 @@ def func(values): else: return op(values, axis=axis, skipna=skipna, **kwds) - def _get_data(axis_matters): + def _get_data(axis_matters: bool) -> "DataFrame": if filter_type is None: data = self._get_numeric_data() elif filter_type == "bool": @@ -8649,7 +8646,7 @@ def _get_data(axis_matters): raise NotImplementedError(msg) return data - if numeric_only is not None and axis in [0, 1]: + if numeric_only is not None: df = self if numeric_only is True: df = _get_data(axis_matters=True) @@ -8675,6 +8672,8 @@ def blk_func(values): out[:] = coerce_to_dtypes(out.values, df.dtypes) return out + assert numeric_only is None + if not self._is_homogeneous_type or self._mgr.any_extension_types: # try to avoid self.values call @@ -8702,40 +8701,24 @@ def blk_func(values): result = result.iloc[0].rename(None) return result - if numeric_only is None: - data = self - values = data.values - - try: - result = func(values) - - except TypeError: - # e.g. in nanops trying to convert strs to float + data = self + values = data.values - # TODO: why doesnt axis matter here? - data = _get_data(axis_matters=False) - labels = data._get_agg_axis(axis) + try: + result = func(values) - values = data.values - with np.errstate(all="ignore"): - result = func(values) + except TypeError: + # e.g. in nanops trying to convert strs to float - else: - if numeric_only: - data = _get_data(axis_matters=True) - labels = data._get_agg_axis(axis) + # TODO: why doesnt axis matter here? + data = _get_data(axis_matters=False) + labels = data._get_agg_axis(axis) - values = data.values - else: - data = self - values = data.values - result = func(values) + values = data.values + with np.errstate(all="ignore"): + result = func(values) - if filter_type == "bool" and is_object_dtype(values) and axis is None: - # work around https://github.com/numpy/numpy/issues/10489 - # TODO: can we de-duplicate parts of this with the next blocK? 
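# The any/all refactor in this commit leans on a simple identity: an axis=None
# logical reduction can be done one axis at a time. A minimal sketch of the
# equivalence, using any recent pandas:
import pandas as pd

df = pd.DataFrame({"a": [True, True], "b": [True, False]})
# Reducing everything at once matches reducing axis=0 and then the result.
assert df.any(axis=None) == df.any(axis=0).any()
assert df.all(axis=None) == df.all(axis=0).all()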
- result = np.bool_(result) - elif hasattr(result, "dtype") and is_object_dtype(result.dtype): + if is_object_dtype(result.dtype): try: if filter_type is None: result = result.astype(np.float64) diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 3bad2d6dd18b9..c80a95f79a7a0 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -11499,6 +11499,14 @@ def logical_func(self, axis=0, bool_only=None, skipna=True, level=None, **kwargs "Option bool_only is not implemented with option level." ) return self._agg_by_level(name, axis=axis, level=level, skipna=skipna) + + if self.ndim > 1 and axis is None: + # Reduce along one dimension then the other, to simplify DataFrame._reduce + res = logical_func( + self, axis=0, bool_only=bool_only, skipna=skipna, **kwargs + ) + return logical_func(res, skipna=skipna, **kwargs) + return self._reduce( func, name=name, From 20569007ea256d3126214ab845190d08f88c73a9 Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Tue, 1 Sep 2020 20:18:16 -0700 Subject: [PATCH 392/632] BUG: BlockSlider not clearing index._cache (#35937) * REF: remove unnecesary try/except * TST: add test for agg on ordered categorical cols (#35630) * TST: resample does not yield empty groups (#10603) (#35799) * revert accidental rebase * BUG: BlockSlider not clearing index._cache * update whatsnew Co-authored-by: Karthik Mathur <22126205+mathurk1@users.noreply.github.com> Co-authored-by: tkmz-n <60312218+tkmz-n@users.noreply.github.com> --- doc/source/whatsnew/v1.2.0.rst | 1 + pandas/_libs/reduction.pyx | 3 +++ pandas/tests/groupby/test_allowlist.py | 6 +++++- 3 files changed, 9 insertions(+), 1 deletion(-) diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index 407e8ba029ada..fca7e7d209031 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -256,6 +256,7 @@ Groupby/resample/rolling - Bug in :meth:`DataFrameGroupBy.apply` where a non-nuisance grouping column would be dropped from the output columns if another groupby method was called before ``.apply()`` (:issue:`34656`) - Bug in :meth:`DataFrameGroupby.apply` would drop a :class:`CategoricalIndex` when grouped on. (:issue:`35792`) - Bug when subsetting columns on a :class:`~pandas.core.groupby.DataFrameGroupBy` (e.g. ``df.groupby('a')[['b']])``) would reset the attributes ``axis``, ``dropna``, ``group_keys``, ``level``, ``mutated``, ``sort``, and ``squeeze`` to their default values. (:issue:`9959`) +- Bug in :meth:`DataFrameGroupby.tshift` failing to raise ``ValueError`` when a frequency cannot be inferred for the index of a group (:issue:`35937`) - Reshaping diff --git a/pandas/_libs/reduction.pyx b/pandas/_libs/reduction.pyx index 7b36bc8baf891..8161b5c5c2b11 100644 --- a/pandas/_libs/reduction.pyx +++ b/pandas/_libs/reduction.pyx @@ -53,6 +53,7 @@ cdef class _BaseGrouper: # to a 1-d ndarray like datetime / timedelta / period. object.__setattr__(cached_ityp, '_index_data', islider.buf) cached_ityp._engine.clear_mapping() + cached_ityp._cache.clear() # e.g. inferred_freq must go object.__setattr__(cached_typ._mgr._block, 'values', vslider.buf) object.__setattr__(cached_typ._mgr._block, 'mgr_locs', slice(len(vslider.buf))) @@ -71,6 +72,7 @@ cdef class _BaseGrouper: object res cached_ityp._engine.clear_mapping() + cached_ityp._cache.clear() # e.g. 
inferred_freq must go res = self.f(cached_typ) res = _extract_result(res) if not initialized: @@ -455,6 +457,7 @@ cdef class BlockSlider: object.__setattr__(self.index, '_index_data', self.idx_slider.buf) self.index._engine.clear_mapping() + self.index._cache.clear() # e.g. inferred_freq must go cdef reset(self): cdef: diff --git a/pandas/tests/groupby/test_allowlist.py b/pandas/tests/groupby/test_allowlist.py index 0fd66cc047017..4a735fc7bb686 100644 --- a/pandas/tests/groupby/test_allowlist.py +++ b/pandas/tests/groupby/test_allowlist.py @@ -369,7 +369,6 @@ def test_groupby_selection_with_methods(df): "ffill", "bfill", "pct_change", - "tshift", ] for m in methods: @@ -379,6 +378,11 @@ def test_groupby_selection_with_methods(df): # should always be frames! tm.assert_frame_equal(res, exp) + # check that the index cache is cleared + with pytest.raises(ValueError, match="Freq was not set in the index"): + # GH#35937 + g.tshift() + # methods which aren't just .foo() tm.assert_frame_equal(g.fillna(0), g_exp.fillna(0)) tm.assert_frame_equal(g.dtypes, g_exp.dtypes) From 73c1d3269830d787c8990de8f02bf4279d2720ab Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Tue, 1 Sep 2020 20:19:26 -0700 Subject: [PATCH 393/632] BUG: NDFrame.replace wrong exception type, wrong return when size==0 (#36045) * REF: remove unnecesary try/except * TST: add test for agg on ordered categorical cols (#35630) * TST: resample does not yield empty groups (#10603) (#35799) * revert accidental rebase * BUG: NDFrame.replace wrong exception type, wrong return when size==0 * bool->bool_t * whatsnew Co-authored-by: Karthik Mathur <22126205+mathurk1@users.noreply.github.com> Co-authored-by: tkmz-n <60312218+tkmz-n@users.noreply.github.com> --- doc/source/whatsnew/v1.2.0.rst | 2 +- pandas/core/generic.py | 14 +++++++------ pandas/tests/series/methods/test_replace.py | 23 +++++++++++++++++++++ 3 files changed, 32 insertions(+), 7 deletions(-) diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index fca7e7d209031..0cfe010b63a6f 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -282,7 +282,7 @@ ExtensionArray Other ^^^^^ -- +- Bug in :meth:`DataFrame.replace` and :meth:`Series.replace` incorrectly raising ``AssertionError`` instead of ``ValueError`` when invalid parameter combinations are passed (:issue:`36045`) - .. --------------------------------------------------------------------------- diff --git a/pandas/core/generic.py b/pandas/core/generic.py index c80a95f79a7a0..233d48bfc85c3 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -6179,8 +6179,8 @@ def replace( self, to_replace=None, value=None, - inplace=False, - limit=None, + inplace: bool_t = False, + limit: Optional[int] = None, regex=False, method="pad", ): @@ -6256,7 +6256,7 @@ def replace( If True, in place. Note: this will modify any other views on this object (e.g. a column from a DataFrame). Returns the caller if this is True. - limit : int, default None + limit : int or None, default None Maximum size gap to forward or backward fill. 
regex : bool or same types as `to_replace`, default False Whether to interpret `to_replace` and/or `value` as regular @@ -6490,7 +6490,7 @@ def replace( inplace = validate_bool_kwarg(inplace, "inplace") if not is_bool(regex) and to_replace is not None: - raise AssertionError("'to_replace' must be 'None' if 'regex' is not a bool") + raise ValueError("'to_replace' must be 'None' if 'regex' is not a bool") if value is None: # passing a single value that is scalar like @@ -6550,12 +6550,14 @@ def replace( # need a non-zero len on all axes if not self.size: - return self + if inplace: + return + return self.copy() if is_dict_like(to_replace): if is_dict_like(value): # {'A' : NA} -> {'A' : 0} # Note: Checking below for `in foo.keys()` instead of - # `in foo`is needed for when we have a Series and not dict + # `in foo` is needed for when we have a Series and not dict mapping = { col: (to_replace[col], value[col]) for col in to_replace.keys() diff --git a/pandas/tests/series/methods/test_replace.py b/pandas/tests/series/methods/test_replace.py index f78a28c66e946..ccaa005369a1c 100644 --- a/pandas/tests/series/methods/test_replace.py +++ b/pandas/tests/series/methods/test_replace.py @@ -397,6 +397,29 @@ def test_replace_invalid_to_replace(self): with pytest.raises(TypeError, match=msg): series.replace(lambda x: x.strip()) + @pytest.mark.parametrize("frame", [False, True]) + def test_replace_nonbool_regex(self, frame): + obj = pd.Series(["a", "b", "c "]) + if frame: + obj = obj.to_frame() + + msg = "'to_replace' must be 'None' if 'regex' is not a bool" + with pytest.raises(ValueError, match=msg): + obj.replace(to_replace=["a"], regex="foo") + + @pytest.mark.parametrize("frame", [False, True]) + def test_replace_empty_copy(self, frame): + obj = pd.Series([], dtype=np.float64) + if frame: + obj = obj.to_frame() + + res = obj.replace(4, 5, inplace=True) + assert res is None + + res = obj.replace(4, 5, inplace=False) + tm.assert_equal(res, obj) + assert res is not obj + def test_replace_only_one_dictlike_arg(self): # GH#33340 From 1fc244f89108347804ea33c7e912f933c2aa63da Mon Sep 17 00:00:00 2001 From: Jonathan Shreckengost Date: Wed, 2 Sep 2020 09:28:45 -0400 Subject: [PATCH 394/632] Comma cleanup for #35925 (#36058) --- pandas/tests/generic/test_finalize.py | 10 ++++------ pandas/tests/generic/test_to_xarray.py | 4 +--- pandas/tests/groupby/aggregate/test_numba.py | 8 ++++---- pandas/tests/groupby/test_apply.py | 4 +--- pandas/tests/groupby/test_categorical.py | 4 ++-- pandas/tests/groupby/test_groupby.py | 2 +- pandas/tests/groupby/test_groupby_dropna.py | 4 +--- pandas/tests/groupby/test_groupby_subclass.py | 4 +--- pandas/tests/groupby/test_size.py | 2 +- 9 files changed, 16 insertions(+), 26 deletions(-) diff --git a/pandas/tests/generic/test_finalize.py b/pandas/tests/generic/test_finalize.py index 4d0f1a326225d..8898619e374ab 100644 --- a/pandas/tests/generic/test_finalize.py +++ b/pandas/tests/generic/test_finalize.py @@ -123,7 +123,7 @@ (pd.DataFrame, frame_data, operator.methodcaller("sort_index")), (pd.DataFrame, frame_data, operator.methodcaller("nlargest", 1, "A")), (pd.DataFrame, frame_data, operator.methodcaller("nsmallest", 1, "A")), - (pd.DataFrame, frame_mi_data, operator.methodcaller("swaplevel"),), + (pd.DataFrame, frame_mi_data, operator.methodcaller("swaplevel")), pytest.param( ( pd.DataFrame, @@ -178,7 +178,7 @@ marks=not_implemented_mark, ), pytest.param( - (pd.DataFrame, frame_mi_data, operator.methodcaller("unstack"),), + (pd.DataFrame, frame_mi_data, 
operator.methodcaller("unstack")), marks=not_implemented_mark, ), pytest.param( @@ -317,7 +317,7 @@ marks=not_implemented_mark, ), pytest.param( - (pd.Series, ([1, 2],), operator.methodcaller("squeeze")), + (pd.Series, ([1, 2],), operator.methodcaller("squeeze")) # marks=not_implemented_mark, ), (pd.Series, ([1, 2],), operator.methodcaller("rename_axis", index="a")), @@ -733,9 +733,7 @@ def test_timedelta_property(attr): assert result.attrs == {"a": 1} -@pytest.mark.parametrize( - "method", [operator.methodcaller("total_seconds")], -) +@pytest.mark.parametrize("method", [operator.methodcaller("total_seconds")]) @not_implemented_mark def test_timedelta_methods(method): s = pd.Series(pd.timedelta_range("2000", periods=4)) diff --git a/pandas/tests/generic/test_to_xarray.py b/pandas/tests/generic/test_to_xarray.py index ab56a752f7e90..a85d7ddc1ea53 100644 --- a/pandas/tests/generic/test_to_xarray.py +++ b/pandas/tests/generic/test_to_xarray.py @@ -47,9 +47,7 @@ def test_to_xarray_index_types(self, index): expected = df.copy() expected["f"] = expected["f"].astype(object) expected.columns.name = None - tm.assert_frame_equal( - result.to_dataframe(), expected, - ) + tm.assert_frame_equal(result.to_dataframe(), expected) @td.skip_if_no("xarray", min_version="0.7.0") def test_to_xarray(self): diff --git a/pandas/tests/groupby/aggregate/test_numba.py b/pandas/tests/groupby/aggregate/test_numba.py index 29e65e938f6f9..c4266996748c2 100644 --- a/pandas/tests/groupby/aggregate/test_numba.py +++ b/pandas/tests/groupby/aggregate/test_numba.py @@ -57,7 +57,7 @@ def func_numba(values, index): func_numba = numba.jit(func_numba) data = DataFrame( - {0: ["a", "a", "b", "b", "a"], 1: [1.0, 2.0, 3.0, 4.0, 5.0]}, columns=[0, 1], + {0: ["a", "a", "b", "b", "a"], 1: [1.0, 2.0, 3.0, 4.0, 5.0]}, columns=[0, 1] ) engine_kwargs = {"nogil": nogil, "parallel": parallel, "nopython": nopython} grouped = data.groupby(0) @@ -90,7 +90,7 @@ def func_2(values, index): func_2 = numba.jit(func_2) data = DataFrame( - {0: ["a", "a", "b", "b", "a"], 1: [1.0, 2.0, 3.0, 4.0, 5.0]}, columns=[0, 1], + {0: ["a", "a", "b", "b", "a"], 1: [1.0, 2.0, 3.0, 4.0, 5.0]}, columns=[0, 1] ) engine_kwargs = {"nogil": nogil, "parallel": parallel, "nopython": nopython} grouped = data.groupby(0) @@ -121,7 +121,7 @@ def func_1(values, index): return np.mean(values) - 3.4 data = DataFrame( - {0: ["a", "a", "b", "b", "a"], 1: [1.0, 2.0, 3.0, 4.0, 5.0]}, columns=[0, 1], + {0: ["a", "a", "b", "b", "a"], 1: [1.0, 2.0, 3.0, 4.0, 5.0]}, columns=[0, 1] ) grouped = data.groupby(0) expected = grouped.agg(func_1, engine="numba") @@ -142,7 +142,7 @@ def func_1(values, index): ) def test_multifunc_notimplimented(agg_func): data = DataFrame( - {0: ["a", "a", "b", "b", "a"], 1: [1.0, 2.0, 3.0, 4.0, 5.0]}, columns=[0, 1], + {0: ["a", "a", "b", "b", "a"], 1: [1.0, 2.0, 3.0, 4.0, 5.0]}, columns=[0, 1] ) grouped = data.groupby(0) with pytest.raises(NotImplementedError, match="Numba engine can"): diff --git a/pandas/tests/groupby/test_apply.py b/pandas/tests/groupby/test_apply.py index a1dcb28a32c6c..3183305fe2933 100644 --- a/pandas/tests/groupby/test_apply.py +++ b/pandas/tests/groupby/test_apply.py @@ -946,9 +946,7 @@ def fct(group): tm.assert_series_equal(result, expected) -@pytest.mark.parametrize( - "function", [lambda gr: gr.index, lambda gr: gr.index + 1 - 1], -) +@pytest.mark.parametrize("function", [lambda gr: gr.index, lambda gr: gr.index + 1 - 1]) def test_apply_function_index_return(function): # GH: 22541 df = pd.DataFrame([1, 2, 2, 2, 1, 2, 3, 1, 3, 1], 
columns=["id"]) diff --git a/pandas/tests/groupby/test_categorical.py b/pandas/tests/groupby/test_categorical.py index 13a32e285e70a..711daf7fe415d 100644 --- a/pandas/tests/groupby/test_categorical.py +++ b/pandas/tests/groupby/test_categorical.py @@ -17,7 +17,7 @@ def cartesian_product_for_groupers(result, args, names, fill_value=np.NaN): - """ Reindex to a cartesian production for the groupers, + """Reindex to a cartesian production for the groupers, preserving the nature (Categorical) of each grouper """ @@ -1449,7 +1449,7 @@ def test_groupby_agg_categorical_columns(func, expected_values): result = df.groupby("groups").agg(func) expected = pd.DataFrame( - {"value": expected_values}, index=pd.Index([0, 1, 2], name="groups"), + {"value": expected_values}, index=pd.Index([0, 1, 2], name="groups") ) tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py index c743058c988b4..eec9e8064d584 100644 --- a/pandas/tests/groupby/test_groupby.py +++ b/pandas/tests/groupby/test_groupby.py @@ -676,7 +676,7 @@ def test_ops_not_as_index(reduction_func): if reduction_func in ("corrwith",): pytest.skip("Test not applicable") - if reduction_func in ("nth", "ngroup",): + if reduction_func in ("nth", "ngroup"): pytest.skip("Skip until behavior is determined (GH #5755)") df = DataFrame(np.random.randint(0, 5, size=(100, 2)), columns=["a", "b"]) diff --git a/pandas/tests/groupby/test_groupby_dropna.py b/pandas/tests/groupby/test_groupby_dropna.py index adf62c4723526..d1501111cb22b 100644 --- a/pandas/tests/groupby/test_groupby_dropna.py +++ b/pandas/tests/groupby/test_groupby_dropna.py @@ -246,9 +246,7 @@ def test_groupby_dropna_multi_index_dataframe_agg(dropna, tuples, outputs): (pd.Period("2020-01-01"), pd.Period("2020-02-01")), ], ) -@pytest.mark.parametrize( - "dropna, values", [(True, [12, 3]), (False, [12, 3, 6],)], -) +@pytest.mark.parametrize("dropna, values", [(True, [12, 3]), (False, [12, 3, 6])]) def test_groupby_dropna_datetime_like_data( dropna, values, datetime1, datetime2, unique_nulls_fixture, unique_nulls_fixture2 ): diff --git a/pandas/tests/groupby/test_groupby_subclass.py b/pandas/tests/groupby/test_groupby_subclass.py index 7271911c5f80f..cc7a79e976513 100644 --- a/pandas/tests/groupby/test_groupby_subclass.py +++ b/pandas/tests/groupby/test_groupby_subclass.py @@ -51,9 +51,7 @@ def test_groupby_preserves_subclass(obj, groupby_func): tm.assert_series_equal(result1, result2) -@pytest.mark.parametrize( - "obj", [DataFrame, tm.SubclassedDataFrame], -) +@pytest.mark.parametrize("obj", [DataFrame, tm.SubclassedDataFrame]) def test_groupby_resample_preserves_subclass(obj): # GH28330 -- preserve subclass through groupby.resample() diff --git a/pandas/tests/groupby/test_size.py b/pandas/tests/groupby/test_size.py index 9cff8b966dad0..ba27e5a24ba00 100644 --- a/pandas/tests/groupby/test_size.py +++ b/pandas/tests/groupby/test_size.py @@ -53,7 +53,7 @@ def test_size_on_categorical(as_index): result = df.groupby(["A", "B"], as_index=as_index).size() expected = DataFrame( - [[1, 1, 1], [1, 2, 0], [2, 1, 0], [2, 2, 1]], columns=["A", "B", "size"], + [[1, 1, 1], [1, 2, 0], [2, 1, 0], [2, 2, 1]], columns=["A", "B", "size"] ) expected["A"] = expected["A"].astype("category") if as_index: From 19c3d4013d41198d1fa40dbaea3140bd11e0564a Mon Sep 17 00:00:00 2001 From: Kaiqi Dong Date: Wed, 2 Sep 2020 17:00:49 +0200 Subject: [PATCH 395/632] API: replace dropna=False option with na_sentinel=None in factorize (#35852) * remove \n from 
docstring * fix issue 17038 * revert change * revert change * add dropna doc for factorize * rephrase the doc * flake8 * fixup * use NaN * add dropna in series.factorize * black * add test * linting * linting * doct * fix black * fixup * fix doctest * add whatsnew * linting * fix test * try one time * hide dropna and use na_sentinel=None * update whatsnew * rename test function * remove dropna from factorize * update doc * docstring * update doc * add comment * code change on review * update doc * code change on review * minor move in whatsnew * add default example * doc * one more try * explicit doc * add space --- doc/source/whatsnew/v1.1.2.rst | 8 ++++++ pandas/core/algorithms.py | 33 +++++++++++++++++++--- pandas/core/base.py | 2 +- pandas/core/groupby/grouper.py | 7 ++++- pandas/tests/base/test_factorize.py | 13 +++++++++ pandas/tests/test_algos.py | 44 ++++++----------------------- 6 files changed, 66 insertions(+), 41 deletions(-) diff --git a/doc/source/whatsnew/v1.1.2.rst b/doc/source/whatsnew/v1.1.2.rst index 9b1ad658d4666..fdfb084b47a89 100644 --- a/doc/source/whatsnew/v1.1.2.rst +++ b/doc/source/whatsnew/v1.1.2.rst @@ -35,6 +35,14 @@ Bug fixes .. --------------------------------------------------------------------------- +.. _whatsnew_112.other: + +Other +~~~~~ +- :meth:`factorize` now supports ``na_sentinel=None`` to include NaN in the uniques of the values and remove ``dropna`` keyword which was unintentionally exposed to public facing API in 1.1 version from :meth:`factorize`(:issue:`35667`) + +.. --------------------------------------------------------------------------- + .. _whatsnew_112.contributors: Contributors diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py index 6d6bb21165814..d2af6c132eca2 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py @@ -526,9 +526,8 @@ def _factorize_array( def factorize( values, sort: bool = False, - na_sentinel: int = -1, + na_sentinel: Optional[int] = -1, size_hint: Optional[int] = None, - dropna: bool = True, ) -> Tuple[np.ndarray, Union[np.ndarray, ABCIndex]]: """ Encode the object as an enumerated type or categorical variable. @@ -541,8 +540,11 @@ def factorize( Parameters ---------- {values}{sort} - na_sentinel : int, default -1 - Value to mark "not found". + na_sentinel : int or None, default -1 + Value to mark "not found". If None, will not drop the NaN + from the uniques of the values. + + .. versionchanged:: 1.1.2 {size_hint}\ Returns @@ -620,6 +622,22 @@ def factorize( array([0, 0, 1]...) >>> uniques Index(['a', 'c'], dtype='object') + + If NaN is in the values, and we want to include NaN in the uniques of the + values, it can be achieved by setting ``na_sentinel=None``. + + >>> values = np.array([1, 2, 1, np.nan]) + >>> codes, uniques = pd.factorize(values) # default: na_sentinel=-1 + >>> codes + array([ 0, 1, 0, -1]) + >>> uniques + array([1., 2.]) + + >>> codes, uniques = pd.factorize(values, na_sentinel=None) + >>> codes + array([0, 1, 0, 2]) + >>> uniques + array([ 1., 2., nan]) """ # Implementation notes: This method is responsible for 3 things # 1.) coercing data to array-like (ndarray, Index, extension array) @@ -633,6 +651,13 @@ def factorize( values = _ensure_arraylike(values) original = values + # GH35667, if na_sentinel=None, we will not dropna NaNs from the uniques + # of values, assign na_sentinel=-1 to replace code value for NaN. 
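# A quick sketch of the two behaviours this branch selects between
# (pandas >= 1.1.2):
import numpy as np
import pandas as pd

values = np.array(["a", None, "b", "a"], dtype=object)
# Default na_sentinel=-1: None is coded as the sentinel and dropped from uniques.
codes, uniques = pd.factorize(values)
assert list(codes) == [0, -1, 1, 0]
# na_sentinel=None: None is kept as a regular entry in the uniques.
codes, uniques = pd.factorize(values, na_sentinel=None)
assert list(codes) == [0, 2, 1, 0]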
+ dropna = True + if na_sentinel is None: + na_sentinel = -1 + dropna = False + if is_extension_array_dtype(values.dtype): values = extract_array(values) codes, uniques = values.factorize(na_sentinel=na_sentinel) diff --git a/pandas/core/base.py b/pandas/core/base.py index b62ef668df5e1..1926803d8f04b 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -1398,7 +1398,7 @@ def memory_usage(self, deep=False): """ ), ) - def factorize(self, sort=False, na_sentinel=-1): + def factorize(self, sort: bool = False, na_sentinel: Optional[int] = -1): return algorithms.factorize(self, sort=sort, na_sentinel=na_sentinel) _shared_docs[ diff --git a/pandas/core/groupby/grouper.py b/pandas/core/groupby/grouper.py index 3017521c6a065..6678edc3821c8 100644 --- a/pandas/core/groupby/grouper.py +++ b/pandas/core/groupby/grouper.py @@ -587,8 +587,13 @@ def _make_codes(self) -> None: codes = self.grouper.codes_info uniques = self.grouper.result_index else: + # GH35667, replace dropna=False with na_sentinel=None + if not self.dropna: + na_sentinel = None + else: + na_sentinel = -1 codes, uniques = algorithms.factorize( - self.grouper, sort=self.sort, dropna=self.dropna + self.grouper, sort=self.sort, na_sentinel=na_sentinel ) uniques = Index(uniques, name=self.name) self._codes = codes diff --git a/pandas/tests/base/test_factorize.py b/pandas/tests/base/test_factorize.py index 415a8b7e4362f..9fad9856d53cc 100644 --- a/pandas/tests/base/test_factorize.py +++ b/pandas/tests/base/test_factorize.py @@ -26,3 +26,16 @@ def test_factorize(index_or_series_obj, sort): tm.assert_numpy_array_equal(result_codes, expected_codes) tm.assert_index_equal(result_uniques, expected_uniques) + + +def test_series_factorize_na_sentinel_none(): + # GH35667 + values = np.array([1, 2, 1, np.nan]) + ser = pd.Series(values) + codes, uniques = ser.factorize(na_sentinel=None) + + expected_codes = np.array([0, 1, 0, 2], dtype="int64") + expected_uniques = pd.Index([1.0, 2.0, np.nan]) + + tm.assert_numpy_array_equal(codes, expected_codes) + tm.assert_index_equal(uniques, expected_uniques) diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py index 67a2dc2303550..b4e97f1e341e4 100644 --- a/pandas/tests/test_algos.py +++ b/pandas/tests/test_algos.py @@ -340,73 +340,47 @@ def test_factorize_na_sentinel(self, sort, na_sentinel, data, uniques): tm.assert_extension_array_equal(uniques, expected_uniques) @pytest.mark.parametrize( - "data, dropna, expected_codes, expected_uniques", + "data, expected_codes, expected_uniques", [ ( ["a", None, "b", "a"], - True, - np.array([0, -1, 1, 0], dtype=np.dtype("intp")), - np.array(["a", "b"], dtype=object), - ), - ( - ["a", np.nan, "b", "a"], - True, - np.array([0, -1, 1, 0], dtype=np.dtype("intp")), - np.array(["a", "b"], dtype=object), - ), - ( - ["a", None, "b", "a"], - False, np.array([0, 2, 1, 0], dtype=np.dtype("intp")), np.array(["a", "b", np.nan], dtype=object), ), ( ["a", np.nan, "b", "a"], - False, np.array([0, 2, 1, 0], dtype=np.dtype("intp")), np.array(["a", "b", np.nan], dtype=object), ), ], ) - def test_object_factorize_dropna( - self, data, dropna, expected_codes, expected_uniques + def test_object_factorize_na_sentinel_none( + self, data, expected_codes, expected_uniques ): - codes, uniques = algos.factorize(data, dropna=dropna) + codes, uniques = algos.factorize(data, na_sentinel=None) tm.assert_numpy_array_equal(uniques, expected_uniques) tm.assert_numpy_array_equal(codes, expected_codes) @pytest.mark.parametrize( - "data, dropna, expected_codes, expected_uniques", + 
"data, expected_codes, expected_uniques", [ ( [1, None, 1, 2], - True, - np.array([0, -1, 0, 1], dtype=np.dtype("intp")), - np.array([1, 2], dtype="O"), - ), - ( - [1, np.nan, 1, 2], - True, - np.array([0, -1, 0, 1], dtype=np.dtype("intp")), - np.array([1, 2], dtype=np.float64), - ), - ( - [1, None, 1, 2], - False, np.array([0, 2, 0, 1], dtype=np.dtype("intp")), np.array([1, 2, np.nan], dtype="O"), ), ( [1, np.nan, 1, 2], - False, np.array([0, 2, 0, 1], dtype=np.dtype("intp")), np.array([1, 2, np.nan], dtype=np.float64), ), ], ) - def test_int_factorize_dropna(self, data, dropna, expected_codes, expected_uniques): - codes, uniques = algos.factorize(data, dropna=dropna) + def test_int_factorize_na_sentinel_none( + self, data, expected_codes, expected_uniques + ): + codes, uniques = algos.factorize(data, na_sentinel=None) tm.assert_numpy_array_equal(uniques, expected_uniques) tm.assert_numpy_array_equal(codes, expected_codes) From 7d047c24748be6d7efaaa7054fd581922d3c1781 Mon Sep 17 00:00:00 2001 From: Simon Hawkins Date: Wed, 2 Sep 2020 16:41:39 +0100 Subject: [PATCH 396/632] TYP: update setup.cfg (#36067) --- setup.cfg | 3 --- 1 file changed, 3 deletions(-) diff --git a/setup.cfg b/setup.cfg index 2d1c8037636de..29c731848de8e 100644 --- a/setup.cfg +++ b/setup.cfg @@ -321,9 +321,6 @@ check_untyped_defs=False [mypy-pandas.plotting._matplotlib.core] check_untyped_defs=False -[mypy-pandas.plotting._matplotlib.misc] -check_untyped_defs=False - [mypy-pandas.plotting._misc] check_untyped_defs=False From 09c6124caae58b51617e723b9926e749715f3d36 Mon Sep 17 00:00:00 2001 From: Simon Hawkins Date: Wed, 2 Sep 2020 16:42:30 +0100 Subject: [PATCH 397/632] TYP: statically define attributes in plotting._matplotlib.core (#36068) pandas\plotting\_matplotlib\core.py:231: error: "MPLPlot" has no attribute "style" [attr-defined] pandas\plotting\_matplotlib\core.py:232: error: "MPLPlot" has no attribute "style" [attr-defined] pandas\plotting\_matplotlib\core.py:233: error: "MPLPlot" has no attribute "style" [attr-defined] pandas\plotting\_matplotlib\core.py:235: error: "MPLPlot" has no attribute "style" [attr-defined] pandas\plotting\_matplotlib\core.py:385: error: "MPLPlot" has no attribute "label"; maybe "ylabel" or "xlabel"? 
[attr-defined] pandas\plotting\_matplotlib\core.py:553: error: "MPLPlot" has no attribute "mark_right" [attr-defined] pandas\plotting\_matplotlib\core.py:732: error: "MPLPlot" has no attribute "style" [attr-defined] pandas\plotting\_matplotlib\core.py:733: error: "MPLPlot" has no attribute "style" [attr-defined] pandas\plotting\_matplotlib\core.py:735: error: "MPLPlot" has no attribute "style" [attr-defined] pandas\plotting\_matplotlib\core.py:738: error: "MPLPlot" has no attribute "style" [attr-defined] pandas\plotting\_matplotlib\core.py:739: error: "MPLPlot" has no attribute "style" [attr-defined] pandas\plotting\_matplotlib\core.py:741: error: "MPLPlot" has no attribute "style" [attr-defined] pandas\plotting\_matplotlib\core.py:1008: error: "ScatterPlot" has no attribute "label" [attr-defined] pandas\plotting\_matplotlib\core.py:1075: error: "LinePlot" has no attribute "stacked" [attr-defined] pandas\plotting\_matplotlib\core.py:1180: error: "LinePlot" has no attribute "stacked" [attr-defined] pandas\plotting\_matplotlib\core.py:1269: error: "AreaPlot" has no attribute "stacked" [attr-defined] pandas\plotting\_matplotlib\core.py:1351: error: "BarPlot" has no attribute "stacked" [attr-defined] pandas\plotting\_matplotlib\core.py:1427: error: "BarPlot" has no attribute "stacked" [attr-defined] --- pandas/plotting/_matplotlib/core.py | 17 ++++------------- 1 file changed, 4 insertions(+), 13 deletions(-) diff --git a/pandas/plotting/_matplotlib/core.py b/pandas/plotting/_matplotlib/core.py index 93ba9bd26630b..5270c7362d29f 100644 --- a/pandas/plotting/_matplotlib/core.py +++ b/pandas/plotting/_matplotlib/core.py @@ -66,16 +66,6 @@ def _kind(self): _layout_type = "vertical" _default_rot = 0 orientation: Optional[str] = None - _pop_attributes = [ - "label", - "style", - "mark_right", - "stacked", - ] - _attr_defaults = { - "mark_right": True, - "stacked": False, - } def __init__( self, @@ -165,9 +155,10 @@ def __init__( self.logx = kwds.pop("logx", False) self.logy = kwds.pop("logy", False) self.loglog = kwds.pop("loglog", False) - for attr in self._pop_attributes: - value = kwds.pop(attr, self._attr_defaults.get(attr, None)) - setattr(self, attr, value) + self.label = kwds.pop("label", None) + self.style = kwds.pop("style", None) + self.mark_right = kwds.pop("mark_right", True) + self.stacked = kwds.pop("stacked", False) self.ax = ax self.fig = fig From c41e500b2e145dcd1a07196daff14181c88fc379 Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Wed, 2 Sep 2020 08:43:16 -0700 Subject: [PATCH 398/632] BUG: frame._item_cache not cleared when Series is altered (#36051) --- doc/source/whatsnew/v1.1.2.rst | 1 + pandas/core/generic.py | 4 ++++ pandas/tests/frame/test_missing.py | 15 +++++++++++---- .../tests/indexing/test_chaining_and_caching.py | 15 +++++++++++++++ 4 files changed, 31 insertions(+), 4 deletions(-) diff --git a/doc/source/whatsnew/v1.1.2.rst b/doc/source/whatsnew/v1.1.2.rst index fdfb084b47a89..c740c7b3882c9 100644 --- a/doc/source/whatsnew/v1.1.2.rst +++ b/doc/source/whatsnew/v1.1.2.rst @@ -32,6 +32,7 @@ Bug fixes - Bug in :meth:`DataFrame.apply` with ``result_type="reduce"`` returning with incorrect index (:issue:`35683`) - Bug in :meth:`DateTimeIndex.format` and :meth:`PeriodIndex.format` with ``name=True`` setting the first item to ``"None"`` where it should bw ``""`` (:issue:`35712`) - Bug in :meth:`Float64Index.__contains__` incorrectly raising ``TypeError`` instead of returning ``False`` (:issue:`35788`) +- Bug in :class:`DataFrame` indexing returning an incorrect 
:class:`Series` in some cases when the series has been altered and a cache not invalidated (:issue:`36051`) .. --------------------------------------------------------------------------- diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 233d48bfc85c3..486bea7cd1b47 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -3315,6 +3315,10 @@ def _maybe_update_cacher( if len(self) == len(ref): # otherwise, either self or ref has swapped in new arrays ref._maybe_cache_changed(cacher[0], self) + else: + # GH#33675 we have swapped in a new array, so parent + # reference to self is now invalid + ref._item_cache.pop(cacher[0], None) if verify_is_copy: self._check_setitem_copy(stacklevel=5, t="referant") diff --git a/pandas/tests/frame/test_missing.py b/pandas/tests/frame/test_missing.py index 9bf5d24085697..b4f91590e09d1 100644 --- a/pandas/tests/frame/test_missing.py +++ b/pandas/tests/frame/test_missing.py @@ -135,13 +135,20 @@ def test_drop_and_dropna_caching(self): df2 = df.copy() df["A"].dropna() tm.assert_series_equal(df["A"], original) - return_value = df["A"].dropna(inplace=True) - tm.assert_series_equal(df["A"], expected) + + ser = df["A"] + return_value = ser.dropna(inplace=True) + tm.assert_series_equal(ser, expected) + tm.assert_series_equal(df["A"], original) assert return_value is None + df2["A"].drop([1]) tm.assert_series_equal(df2["A"], original) - return_value = df2["A"].drop([1], inplace=True) - tm.assert_series_equal(df2["A"], original.drop([1])) + + ser = df2["A"] + return_value = ser.drop([1], inplace=True) + tm.assert_series_equal(ser, original.drop([1])) + tm.assert_series_equal(df2["A"], original) assert return_value is None def test_dropna_corner(self, float_frame): diff --git a/pandas/tests/indexing/test_chaining_and_caching.py b/pandas/tests/indexing/test_chaining_and_caching.py index fa5fe5ba5c384..9910ef1b04b1a 100644 --- a/pandas/tests/indexing/test_chaining_and_caching.py +++ b/pandas/tests/indexing/test_chaining_and_caching.py @@ -81,6 +81,21 @@ def test_setitem_cache_updating(self): tm.assert_frame_equal(out, expected) tm.assert_series_equal(out["A"], expected["A"]) + def test_altering_series_clears_parent_cache(self): + # GH #33675 + df = pd.DataFrame([[1, 2], [3, 4]], index=["a", "b"], columns=["A", "B"]) + ser = df["A"] + + assert "A" in df._item_cache + + # Adding a new entry to ser swaps in a new array, so "A" needs to + # be removed from df._item_cache + ser["c"] = 5 + assert len(ser) == 3 + assert "A" not in df._item_cache + assert df["A"] is not ser + assert len(df["A"]) == 2 + class TestChaining: def test_setitem_chained_setfault(self): From abe6ad85fd6791a73daea7bf1fa4d83c5b1d146a Mon Sep 17 00:00:00 2001 From: tiagohonorato <61059243+tiagohonorato@users.noreply.github.com> Date: Wed, 2 Sep 2020 13:14:00 -0300 Subject: [PATCH 399/632] CLN remove trailing commas (#36057) --- pandas/tests/arithmetic/test_interval.py | 4 +--- pandas/tests/arithmetic/test_numeric.py | 4 ++-- pandas/tests/arrays/boolean/test_logical.py | 4 +--- 3 files changed, 4 insertions(+), 8 deletions(-) diff --git a/pandas/tests/arithmetic/test_interval.py b/pandas/tests/arithmetic/test_interval.py index 50b5fe8e6f6b9..72ef7ea6bf8ca 100644 --- a/pandas/tests/arithmetic/test_interval.py +++ b/pandas/tests/arithmetic/test_interval.py @@ -156,9 +156,7 @@ def test_compare_scalar_other(self, op, array, other): expected = self.elementwise_comparison(op, array, other) tm.assert_numpy_array_equal(result, expected) - def test_compare_list_like_interval( - self, op, 
array, interval_constructor, - ): + def test_compare_list_like_interval(self, op, array, interval_constructor): # same endpoints other = interval_constructor(array.left, array.right) result = op(array, other) diff --git a/pandas/tests/arithmetic/test_numeric.py b/pandas/tests/arithmetic/test_numeric.py index 484f83deb0f55..ecac08ffe3ba2 100644 --- a/pandas/tests/arithmetic/test_numeric.py +++ b/pandas/tests/arithmetic/test_numeric.py @@ -99,7 +99,7 @@ class TestNumericArraylikeArithmeticWithDatetimeLike: # TODO: also check name retentention @pytest.mark.parametrize("box_cls", [np.array, pd.Index, pd.Series]) @pytest.mark.parametrize( - "left", lefts, ids=lambda x: type(x).__name__ + str(x.dtype), + "left", lefts, ids=lambda x: type(x).__name__ + str(x.dtype) ) def test_mul_td64arr(self, left, box_cls): # GH#22390 @@ -119,7 +119,7 @@ def test_mul_td64arr(self, left, box_cls): # TODO: also check name retentention @pytest.mark.parametrize("box_cls", [np.array, pd.Index, pd.Series]) @pytest.mark.parametrize( - "left", lefts, ids=lambda x: type(x).__name__ + str(x.dtype), + "left", lefts, ids=lambda x: type(x).__name__ + str(x.dtype) ) def test_div_td64arr(self, left, box_cls): # GH#22390 diff --git a/pandas/tests/arrays/boolean/test_logical.py b/pandas/tests/arrays/boolean/test_logical.py index e79262e1b7934..8ed1c27087b02 100644 --- a/pandas/tests/arrays/boolean/test_logical.py +++ b/pandas/tests/arrays/boolean/test_logical.py @@ -205,9 +205,7 @@ def test_kleene_xor_scalar(self, other, expected): a, pd.array([True, False, None], dtype="boolean") ) - @pytest.mark.parametrize( - "other", [True, False, pd.NA, [True, False, None] * 3], - ) + @pytest.mark.parametrize("other", [True, False, pd.NA, [True, False, None] * 3]) def test_no_masked_assumptions(self, other, all_logical_operators): # The logical operations should not assume that masked values are False! 
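# Concretely, Kleene logic keeps the result unknown rather than assuming False
# (a sketch, pandas >= 1.0):
import pandas as pd

arr = pd.array([True, False, None], dtype="boolean")
# True | NA is True, False | NA is NA, NA | NA is NA.
print(arr | pd.NA)  # [True, <NA>, <NA>]
# True & NA is NA, False & NA is False, NA & NA is NA.
print(arr & pd.NA)  # [<NA>, False, <NA>]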
a = pd.arrays.BooleanArray( From a0d92b65614629086ae15ed9af703a8b2494bece Mon Sep 17 00:00:00 2001 From: Chuanzhu Xu Date: Wed, 2 Sep 2020 12:19:48 -0400 Subject: [PATCH 400/632] CLN remove unnecessary trailing commas in groupby tests (#36059) --- pandas/tests/groupby/test_timegrouper.py | 2 +- pandas/tests/groupby/transform/test_numba.py | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/pandas/tests/groupby/test_timegrouper.py b/pandas/tests/groupby/test_timegrouper.py index 84fd7a1bdfb05..4ccbc6a65fd88 100644 --- a/pandas/tests/groupby/test_timegrouper.py +++ b/pandas/tests/groupby/test_timegrouper.py @@ -780,6 +780,6 @@ def test_grouper_period_index(self): result = period_series.groupby(period_series.index.month).sum() expected = pd.Series( - range(0, periods), index=Index(range(1, periods + 1), name=index.name), + range(0, periods), index=Index(range(1, periods + 1), name=index.name) ) tm.assert_series_equal(result, expected) diff --git a/pandas/tests/groupby/transform/test_numba.py b/pandas/tests/groupby/transform/test_numba.py index ee482571e644d..87723cd7c8f50 100644 --- a/pandas/tests/groupby/transform/test_numba.py +++ b/pandas/tests/groupby/transform/test_numba.py @@ -56,7 +56,7 @@ def func(values, index): func = numba.jit(func) data = DataFrame( - {0: ["a", "a", "b", "b", "a"], 1: [1.0, 2.0, 3.0, 4.0, 5.0]}, columns=[0, 1], + {0: ["a", "a", "b", "b", "a"], 1: [1.0, 2.0, 3.0, 4.0, 5.0]}, columns=[0, 1] ) engine_kwargs = {"nogil": nogil, "parallel": parallel, "nopython": nopython} grouped = data.groupby(0) @@ -89,7 +89,7 @@ def func_2(values, index): func_2 = numba.jit(func_2) data = DataFrame( - {0: ["a", "a", "b", "b", "a"], 1: [1.0, 2.0, 3.0, 4.0, 5.0]}, columns=[0, 1], + {0: ["a", "a", "b", "b", "a"], 1: [1.0, 2.0, 3.0, 4.0, 5.0]}, columns=[0, 1] ) engine_kwargs = {"nogil": nogil, "parallel": parallel, "nopython": nopython} grouped = data.groupby(0) @@ -120,7 +120,7 @@ def func_1(values, index): return values + 1 data = DataFrame( - {0: ["a", "a", "b", "b", "a"], 1: [1.0, 2.0, 3.0, 4.0, 5.0]}, columns=[0, 1], + {0: ["a", "a", "b", "b", "a"], 1: [1.0, 2.0, 3.0, 4.0, 5.0]}, columns=[0, 1] ) grouped = data.groupby(0) expected = grouped.transform(func_1, engine="numba") From 675a54164f444eea051e50285e8ba9fab061fd14 Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Wed, 2 Sep 2020 10:54:27 -0700 Subject: [PATCH 401/632] CLN: rename private functions used across modules (#36049) --- pandas/plotting/_matplotlib/boxplot.py | 16 +++++++-------- pandas/plotting/_matplotlib/core.py | 24 +++++++++++----------- pandas/plotting/_matplotlib/hist.py | 20 ++++++++++-------- pandas/plotting/_matplotlib/misc.py | 14 ++++++------- pandas/plotting/_matplotlib/style.py | 2 +- pandas/plotting/_matplotlib/tools.py | 20 +++++++++--------- pandas/tests/plotting/common.py | 16 +++++++-------- pandas/tests/plotting/test_misc.py | 20 +++++++++--------- pandas/tests/plotting/test_series.py | 28 +++++++++++++------------- 9 files changed, 82 insertions(+), 78 deletions(-) diff --git a/pandas/plotting/_matplotlib/boxplot.py b/pandas/plotting/_matplotlib/boxplot.py index 01fe98a6f5403..8ceba22b1f7a4 100644 --- a/pandas/plotting/_matplotlib/boxplot.py +++ b/pandas/plotting/_matplotlib/boxplot.py @@ -12,8 +12,8 @@ from pandas.io.formats.printing import pprint_thing from pandas.plotting._matplotlib.core import LinePlot, MPLPlot -from pandas.plotting._matplotlib.style import _get_standard_colors -from pandas.plotting._matplotlib.tools import _flatten, _subplots +from 
pandas.plotting._matplotlib.style import get_standard_colors +from pandas.plotting._matplotlib.tools import create_subplots, flatten_axes if TYPE_CHECKING: from matplotlib.axes import Axes @@ -84,7 +84,7 @@ def _validate_color_args(self): self.color = None # get standard colors for default - colors = _get_standard_colors(num_colors=3, colormap=self.colormap, color=None) + colors = get_standard_colors(num_colors=3, colormap=self.colormap, color=None) # use 2 colors by default, for box/whisker and median # flier colors isn't needed here # because it can be specified by ``sym`` kw @@ -200,11 +200,11 @@ def _grouped_plot_by_column( by = [by] columns = data._get_numeric_data().columns.difference(by) naxes = len(columns) - fig, axes = _subplots( + fig, axes = create_subplots( naxes=naxes, sharex=True, sharey=True, figsize=figsize, ax=ax, layout=layout ) - _axes = _flatten(axes) + _axes = flatten_axes(axes) ax_values = [] @@ -259,7 +259,7 @@ def _get_colors(): # num_colors=3 is required as method maybe_color_bp takes the colors # in positions 0 and 2. # if colors not provided, use same defaults as DataFrame.plot.box - result = _get_standard_colors(num_colors=3) + result = get_standard_colors(num_colors=3) result = np.take(result, [0, 0, 2]) result = np.append(result, "k") @@ -414,7 +414,7 @@ def boxplot_frame_groupby( ): if subplots is True: naxes = len(grouped) - fig, axes = _subplots( + fig, axes = create_subplots( naxes=naxes, squeeze=False, ax=ax, @@ -423,7 +423,7 @@ def boxplot_frame_groupby( figsize=figsize, layout=layout, ) - axes = _flatten(axes) + axes = flatten_axes(axes) ret = pd.Series(dtype=object) diff --git a/pandas/plotting/_matplotlib/core.py b/pandas/plotting/_matplotlib/core.py index 5270c7362d29f..2d64e1b051444 100644 --- a/pandas/plotting/_matplotlib/core.py +++ b/pandas/plotting/_matplotlib/core.py @@ -32,14 +32,14 @@ from pandas.io.formats.printing import pprint_thing from pandas.plotting._matplotlib.compat import _mpl_ge_3_0_0 from pandas.plotting._matplotlib.converter import register_pandas_matplotlib_converters -from pandas.plotting._matplotlib.style import _get_standard_colors +from pandas.plotting._matplotlib.style import get_standard_colors from pandas.plotting._matplotlib.tools import ( - _flatten, - _get_all_lines, - _get_xlim, - _handle_shared_axes, - _subplots, + create_subplots, + flatten_axes, format_date_labels, + get_all_lines, + get_xlim, + handle_shared_axes, table, ) @@ -306,7 +306,7 @@ def _maybe_right_yaxis(self, ax: "Axes", axes_num): def _setup_subplots(self): if self.subplots: - fig, axes = _subplots( + fig, axes = create_subplots( naxes=self.nseries, sharex=self.sharex, sharey=self.sharey, @@ -325,7 +325,7 @@ def _setup_subplots(self): fig.set_size_inches(self.figsize) axes = self.ax - axes = _flatten(axes) + axes = flatten_axes(axes) valid_log = {False, True, "sym", None} input_log = {self.logx, self.logy, self.loglog} @@ -457,7 +457,7 @@ def _adorn_subplots(self): if len(self.axes) > 0: all_axes = self._get_subplots() nrows, ncols = self._get_axes_layout() - _handle_shared_axes( + handle_shared_axes( axarr=all_axes, nplots=len(all_axes), naxes=nrows * ncols, @@ -744,7 +744,7 @@ def _get_colors(self, num_colors=None, color_kwds="color"): if num_colors is None: num_colors = self.nseries - return _get_standard_colors( + return get_standard_colors( num_colors=num_colors, colormap=self.colormap, color=self.kwds.get(color_kwds), @@ -1123,8 +1123,8 @@ def _make_plot(self): # reset of xlim should be used for ts data # TODO: GH28021, should find a way to 
change view limit on xaxis - lines = _get_all_lines(ax) - left, right = _get_xlim(lines) + lines = get_all_lines(ax) + left, right = get_xlim(lines) ax.set_xlim(left, right) @classmethod diff --git a/pandas/plotting/_matplotlib/hist.py b/pandas/plotting/_matplotlib/hist.py index ffd46d1b191db..89035552d4309 100644 --- a/pandas/plotting/_matplotlib/hist.py +++ b/pandas/plotting/_matplotlib/hist.py @@ -8,7 +8,11 @@ from pandas.io.formats.printing import pprint_thing from pandas.plotting._matplotlib.core import LinePlot, MPLPlot -from pandas.plotting._matplotlib.tools import _flatten, _set_ticks_props, _subplots +from pandas.plotting._matplotlib.tools import ( + create_subplots, + flatten_axes, + set_ticks_props, +) if TYPE_CHECKING: from matplotlib.axes import Axes @@ -198,11 +202,11 @@ def _grouped_plot( grouped = grouped[column] naxes = len(grouped) - fig, axes = _subplots( + fig, axes = create_subplots( naxes=naxes, figsize=figsize, sharex=sharex, sharey=sharey, ax=ax, layout=layout ) - _axes = _flatten(axes) + _axes = flatten_axes(axes) for i, (key, group) in enumerate(grouped): ax = _axes[i] @@ -286,7 +290,7 @@ def plot_group(group, ax): rot=rot, ) - _set_ticks_props( + set_ticks_props( axes, xlabelsize=xlabelsize, xrot=xrot, ylabelsize=ylabelsize, yrot=yrot ) @@ -337,7 +341,7 @@ def hist_series( ax.grid(grid) axes = np.array([ax]) - _set_ticks_props( + set_ticks_props( axes, xlabelsize=xlabelsize, xrot=xrot, ylabelsize=ylabelsize, yrot=yrot ) @@ -419,7 +423,7 @@ def hist_frame( if naxes == 0: raise ValueError("hist method requires numerical columns, nothing to plot.") - fig, axes = _subplots( + fig, axes = create_subplots( naxes=naxes, ax=ax, squeeze=False, @@ -428,7 +432,7 @@ def hist_frame( figsize=figsize, layout=layout, ) - _axes = _flatten(axes) + _axes = flatten_axes(axes) can_set_label = "label" not in kwds @@ -442,7 +446,7 @@ def hist_frame( if legend: ax.legend() - _set_ticks_props( + set_ticks_props( axes, xlabelsize=xlabelsize, xrot=xrot, ylabelsize=ylabelsize, yrot=yrot ) fig.subplots_adjust(wspace=0.3, hspace=0.3) diff --git a/pandas/plotting/_matplotlib/misc.py b/pandas/plotting/_matplotlib/misc.py index c5e7c55970c3e..a1c62f9fce23c 100644 --- a/pandas/plotting/_matplotlib/misc.py +++ b/pandas/plotting/_matplotlib/misc.py @@ -10,8 +10,8 @@ from pandas.core.dtypes.missing import notna from pandas.io.formats.printing import pprint_thing -from pandas.plotting._matplotlib.style import _get_standard_colors -from pandas.plotting._matplotlib.tools import _set_ticks_props, _subplots +from pandas.plotting._matplotlib.style import get_standard_colors +from pandas.plotting._matplotlib.tools import create_subplots, set_ticks_props if TYPE_CHECKING: from matplotlib.axes import Axes @@ -36,7 +36,7 @@ def scatter_matrix( df = frame._get_numeric_data() n = df.columns.size naxes = n * n - fig, axes = _subplots(naxes=naxes, figsize=figsize, ax=ax, squeeze=False) + fig, axes = create_subplots(naxes=naxes, figsize=figsize, ax=ax, squeeze=False) # no gaps between subplots fig.subplots_adjust(wspace=0, hspace=0) @@ -112,7 +112,7 @@ def scatter_matrix( locs = locs.astype(int) axes[0][0].yaxis.set_ticklabels(locs) - _set_ticks_props(axes, xlabelsize=8, xrot=90, ylabelsize=8, yrot=0) + set_ticks_props(axes, xlabelsize=8, xrot=90, ylabelsize=8, yrot=0) return axes @@ -147,7 +147,7 @@ def normalize(series): ax = plt.gca(xlim=[-1, 1], ylim=[-1, 1]) to_plot: Dict[Label, List[List]] = {} - colors = _get_standard_colors( + colors = get_standard_colors( num_colors=len(classes), colormap=colormap, 
color_type="random", color=color ) @@ -255,7 +255,7 @@ def f(t): t = np.linspace(-np.pi, np.pi, samples) used_legends: Set[str] = set() - color_values = _get_standard_colors( + color_values = get_standard_colors( num_colors=len(classes), colormap=colormap, color_type="random", color=color ) colors = dict(zip(classes, color_values)) @@ -382,7 +382,7 @@ def parallel_coordinates( if ax is None: ax = plt.gca() - color_values = _get_standard_colors( + color_values = get_standard_colors( num_colors=len(classes), colormap=colormap, color_type="random", color=color ) diff --git a/pandas/plotting/_matplotlib/style.py b/pandas/plotting/_matplotlib/style.py index 5f1105f0e4233..904a760a03e58 100644 --- a/pandas/plotting/_matplotlib/style.py +++ b/pandas/plotting/_matplotlib/style.py @@ -10,7 +10,7 @@ import pandas.core.common as com -def _get_standard_colors( +def get_standard_colors( num_colors=None, colormap=None, color_type: str = "default", color=None ): import matplotlib.pyplot as plt diff --git a/pandas/plotting/_matplotlib/tools.py b/pandas/plotting/_matplotlib/tools.py index 4d643ffb734e4..98aaab6838fba 100644 --- a/pandas/plotting/_matplotlib/tools.py +++ b/pandas/plotting/_matplotlib/tools.py @@ -100,7 +100,7 @@ def _get_layout(nplots: int, layout=None, layout_type: str = "box") -> Tuple[int # copied from matplotlib/pyplot.py and modified for pandas.plotting -def _subplots( +def create_subplots( naxes: int, sharex: bool = False, sharey: bool = False, @@ -194,7 +194,7 @@ def _subplots( fig = plt.figure(**fig_kw) else: if is_list_like(ax): - ax = _flatten(ax) + ax = flatten_axes(ax) if layout is not None: warnings.warn( "When passing multiple axes, layout keyword is ignored", UserWarning @@ -221,7 +221,7 @@ def _subplots( if squeeze: return fig, ax else: - return fig, _flatten(ax) + return fig, flatten_axes(ax) else: warnings.warn( "To output multiple subplots, the figure containing " @@ -264,7 +264,7 @@ def _subplots( for ax in axarr[naxes:]: ax.set_visible(False) - _handle_shared_axes(axarr, nplots, naxes, nrows, ncols, sharex, sharey) + handle_shared_axes(axarr, nplots, naxes, nrows, ncols, sharex, sharey) if squeeze: # Reshape the array to have the final desired dimension (nrow,ncol), @@ -297,7 +297,7 @@ def _remove_labels_from_axis(axis: "Axis"): axis.get_label().set_visible(False) -def _handle_shared_axes( +def handle_shared_axes( axarr: Iterable["Axes"], nplots: int, naxes: int, @@ -351,7 +351,7 @@ def _handle_shared_axes( _remove_labels_from_axis(ax.yaxis) -def _flatten(axes: Union["Axes", Sequence["Axes"]]) -> Sequence["Axes"]: +def flatten_axes(axes: Union["Axes", Sequence["Axes"]]) -> Sequence["Axes"]: if not is_list_like(axes): return np.array([axes]) elif isinstance(axes, (np.ndarray, ABCIndexClass)): @@ -359,7 +359,7 @@ def _flatten(axes: Union["Axes", Sequence["Axes"]]) -> Sequence["Axes"]: return np.array(axes) -def _set_ticks_props( +def set_ticks_props( axes: Union["Axes", Sequence["Axes"]], xlabelsize=None, xrot=None, @@ -368,7 +368,7 @@ def _set_ticks_props( ): import matplotlib.pyplot as plt - for ax in _flatten(axes): + for ax in flatten_axes(axes): if xlabelsize is not None: plt.setp(ax.get_xticklabels(), fontsize=xlabelsize) if xrot is not None: @@ -380,7 +380,7 @@ def _set_ticks_props( return axes -def _get_all_lines(ax: "Axes") -> List["Line2D"]: +def get_all_lines(ax: "Axes") -> List["Line2D"]: lines = ax.get_lines() if hasattr(ax, "right_ax"): @@ -392,7 +392,7 @@ def _get_all_lines(ax: "Axes") -> List["Line2D"]: return lines -def _get_xlim(lines: 
Iterable["Line2D"]) -> Tuple[float, float]: +def get_xlim(lines: Iterable["Line2D"]) -> Tuple[float, float]: left, right = np.inf, -np.inf for l in lines: x = l.get_xdata(orig=False) diff --git a/pandas/tests/plotting/common.py b/pandas/tests/plotting/common.py index 3b1ff233c5ec1..b753c96af6290 100644 --- a/pandas/tests/plotting/common.py +++ b/pandas/tests/plotting/common.py @@ -13,13 +13,13 @@ from pandas import DataFrame, Series import pandas._testing as tm -""" -This is a common base class used for various plotting tests -""" - @td.skip_if_no_mpl class TestPlotBase: + """ + This is a common base class used for various plotting tests + """ + def setup_method(self, method): import matplotlib as mpl @@ -330,7 +330,7 @@ def _check_axes_shape(self, axes, axes_num=None, layout=None, figsize=None): figsize : tuple expected figsize. default is matplotlib default """ - from pandas.plotting._matplotlib.tools import _flatten + from pandas.plotting._matplotlib.tools import flatten_axes if figsize is None: figsize = self.default_figsize @@ -343,7 +343,7 @@ def _check_axes_shape(self, axes, axes_num=None, layout=None, figsize=None): assert len(ax.get_children()) > 0 if layout is not None: - result = self._get_axes_layout(_flatten(axes)) + result = self._get_axes_layout(flatten_axes(axes)) assert result == layout tm.assert_numpy_array_equal( @@ -370,9 +370,9 @@ def _flatten_visible(self, axes): axes : matplotlib Axes object, or its list-like """ - from pandas.plotting._matplotlib.tools import _flatten + from pandas.plotting._matplotlib.tools import flatten_axes - axes = _flatten(axes) + axes = flatten_axes(axes) axes = [ax for ax in axes if ax.get_visible()] return axes diff --git a/pandas/tests/plotting/test_misc.py b/pandas/tests/plotting/test_misc.py index f5c1c58f3f7ed..130acaa8bcd58 100644 --- a/pandas/tests/plotting/test_misc.py +++ b/pandas/tests/plotting/test_misc.py @@ -353,7 +353,7 @@ def test_get_standard_colors_random_seed(self): # GH17525 df = DataFrame(np.zeros((10, 10))) - # Make sure that the random seed isn't reset by _get_standard_colors + # Make sure that the random seed isn't reset by get_standard_colors plotting.parallel_coordinates(df, 0) rand1 = random.random() plotting.parallel_coordinates(df, 0) @@ -361,19 +361,19 @@ def test_get_standard_colors_random_seed(self): assert rand1 != rand2 # Make sure it produces the same colors every time it's called - from pandas.plotting._matplotlib.style import _get_standard_colors + from pandas.plotting._matplotlib.style import get_standard_colors - color1 = _get_standard_colors(1, color_type="random") - color2 = _get_standard_colors(1, color_type="random") + color1 = get_standard_colors(1, color_type="random") + color2 = get_standard_colors(1, color_type="random") assert color1 == color2 def test_get_standard_colors_default_num_colors(self): - from pandas.plotting._matplotlib.style import _get_standard_colors + from pandas.plotting._matplotlib.style import get_standard_colors # Make sure the default color_types returns the specified amount - color1 = _get_standard_colors(1, color_type="default") - color2 = _get_standard_colors(9, color_type="default") - color3 = _get_standard_colors(20, color_type="default") + color1 = get_standard_colors(1, color_type="default") + color2 = get_standard_colors(9, color_type="default") + color3 = get_standard_colors(20, color_type="default") assert len(color1) == 1 assert len(color2) == 9 assert len(color3) == 20 @@ -401,10 +401,10 @@ def test_get_standard_colors_no_appending(self): # correctly. 
from matplotlib import cm - from pandas.plotting._matplotlib.style import _get_standard_colors + from pandas.plotting._matplotlib.style import get_standard_colors color_before = cm.gnuplot(range(5)) - color_after = _get_standard_colors(1, color=color_before) + color_after = get_standard_colors(1, color=color_before) assert len(color_after) == len(color_before) df = DataFrame(np.random.randn(48, 4), columns=list("ABCD")) diff --git a/pandas/tests/plotting/test_series.py b/pandas/tests/plotting/test_series.py index cc00626e992f3..c296e2a6278c5 100644 --- a/pandas/tests/plotting/test_series.py +++ b/pandas/tests/plotting/test_series.py @@ -809,53 +809,53 @@ def test_series_grid_settings(self): @pytest.mark.slow def test_standard_colors(self): - from pandas.plotting._matplotlib.style import _get_standard_colors + from pandas.plotting._matplotlib.style import get_standard_colors for c in ["r", "red", "green", "#FF0000"]: - result = _get_standard_colors(1, color=c) + result = get_standard_colors(1, color=c) assert result == [c] - result = _get_standard_colors(1, color=[c]) + result = get_standard_colors(1, color=[c]) assert result == [c] - result = _get_standard_colors(3, color=c) + result = get_standard_colors(3, color=c) assert result == [c] * 3 - result = _get_standard_colors(3, color=[c]) + result = get_standard_colors(3, color=[c]) assert result == [c] * 3 @pytest.mark.slow def test_standard_colors_all(self): import matplotlib.colors as colors - from pandas.plotting._matplotlib.style import _get_standard_colors + from pandas.plotting._matplotlib.style import get_standard_colors # multiple colors like mediumaquamarine for c in colors.cnames: - result = _get_standard_colors(num_colors=1, color=c) + result = get_standard_colors(num_colors=1, color=c) assert result == [c] - result = _get_standard_colors(num_colors=1, color=[c]) + result = get_standard_colors(num_colors=1, color=[c]) assert result == [c] - result = _get_standard_colors(num_colors=3, color=c) + result = get_standard_colors(num_colors=3, color=c) assert result == [c] * 3 - result = _get_standard_colors(num_colors=3, color=[c]) + result = get_standard_colors(num_colors=3, color=[c]) assert result == [c] * 3 # single letter colors like k for c in colors.ColorConverter.colors: - result = _get_standard_colors(num_colors=1, color=c) + result = get_standard_colors(num_colors=1, color=c) assert result == [c] - result = _get_standard_colors(num_colors=1, color=[c]) + result = get_standard_colors(num_colors=1, color=[c]) assert result == [c] - result = _get_standard_colors(num_colors=3, color=c) + result = get_standard_colors(num_colors=3, color=c) assert result == [c] * 3 - result = _get_standard_colors(num_colors=3, color=[c]) + result = get_standard_colors(num_colors=3, color=[c]) assert result == [c] * 3 def test_series_plot_color_kwargs(self): From caca6e18dc80adaf08f3f6cc85c070a0244aac11 Mon Sep 17 00:00:00 2001 From: Simon Hawkins Date: Wed, 2 Sep 2020 19:44:39 +0100 Subject: [PATCH 402/632] TYP: misc typing in core\indexes\base.py (#35991) --- pandas/core/frame.py | 6 ++-- pandas/core/indexes/base.py | 51 ++++++++++++++++++++++++++------- pandas/core/indexes/interval.py | 6 +++- 3 files changed, 48 insertions(+), 15 deletions(-) diff --git a/pandas/core/frame.py b/pandas/core/frame.py index e78c15d125e8d..5b8c421db3ce1 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -1776,13 +1776,13 @@ def from_records( arrays = [data[k] for k in columns] else: arrays = [] - arr_columns = [] + arr_columns_list = [] for k, v in 
data.items(): if k in columns: - arr_columns.append(k) + arr_columns_list.append(k) arrays.append(v) - arrays, arr_columns = reorder_arrays(arrays, arr_columns, columns) + arrays, arr_columns = reorder_arrays(arrays, arr_columns_list, columns) elif isinstance(data, (np.ndarray, DataFrame)): arrays, columns = to_arrays(data, columns) diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index a07c3328def54..48b02fc525cc1 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -10,6 +10,8 @@ Hashable, List, Optional, + Sequence, + TypeVar, Union, ) import warnings @@ -22,7 +24,7 @@ from pandas._libs.tslibs import OutOfBoundsDatetime, Timestamp from pandas._libs.tslibs.period import IncompatibleFrequency from pandas._libs.tslibs.timezones import tz_compare -from pandas._typing import DtypeObj, Label +from pandas._typing import AnyArrayLike, Dtype, DtypeObj, Label from pandas.compat import set_function_name from pandas.compat.numpy import function as nv from pandas.errors import InvalidIndexError @@ -98,7 +100,7 @@ ) if TYPE_CHECKING: - from pandas import Series + from pandas import RangeIndex, Series __all__ = ["Index"] @@ -188,6 +190,9 @@ def _new_Index(cls, d): return cls.__new__(cls, **d) +_IndexT = TypeVar("_IndexT", bound="Index") + + class Index(IndexOpsMixin, PandasObject): """ Immutable ndarray implementing an ordered, sliceable set. The basic object @@ -787,7 +792,13 @@ def repeat(self, repeats, axis=None): # -------------------------------------------------------------------- # Copying Methods - def copy(self, name=None, deep=False, dtype=None, names=None): + def copy( + self: _IndexT, + name: Optional[Label] = None, + deep: bool = False, + dtype: Optional[Dtype] = None, + names: Optional[Sequence[Label]] = None, + ) -> _IndexT: """ Make a copy of this object. @@ -949,10 +960,9 @@ def _format_with_header( # could have nans mask = isna(values) if mask.any(): - result = np.array(result) - result[mask] = na_rep - # error: "List[str]" has no attribute "tolist" - result = result.tolist() # type: ignore[attr-defined] + result_arr = np.array(result) + result_arr[mask] = na_rep + result = result_arr.tolist() else: result = trim_front(format_array(values, None, justify="left")) return header + result @@ -4913,7 +4923,13 @@ def _get_string_slice(self, key: str_t, use_lhs: bool = True, use_rhs: bool = Tr # overridden in DatetimeIndex, TimedeltaIndex and PeriodIndex raise NotImplementedError - def slice_indexer(self, start=None, end=None, step=None, kind=None): + def slice_indexer( + self, + start: Optional[Label] = None, + end: Optional[Label] = None, + step: Optional[int] = None, + kind: Optional[str_t] = None, + ) -> slice: """ Compute the slice indexer for input labels and step. @@ -5513,7 +5529,9 @@ def ensure_index_from_sequences(sequences, names=None): return MultiIndex.from_arrays(sequences, names=names) -def ensure_index(index_like, copy: bool = False): +def ensure_index( + index_like: Union[AnyArrayLike, Sequence], copy: bool = False +) -> Index: """ Ensure that we have an index from some index-like object. 
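(Illustrative aside, not part of the patch above: the ``copy(self: _IndexT) -> _IndexT`` signature uses the bound-TypeVar "self type" idiom, which tells a type checker that copying an ``Index`` subclass yields that same subclass rather than plain ``Index``. A minimal, self-contained sketch of the idiom follows; the ``Base``/``Sub`` names are hypothetical stand-ins for the pandas hierarchy.)

from typing import TypeVar

_T = TypeVar("_T", bound="Base")


class Base:
    def copy(self: _T) -> _T:
        # Annotating ``self`` with the bound TypeVar ties the return type
        # to the concrete class the method is invoked on.
        import copy as copy_module

        return copy_module.copy(self)


class Sub(Base):
    pass


sub_copy: Sub = Sub().copy()  # a type checker infers Sub here, not Base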
@@ -5549,7 +5567,18 @@ def ensure_index(index_like, copy: bool = False): index_like = index_like.copy() return index_like if hasattr(index_like, "name"): - return Index(index_like, name=index_like.name, copy=copy) + # https://github.com/python/mypy/issues/1424 + # error: Item "ExtensionArray" of "Union[ExtensionArray, + # Sequence[Any]]" has no attribute "name" [union-attr] + # error: Item "Sequence[Any]" of "Union[ExtensionArray, Sequence[Any]]" + # has no attribute "name" [union-attr] + # error: "Sequence[Any]" has no attribute "name" [attr-defined] + # error: Item "Sequence[Any]" of "Union[Series, Sequence[Any]]" has no + # attribute "name" [union-attr] + # error: Item "Sequence[Any]" of "Union[Any, Sequence[Any]]" has no + # attribute "name" [union-attr] + name = index_like.name # type: ignore[union-attr, attr-defined] + return Index(index_like, name=name, copy=copy) if is_iterator(index_like): index_like = list(index_like) @@ -5604,7 +5633,7 @@ def _validate_join_method(method: str): raise ValueError(f"do not recognize join method {method}") -def default_index(n): +def default_index(n: int) -> "RangeIndex": from pandas.core.indexes.range import RangeIndex return RangeIndex(0, n, name=None) diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index 5d309ef7cd515..08f9bd51de77b 100644 --- a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -1,7 +1,7 @@ """ define the IntervalIndex """ from operator import le, lt import textwrap -from typing import Any, List, Optional, Tuple, Union +from typing import TYPE_CHECKING, Any, List, Optional, Tuple, Union, cast import numpy as np @@ -56,6 +56,9 @@ from pandas.core.indexes.timedeltas import TimedeltaIndex, timedelta_range from pandas.core.ops import get_op_result_name +if TYPE_CHECKING: + from pandas import CategoricalIndex + _VALID_CLOSED = {"left", "right", "both", "neither"} _index_doc_kwargs = dict(ibase._index_doc_kwargs) @@ -786,6 +789,7 @@ def get_indexer( right_indexer = self.right.get_indexer(target_as_index.right) indexer = np.where(left_indexer == right_indexer, left_indexer, -1) elif is_categorical_dtype(target_as_index.dtype): + target_as_index = cast("CategoricalIndex", target_as_index) # get an indexer for unique categories then propagate to codes via take_1d categories_indexer = self.get_indexer(target_as_index.categories) indexer = take_1d(categories_indexer, target_as_index.codes, fill_value=-1) From f635d17030b60b0d710a2322d12e5390c655b19d Mon Sep 17 00:00:00 2001 From: Byron Boulton Date: Wed, 2 Sep 2020 15:09:22 -0400 Subject: [PATCH 403/632] DOC: Fix typo of `=!` to `!=` in docstring (#36077) This fixes GH36075. --- pandas/core/ops/docstrings.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pandas/core/ops/docstrings.py b/pandas/core/ops/docstrings.py index 99c2fefc97ae7..e3a68ad328d55 100644 --- a/pandas/core/ops/docstrings.py +++ b/pandas/core/ops/docstrings.py @@ -611,7 +611,7 @@ def _make_flex_doc(op_name, typ): Among flexible wrappers (`eq`, `ne`, `le`, `lt`, `ge`, `gt`) to comparison operators. -Equivalent to `==`, `=!`, `<=`, `<`, `>=`, `>` with support to choose axis +Equivalent to `==`, `!=`, `<=`, `<`, `>=`, `>` with support to choose axis (rows or columns) and level for comparison. 
Parameters From 00483a17d84ef7f9254622cd8ce0f85b5d3eecf9 Mon Sep 17 00:00:00 2001 From: Sarthak Vineet Kumar Date: Thu, 3 Sep 2020 00:51:29 +0530 Subject: [PATCH 404/632] cleared commas (#36073) Co-authored-by: Sarthak --- pandas/tests/scalar/test_na_scalar.py | 10 +++------- pandas/tests/scalar/timestamp/test_arithmetic.py | 4 ++-- pandas/tests/series/methods/test_argsort.py | 2 +- pandas/tests/series/methods/test_convert_dtypes.py | 4 ++-- pandas/tests/series/methods/test_drop_duplicates.py | 2 +- pandas/tests/series/methods/test_interpolate.py | 4 ++-- pandas/tests/series/methods/test_unstack.py | 6 ++---- pandas/tests/series/test_cumulative.py | 2 +- pandas/tests/test_algos.py | 2 +- 9 files changed, 15 insertions(+), 21 deletions(-) diff --git a/pandas/tests/scalar/test_na_scalar.py b/pandas/tests/scalar/test_na_scalar.py index dc5eb15348c1b..0a7dfbee4e672 100644 --- a/pandas/tests/scalar/test_na_scalar.py +++ b/pandas/tests/scalar/test_na_scalar.py @@ -111,7 +111,7 @@ def test_pow_special(value, asarray): @pytest.mark.parametrize( - "value", [1, 1.0, True, np.bool_(True), np.int_(1), np.float_(1)], + "value", [1, 1.0, True, np.bool_(True), np.int_(1), np.float_(1)] ) @pytest.mark.parametrize("asarray", [True, False]) def test_rpow_special(value, asarray): @@ -128,9 +128,7 @@ def test_rpow_special(value, asarray): assert result == value -@pytest.mark.parametrize( - "value", [-1, -1.0, np.int_(-1), np.float_(-1)], -) +@pytest.mark.parametrize("value", [-1, -1.0, np.int_(-1), np.float_(-1)]) @pytest.mark.parametrize("asarray", [True, False]) def test_rpow_minus_one(value, asarray): if asarray: @@ -193,9 +191,7 @@ def test_logical_not(): assert ~NA is NA -@pytest.mark.parametrize( - "shape", [(3,), (3, 3), (1, 2, 3)], -) +@pytest.mark.parametrize("shape", [(3,), (3, 3), (1, 2, 3)]) def test_arithmetic_ndarray(shape, all_arithmetic_functions): op = all_arithmetic_functions a = np.zeros(shape) diff --git a/pandas/tests/scalar/timestamp/test_arithmetic.py b/pandas/tests/scalar/timestamp/test_arithmetic.py index 954301b979074..1e980b6e4559c 100644 --- a/pandas/tests/scalar/timestamp/test_arithmetic.py +++ b/pandas/tests/scalar/timestamp/test_arithmetic.py @@ -213,7 +213,7 @@ def test_add_int_with_freq(self, ts, other): with pytest.raises(TypeError, match=msg): other - ts - @pytest.mark.parametrize("shape", [(6,), (2, 3,)]) + @pytest.mark.parametrize("shape", [(6,), (2, 3)]) def test_addsub_m8ndarray(self, shape): # GH#33296 ts = Timestamp("2020-04-04 15:45") @@ -237,7 +237,7 @@ def test_addsub_m8ndarray(self, shape): with pytest.raises(TypeError, match=msg): other - ts - @pytest.mark.parametrize("shape", [(6,), (2, 3,)]) + @pytest.mark.parametrize("shape", [(6,), (2, 3)]) def test_addsub_m8ndarray_tzaware(self, shape): # GH#33296 ts = Timestamp("2020-04-04 15:45", tz="US/Pacific") diff --git a/pandas/tests/series/methods/test_argsort.py b/pandas/tests/series/methods/test_argsort.py index 4353eb4c8cd64..ec9ba468c996c 100644 --- a/pandas/tests/series/methods/test_argsort.py +++ b/pandas/tests/series/methods/test_argsort.py @@ -9,7 +9,7 @@ class TestSeriesArgsort: def _check_accum_op(self, name, ser, check_dtype=True): func = getattr(np, name) tm.assert_numpy_array_equal( - func(ser).values, func(np.array(ser)), check_dtype=check_dtype, + func(ser).values, func(np.array(ser)), check_dtype=check_dtype ) # with missing values diff --git a/pandas/tests/series/methods/test_convert_dtypes.py b/pandas/tests/series/methods/test_convert_dtypes.py index dd4bf642e68e8..8a915324a72c1 100644 --- 
a/pandas/tests/series/methods/test_convert_dtypes.py
+++ b/pandas/tests/series/methods/test_convert_dtypes.py
@@ -219,10 +219,10 @@ class TestSeriesConvertDtypes:
             pd.to_datetime(["2020-01-14 10:00", "2020-01-15 11:11"]),
             object,
             {
-                ((True,), (True, False), (True, False), (True, False),): np.dtype(
+                ((True,), (True, False), (True, False), (True, False)): np.dtype(
                     "datetime64[ns]"
                 ),
-                ((False,), (True, False), (True, False), (True, False),): np.dtype(
+                ((False,), (True, False), (True, False), (True, False)): np.dtype(
                     "O"
                 ),
             },
diff --git a/pandas/tests/series/methods/test_drop_duplicates.py b/pandas/tests/series/methods/test_drop_duplicates.py
index 40651c4342e8a..6eb0e09f12658 100644
--- a/pandas/tests/series/methods/test_drop_duplicates.py
+++ b/pandas/tests/series/methods/test_drop_duplicates.py
@@ -141,7 +141,7 @@ def test_drop_duplicates_categorical_non_bool(self, dtype, ordered):
     def test_drop_duplicates_categorical_bool(self, ordered):
         tc = Series(
             Categorical(
-                [True, False, True, False], categories=[True, False], ordered=ordered,
+                [True, False, True, False], categories=[True, False], ordered=ordered
             )
         )
 
diff --git a/pandas/tests/series/methods/test_interpolate.py b/pandas/tests/series/methods/test_interpolate.py
index c4b10e0ccdc3e..cba9443005f2f 100644
--- a/pandas/tests/series/methods/test_interpolate.py
+++ b/pandas/tests/series/methods/test_interpolate.py
@@ -30,7 +30,7 @@
     ]
 )
 def nontemporal_method(request):
-    """ Fixture that returns an (method name, required kwargs) pair.
+    """Fixture that returns a (method name, required kwargs) pair.
 
     This fixture does not include method 'time' as a parameterization; that
     method requires a Series with a DatetimeIndex, and is generally tested
@@ -60,7 +60,7 @@
     ]
 )
 def interp_methods_ind(request):
-    """ Fixture that returns a (method name, required kwargs) pair to
+    """Fixture that returns a (method name, required kwargs) pair to
     be tested for various Index types.
 
This fixture does not include methods - 'time', 'index', 'nearest', diff --git a/pandas/tests/series/methods/test_unstack.py b/pandas/tests/series/methods/test_unstack.py index cdf6a16e88ad0..d651315d64561 100644 --- a/pandas/tests/series/methods/test_unstack.py +++ b/pandas/tests/series/methods/test_unstack.py @@ -75,9 +75,7 @@ def test_unstack_tuplename_in_multiindex(): expected = pd.DataFrame( [[1, 1, 1], [1, 1, 1], [1, 1, 1]], - columns=pd.MultiIndex.from_tuples( - [("a",), ("b",), ("c",)], names=[("A", "a")], - ), + columns=pd.MultiIndex.from_tuples([("a",), ("b",), ("c",)], names=[("A", "a")]), index=pd.Index([1, 2, 3], name=("B", "b")), ) tm.assert_frame_equal(result, expected) @@ -115,7 +113,7 @@ def test_unstack_mixed_type_name_in_multiindex( result = ser.unstack(unstack_idx) expected = pd.DataFrame( - expected_values, columns=expected_columns, index=expected_index, + expected_values, columns=expected_columns, index=expected_index ) tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/series/test_cumulative.py b/pandas/tests/series/test_cumulative.py index 0b4c5f091106a..e070b86717503 100644 --- a/pandas/tests/series/test_cumulative.py +++ b/pandas/tests/series/test_cumulative.py @@ -17,7 +17,7 @@ def _check_accum_op(name, series, check_dtype=True): func = getattr(np, name) tm.assert_numpy_array_equal( - func(series).values, func(np.array(series)), check_dtype=check_dtype, + func(series).values, func(np.array(series)), check_dtype=check_dtype ) # with missing values diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py index b4e97f1e341e4..59c6a5d53e7bb 100644 --- a/pandas/tests/test_algos.py +++ b/pandas/tests/test_algos.py @@ -944,7 +944,7 @@ def test_isin_int_df_string_search(self): @pytest.mark.xfail(reason="problem related with issue #34125") def test_isin_nan_df_string_search(self): """Comparing df with nan value (np.nan,2) with a string at isin() ("NaN") - -> should not match values because np.nan is not equal str NaN """ + -> should not match values because np.nan is not equal str NaN""" df = pd.DataFrame({"values": [np.nan, 2]}) result = df.isin(["NaN"]) expected_false = pd.DataFrame({"values": [False, False]}) From 52272450b131b916ca3a15c81e8e0986d86fda64 Mon Sep 17 00:00:00 2001 From: Simon Hawkins Date: Wed, 2 Sep 2020 22:31:48 +0100 Subject: [PATCH 405/632] TYP: Postponed Evaluation of Annotations (PEP 563) (#36034) --- pandas/core/algorithms.py | 10 ++++++---- pandas/core/construction.py | 13 ++++++------- pandas/core/frame.py | 5 +++-- pandas/core/generic.py | 16 +++++++++------- 4 files changed, 24 insertions(+), 20 deletions(-) diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py index d2af6c132eca2..9d75d21c5637a 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py @@ -2,6 +2,8 @@ Generic data algorithms. This module is experimental at the moment and not intended for public consumption """ +from __future__ import annotations + import operator from textwrap import dedent from typing import TYPE_CHECKING, Dict, Optional, Tuple, Union @@ -707,7 +709,7 @@ def value_counts( normalize: bool = False, bins=None, dropna: bool = True, -) -> "Series": +) -> Series: """ Compute a histogram of the counts of non-null values. @@ -849,7 +851,7 @@ def duplicated(values, keep="first") -> np.ndarray: return f(values, keep=keep) -def mode(values, dropna: bool = True) -> "Series": +def mode(values, dropna: bool = True) -> Series: """ Returns the mode(s) of an array. 
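(Illustrative aside, not from the patch: a minimal sketch of what the ``from __future__ import annotations`` lines added throughout this commit enable. Under PEP 563 annotations are stored as strings and evaluated lazily, so names imported only under ``TYPE_CHECKING`` can be used unquoted in signatures. The ``first_n`` function below is hypothetical.)

from __future__ import annotations

from typing import TYPE_CHECKING

if TYPE_CHECKING:
    # Imported for the type checker only; there is no runtime import cost.
    from pandas import Series


def first_n(s: Series, n: int = 5) -> Series:  # no quotes needed around Series
    return s.iloc[:n]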
@@ -1161,7 +1163,7 @@ class SelectNSeries(SelectN): nordered : Series """ - def compute(self, method: str) -> "Series": + def compute(self, method: str) -> Series: n = self.n dtype = self.obj.dtype @@ -1235,7 +1237,7 @@ def __init__(self, obj, n: int, keep: str, columns): columns = list(columns) self.columns = columns - def compute(self, method: str) -> "DataFrame": + def compute(self, method: str) -> DataFrame: from pandas import Int64Index diff --git a/pandas/core/construction.py b/pandas/core/construction.py index f145e76046bee..02b8ed17244cd 100644 --- a/pandas/core/construction.py +++ b/pandas/core/construction.py @@ -4,6 +4,7 @@ These should not depend on core.internals. """ +from __future__ import annotations from collections import abc from typing import TYPE_CHECKING, Any, Optional, Sequence, Union, cast @@ -49,16 +50,14 @@ import pandas.core.common as com if TYPE_CHECKING: - from pandas.core.arrays import ExtensionArray # noqa: F401 - from pandas.core.indexes.api import Index # noqa: F401 - from pandas.core.series import Series # noqa: F401 + from pandas import ExtensionArray, Index, Series def array( data: Union[Sequence[object], AnyArrayLike], dtype: Optional[Dtype] = None, copy: bool = True, -) -> "ExtensionArray": +) -> ExtensionArray: """ Create an array. @@ -389,7 +388,7 @@ def extract_array(obj, extract_numpy: bool = False): def sanitize_array( data, - index: Optional["Index"], + index: Optional[Index], dtype: Optional[DtypeObj] = None, copy: bool = False, raise_cast_failure: bool = False, @@ -594,13 +593,13 @@ def is_empty_data(data: Any) -> bool: def create_series_with_explicit_dtype( data: Any = None, - index: Optional[Union[ArrayLike, "Index"]] = None, + index: Optional[Union[ArrayLike, Index]] = None, dtype: Optional[Dtype] = None, name: Optional[str] = None, copy: bool = False, fastpath: bool = False, dtype_if_empty: Dtype = object, -) -> "Series": +) -> Series: """ Helper to pass an explicit dtype when instantiating an empty Series. diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 5b8c421db3ce1..7832547685567 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -8,6 +8,7 @@ alignment and a host of useful data manipulation methods having to do with the labeling information """ +from __future__ import annotations import collections from collections import abc @@ -885,7 +886,7 @@ def to_string( # ---------------------------------------------------------------------- @property - def style(self) -> "Styler": + def style(self) -> Styler: """ Returns a Styler object. 
@@ -6530,7 +6531,7 @@ def groupby( squeeze: bool = no_default, observed: bool = False, dropna: bool = True, - ) -> "DataFrameGroupBy": + ) -> DataFrameGroupBy: from pandas.core.groupby.generic import DataFrameGroupBy if squeeze is not no_default: diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 486bea7cd1b47..fd924c964c1e1 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import collections from datetime import timedelta import functools @@ -110,7 +112,7 @@ from pandas._libs.tslibs import BaseOffset from pandas.core.resample import Resampler - from pandas.core.series import Series # noqa: F401 + from pandas.core.series import Series from pandas.core.window.indexers import BaseIndexer # goal is to be able to define the docs close to function, while still being @@ -391,7 +393,7 @@ def _get_block_manager_axis(cls, axis: Axis) -> int: return m - axis return axis - def _get_axis_resolvers(self, axis: str) -> Dict[str, Union["Series", MultiIndex]]: + def _get_axis_resolvers(self, axis: str) -> Dict[str, Union[Series, MultiIndex]]: # index or columns axis_index = getattr(self, axis) d = dict() @@ -421,10 +423,10 @@ def _get_axis_resolvers(self, axis: str) -> Dict[str, Union["Series", MultiIndex d[axis] = dindex return d - def _get_index_resolvers(self) -> Dict[str, Union["Series", MultiIndex]]: + def _get_index_resolvers(self) -> Dict[str, Union[Series, MultiIndex]]: from pandas.core.computation.parsing import clean_column_name - d: Dict[str, Union["Series", MultiIndex]] = {} + d: Dict[str, Union[Series, MultiIndex]] = {} for axis_name in self._AXIS_ORDERS: d.update(self._get_axis_resolvers(axis_name)) @@ -660,7 +662,7 @@ def droplevel(self: FrameOrSeries, level, axis=0) -> FrameOrSeries: result = self.set_axis(new_labels, axis=axis, inplace=False) return result - def pop(self, item: Label) -> Union["Series", Any]: + def pop(self, item: Label) -> Union[Series, Any]: result = self[item] del self[item] if self.ndim == 2: @@ -7684,7 +7686,7 @@ def resample( level=None, origin: Union[str, TimestampConvertibleTypes] = "start_day", offset: Optional[TimedeltaConvertibleTypes] = None, - ) -> "Resampler": + ) -> Resampler: """ Resample time-series data. 
@@ -10457,7 +10459,7 @@ def mad(self, axis=None, skipna=None, level=None): @doc(Rolling) def rolling( self, - window: "Union[int, timedelta, BaseOffset, BaseIndexer]", + window: Union[int, timedelta, BaseOffset, BaseIndexer], min_periods: Optional[int] = None, center: bool_t = False, win_type: Optional[str] = None, From 76f74d53bf3682c84ad8e2e64c41dfdfae9975cc Mon Sep 17 00:00:00 2001 From: Matthew Roeschke Date: Wed, 2 Sep 2020 14:36:37 -0700 Subject: [PATCH 406/632] BUG: Index.get_slice_bounds does not accept datetime.date or tz naive datetime.datetimes (#35848) --- doc/source/whatsnew/v1.2.0.rst | 3 +- pandas/core/indexes/datetimes.py | 5 +-- .../tests/indexes/base_class/test_indexing.py | 26 ++++++++++++ .../tests/indexes/datetimes/test_indexing.py | 42 ++++++++++++++++++- pandas/tests/indexes/test_numeric.py | 19 +++++++++ 5 files changed, 90 insertions(+), 5 deletions(-) create mode 100644 pandas/tests/indexes/base_class/test_indexing.py diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index 0cfe010b63a6f..9c8ee10a8a0af 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -176,7 +176,8 @@ Datetimelike - Bug in :attr:`DatetimeArray.date` where a ``ValueError`` would be raised with a read-only backing array (:issue:`33530`) - Bug in ``NaT`` comparisons failing to raise ``TypeError`` on invalid inequality comparisons (:issue:`35046`) - Bug in :class:`DateOffset` where attributes reconstructed from pickle files differ from original objects when input values exceed normal ranges (e.g months=12) (:issue:`34511`) -- +- Bug in :meth:`DatetimeIndex.get_slice_bound` where ``datetime.date`` objects were not accepted or naive :class:`Timestamp` with a tz-aware :class:`DatetimeIndex` (:issue:`35690`) +- Bug in :meth:`DatetimeIndex.slice_locs` where ``datetime.date`` objects were not accepted (:issue:`34077`) Timedelta ^^^^^^^^^ diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index e66f513e347a9..6dcb9250812d0 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -632,7 +632,7 @@ def get_loc(self, key, method=None, tolerance=None): raise KeyError(orig_key) from err def _maybe_cast_for_get_loc(self, key) -> Timestamp: - # needed to localize naive datetimes + # needed to localize naive datetimes or dates (GH 35690) key = Timestamp(key) if key.tzinfo is None: key = key.tz_localize(self.tz) @@ -677,8 +677,7 @@ def _maybe_cast_slice_bound(self, label, side: str, kind): if self._is_strictly_monotonic_decreasing and len(self) > 1: return upper if side == "left" else lower return lower if side == "left" else upper - else: - return label + return self._maybe_cast_for_get_loc(label) def _get_string_slice(self, key: str, use_lhs: bool = True, use_rhs: bool = True): freq = getattr(self, "freqstr", getattr(self, "inferred_freq", None)) diff --git a/pandas/tests/indexes/base_class/test_indexing.py b/pandas/tests/indexes/base_class/test_indexing.py new file mode 100644 index 0000000000000..196c0401a72be --- /dev/null +++ b/pandas/tests/indexes/base_class/test_indexing.py @@ -0,0 +1,26 @@ +import pytest + +from pandas import Index + + +class TestGetSliceBounds: + @pytest.mark.parametrize("kind", ["getitem", "loc", None]) + @pytest.mark.parametrize("side, expected", [("left", 4), ("right", 5)]) + def test_get_slice_bounds_within(self, kind, side, expected): + index = Index(list("abcdef")) + result = index.get_slice_bound("e", kind=kind, side=side) + assert result == expected + + 
@pytest.mark.parametrize("kind", ["getitem", "loc", None]) + @pytest.mark.parametrize("side", ["left", "right"]) + @pytest.mark.parametrize( + "data, bound, expected", [(list("abcdef"), "x", 6), (list("bcdefg"), "a", 0)], + ) + def test_get_slice_bounds_outside(self, kind, side, expected, data, bound): + index = Index(data) + result = index.get_slice_bound(bound, kind=kind, side=side) + assert result == expected + + def test_get_slice_bounds_invalid_side(self): + with pytest.raises(ValueError, match="Invalid value for side kwarg"): + Index([]).get_slice_bound("a", kind=None, side="middle") diff --git a/pandas/tests/indexes/datetimes/test_indexing.py b/pandas/tests/indexes/datetimes/test_indexing.py index 5d2c6daba3f57..539d9cb8f06a7 100644 --- a/pandas/tests/indexes/datetimes/test_indexing.py +++ b/pandas/tests/indexes/datetimes/test_indexing.py @@ -6,7 +6,7 @@ from pandas.errors import InvalidIndexError import pandas as pd -from pandas import DatetimeIndex, Index, Timestamp, date_range, notna +from pandas import DatetimeIndex, Index, Timestamp, bdate_range, date_range, notna import pandas._testing as tm from pandas.tseries.offsets import BDay, CDay @@ -665,3 +665,43 @@ def test_get_value(self): with tm.assert_produces_warning(FutureWarning): result = dti.get_value(ser, key.to_datetime64()) assert result == 7 + + +class TestGetSliceBounds: + @pytest.mark.parametrize("box", [date, datetime, Timestamp]) + @pytest.mark.parametrize("kind", ["getitem", "loc", None]) + @pytest.mark.parametrize("side, expected", [("left", 4), ("right", 5)]) + def test_get_slice_bounds_datetime_within( + self, box, kind, side, expected, tz_aware_fixture + ): + # GH 35690 + index = bdate_range("2000-01-03", "2000-02-11").tz_localize(tz_aware_fixture) + result = index.get_slice_bound( + box(year=2000, month=1, day=7), kind=kind, side=side + ) + assert result == expected + + @pytest.mark.parametrize("box", [date, datetime, Timestamp]) + @pytest.mark.parametrize("kind", ["getitem", "loc", None]) + @pytest.mark.parametrize("side", ["left", "right"]) + @pytest.mark.parametrize("year, expected", [(1999, 0), (2020, 30)]) + def test_get_slice_bounds_datetime_outside( + self, box, kind, side, year, expected, tz_aware_fixture + ): + # GH 35690 + index = bdate_range("2000-01-03", "2000-02-11").tz_localize(tz_aware_fixture) + result = index.get_slice_bound( + box(year=year, month=1, day=7), kind=kind, side=side + ) + assert result == expected + + @pytest.mark.parametrize("box", [date, datetime, Timestamp]) + @pytest.mark.parametrize("kind", ["getitem", "loc", None]) + def test_slice_datetime_locs(self, box, kind, tz_aware_fixture): + # GH 34077 + index = DatetimeIndex(["2010-01-01", "2010-01-03"]).tz_localize( + tz_aware_fixture + ) + result = index.slice_locs(box(2010, 1, 1), box(2010, 1, 2)) + expected = (0, 1) + assert result == expected diff --git a/pandas/tests/indexes/test_numeric.py b/pandas/tests/indexes/test_numeric.py index e6f455e60eee3..1ffdbbc9afd3f 100644 --- a/pandas/tests/indexes/test_numeric.py +++ b/pandas/tests/indexes/test_numeric.py @@ -679,3 +679,22 @@ def test_float64_index_difference(): result = string_index.difference(float_index) tm.assert_index_equal(result, string_index) + + +class TestGetSliceBounds: + @pytest.mark.parametrize("kind", ["getitem", "loc", None]) + @pytest.mark.parametrize("side, expected", [("left", 4), ("right", 5)]) + def test_get_slice_bounds_within(self, kind, side, expected): + index = Index(range(6)) + result = index.get_slice_bound(4, kind=kind, side=side) + assert result == 
expected + + @pytest.mark.parametrize("kind", ["getitem", "loc", None]) + @pytest.mark.parametrize("side", ["left", "right"]) + @pytest.mark.parametrize( + "bound, expected", [(-1, 0), (10, 6)], + ) + def test_get_slice_bounds_outside(self, kind, side, expected, bound): + index = Index(range(6)) + result = index.get_slice_bound(bound, kind=kind, side=side) + assert result == expected From 81e32364c1501f9f15ee6d93dbedad2e898cf1e0 Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Wed, 2 Sep 2020 19:56:33 -0700 Subject: [PATCH 407/632] REF: use BlockManager.apply for cython_agg_blocks, apply_blockwise (#35900) --- pandas/core/groupby/generic.py | 21 +++++---------------- pandas/core/internals/managers.py | 30 ++++++++++++++++++++++++------ pandas/core/window/rolling.py | 21 ++++----------------- 3 files changed, 33 insertions(+), 39 deletions(-) diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index a92e3af0764a7..537feace59fcb 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -1035,8 +1035,6 @@ def _cython_agg_blocks( if numeric_only: data = data.get_numeric_data(copy=False) - agg_blocks: List["Block"] = [] - no_result = object() def cast_agg_result(result, values: ArrayLike, how: str) -> ArrayLike: @@ -1118,23 +1116,14 @@ def blk_func(bvalues: ArrayLike) -> ArrayLike: res_values = cast_agg_result(result, bvalues, how) return res_values - for i, block in enumerate(data.blocks): - try: - nbs = block.apply(blk_func) - except (NotImplementedError, TypeError): - # TypeError -> we may have an exception in trying to aggregate - # continue and exclude the block - # NotImplementedError -> "ohlc" with wrong dtype - pass - else: - agg_blocks.extend(nbs) + # TypeError -> we may have an exception in trying to aggregate + # continue and exclude the block + # NotImplementedError -> "ohlc" with wrong dtype + new_mgr = data.apply(blk_func, ignore_failures=True) - if not agg_blocks: + if not len(new_mgr): raise DataError("No numeric types to aggregate") - # reset the locs in the blocks to correspond to our - # current ordering - new_mgr = data._combine(agg_blocks) return new_mgr def _aggregate_frame(self, func, *args, **kwargs) -> DataFrame: diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py index 389252e7ef0f2..2e3098d94afcb 100644 --- a/pandas/core/internals/managers.py +++ b/pandas/core/internals/managers.py @@ -350,7 +350,13 @@ def operate_blockwise(self, other: "BlockManager", array_op) -> "BlockManager": """ return operate_blockwise(self, other, array_op) - def apply(self: T, f, align_keys=None, **kwargs) -> T: + def apply( + self: T, + f, + align_keys: Optional[List[str]] = None, + ignore_failures: bool = False, + **kwargs, + ) -> T: """ Iterate over the blocks, collect and create a new BlockManager. @@ -358,6 +364,10 @@ def apply(self: T, f, align_keys=None, **kwargs) -> T: ---------- f : str or callable Name of the Block method to apply. 
+ align_keys: List[str] or None, default None + ignore_failures: bool, default False + **kwargs + Keywords to pass to `f` Returns ------- @@ -387,12 +397,20 @@ def apply(self: T, f, align_keys=None, **kwargs) -> T: # otherwise we have an ndarray kwargs[k] = obj[b.mgr_locs.indexer] - if callable(f): - applied = b.apply(f, **kwargs) - else: - applied = getattr(b, f)(**kwargs) + try: + if callable(f): + applied = b.apply(f, **kwargs) + else: + applied = getattr(b, f)(**kwargs) + except (TypeError, NotImplementedError): + if not ignore_failures: + raise + continue result_blocks = _extend_blocks(applied, result_blocks) + if ignore_failures: + return self._combine(result_blocks) + if len(result_blocks) == 0: return self.make_empty(self.axes) @@ -704,7 +722,7 @@ def get_numeric_data(self, copy: bool = False) -> "BlockManager": self._consolidate_inplace() return self._combine([b for b in self.blocks if b.is_numeric], copy) - def _combine(self, blocks: List[Block], copy: bool = True) -> "BlockManager": + def _combine(self: T, blocks: List[Block], copy: bool = True) -> T: """ return a new manager with the blocks """ if len(blocks) == 0: return self.make_empty() diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py index a3f60c0bc5098..558c0eeb0ea65 100644 --- a/pandas/core/window/rolling.py +++ b/pandas/core/window/rolling.py @@ -489,8 +489,6 @@ def _apply_blockwise( if self._selected_obj.ndim == 1: return self._apply_series(homogeneous_func) - # This isn't quite blockwise, since `blocks` is actually a collection - # of homogenenous DataFrames. _, obj = self._create_blocks(self._selected_obj) mgr = obj._mgr @@ -500,25 +498,14 @@ def hfunc(bvalues: ArrayLike) -> ArrayLike: res_values = homogeneous_func(values) return getattr(res_values, "T", res_values) - skipped: List[int] = [] - res_blocks: List["Block"] = [] - for i, blk in enumerate(mgr.blocks): - try: - nbs = blk.apply(hfunc) - - except (TypeError, NotImplementedError): - skipped.append(i) - continue - - res_blocks.extend(nbs) + new_mgr = mgr.apply(hfunc, ignore_failures=True) + out = obj._constructor(new_mgr) - if not len(res_blocks) and skipped: + if out.shape[1] == 0 and obj.shape[1] > 0: raise DataError("No numeric types to aggregate") - elif not len(res_blocks): + elif out.shape[1] == 0: return obj.astype("float64") - new_mgr = mgr._combine(res_blocks) - out = obj._constructor(new_mgr) self._insert_on_column(out, obj) return out From 76eb314a0ef53c907da14f74a3745c5084a1428a Mon Sep 17 00:00:00 2001 From: Tom Augspurger Date: Wed, 2 Sep 2020 22:00:00 -0500 Subject: [PATCH 408/632] Optionally disallow duplicate labels (#28394) --- doc/source/reference/frame.rst | 16 + .../reference/general_utility_functions.rst | 1 + doc/source/reference/series.rst | 15 + doc/source/user_guide/duplicates.rst | 210 ++++++++ doc/source/user_guide/index.rst | 1 + doc/source/whatsnew/v1.2.0.rst | 49 ++ pandas/__init__.py | 1 + pandas/_testing.py | 15 + pandas/core/api.py | 1 + pandas/core/flags.py | 113 +++++ pandas/core/frame.py | 11 +- pandas/core/generic.py | 130 ++++- pandas/core/indexes/base.py | 48 +- pandas/core/series.py | 10 +- pandas/errors/__init__.py | 21 + pandas/tests/api/test_api.py | 1 + pandas/tests/base/test_misc.py | 3 +- pandas/tests/frame/test_api.py | 27 ++ pandas/tests/generic/test_duplicate_labels.py | 450 ++++++++++++++++++ pandas/tests/generic/test_generic.py | 10 + pandas/tests/series/test_api.py | 26 + pandas/tests/test_flags.py | 48 ++ pandas/tests/util/test_assert_frame_equal.py | 15 + 
pandas/tests/util/test_assert_series_equal.py | 15 +
 24 files changed, 1227 insertions(+), 10 deletions(-)
 create mode 100644 doc/source/user_guide/duplicates.rst
 create mode 100644 pandas/core/flags.py
 create mode 100644 pandas/tests/generic/test_duplicate_labels.py
 create mode 100644 pandas/tests/test_flags.py

diff --git a/doc/source/reference/frame.rst b/doc/source/reference/frame.rst
index 4d9d18e3d204e..9a1ebc8d670dc 100644
--- a/doc/source/reference/frame.rst
+++ b/doc/source/reference/frame.rst
@@ -37,6 +37,7 @@ Attributes and underlying data
    DataFrame.shape
    DataFrame.memory_usage
    DataFrame.empty
+   DataFrame.set_flags
 
 Conversion
 ~~~~~~~~~~
@@ -276,6 +277,21 @@ Time Series-related
    DataFrame.tz_convert
    DataFrame.tz_localize
 
+.. _api.frame.flags:
+
+Flags
+~~~~~
+
+Flags refer to attributes of the pandas object. Properties of the dataset (like
+the date it was recorded, the URL it was accessed from, etc.) should be stored
+in :attr:`DataFrame.attrs`.
+
+.. autosummary::
+   :toctree: api/
+
+   Flags
+
+
 .. _api.frame.metadata:
 
 Metadata
diff --git a/doc/source/reference/general_utility_functions.rst b/doc/source/reference/general_utility_functions.rst
index c1759110b94ad..3cba0a81a7011 100644
--- a/doc/source/reference/general_utility_functions.rst
+++ b/doc/source/reference/general_utility_functions.rst
@@ -37,6 +37,7 @@ Exceptions and warnings
 
    errors.AccessorRegistrationWarning
    errors.DtypeWarning
+   errors.DuplicateLabelError
    errors.EmptyDataError
    errors.InvalidIndexError
    errors.MergeError
diff --git a/doc/source/reference/series.rst b/doc/source/reference/series.rst
index ae3e121ca8212..5131d35334693 100644
--- a/doc/source/reference/series.rst
+++ b/doc/source/reference/series.rst
@@ -39,6 +39,8 @@ Attributes
    Series.empty
    Series.dtypes
    Series.name
+   Series.flags
+   Series.set_flags
 
 Conversion
 ----------
@@ -527,6 +529,19 @@ Sparse-dtype specific methods and attributes are provided under the
    Series.sparse.from_coo
    Series.sparse.to_coo
 
+.. _api.series.flags:
+
+Flags
+~~~~~
+
+Flags refer to attributes of the pandas object. Properties of the dataset (like
+the date it was recorded, the URL it was accessed from, etc.) should be stored
+in :attr:`Series.attrs`.
+
+.. autosummary::
+   :toctree: api/
+
+   Flags
 
 .. _api.series.metadata:
 
diff --git a/doc/source/user_guide/duplicates.rst b/doc/source/user_guide/duplicates.rst
new file mode 100644
index 0000000000000..b65822fab2b23
--- /dev/null
+++ b/doc/source/user_guide/duplicates.rst
@@ -0,0 +1,210 @@
+.. _duplicates:
+
+****************
+Duplicate Labels
+****************
+
+:class:`Index` objects are not required to be unique; you can have duplicate row
+or column labels. This may be a bit confusing at first. If you're familiar with
+SQL, you know that row labels are similar to a primary key on a table, and you
+would never want duplicates in a SQL table. But one of pandas' roles is to clean
+messy, real-world data before it goes to some downstream system. And real-world
+data has duplicates, even in fields that are supposed to be unique.
+
+This section describes how duplicate labels change the behavior of certain
+operations, how to prevent duplicates from arising, and how to detect them if
+they do.
+
+.. ipython:: python
+
+   import pandas as pd
+   import numpy as np
+
+Consequences of Duplicate Labels
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Some pandas methods (:meth:`Series.reindex` for example) just don't work with
+duplicates present. The output can't be determined, and so pandas raises.
+
+.. ipython:: python
+   :okexcept:
+
+   s1 = pd.Series([0, 1, 2], index=['a', 'b', 'b'])
+   s1.reindex(['a', 'b', 'c'])
+
+Other methods, like indexing, can give very surprising results. Typically
+indexing with a scalar will *reduce dimensionality*. Slicing a ``DataFrame``
+with a scalar will return a ``Series``. Slicing a ``Series`` with a scalar will
+return a scalar. But with duplicates, this isn't the case.
+
+.. ipython:: python
+
+   df1 = pd.DataFrame([[0, 1, 2], [3, 4, 5]], columns=['A', 'A', 'B'])
+   df1
+
+We have duplicates in the columns. If we slice ``'B'``, we get back a ``Series``
+
+.. ipython:: python
+
+   df1['B']  # a series
+
+But slicing ``'A'`` returns a ``DataFrame``
+
+.. ipython:: python
+
+   df1['A']  # a DataFrame
+
+This applies to row labels as well
+
+.. ipython:: python
+
+   df2 = pd.DataFrame({"A": [0, 1, 2]}, index=['a', 'a', 'b'])
+   df2
+   df2.loc['b', 'A']  # a scalar
+   df2.loc['a', 'A']  # a Series
+
+Duplicate Label Detection
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+You can check whether an :class:`Index` (storing the row or column labels) is
+unique with :attr:`Index.is_unique`:
+
+.. ipython:: python
+
+   df2
+   df2.index.is_unique
+   df2.columns.is_unique
+
+.. note::
+
+   Checking whether an index is unique is somewhat expensive for large datasets.
+   Pandas does cache this result, so re-checking on the same index is very fast.
+
+:meth:`Index.duplicated` will return a boolean ndarray indicating whether a
+label is repeated.
+
+.. ipython:: python
+
+   df2.index.duplicated()
+
+This can be used as a boolean filter to drop duplicate rows.
+
+.. ipython:: python
+
+   df2.loc[~df2.index.duplicated(), :]
+
+If you need additional logic to handle duplicate labels, rather than just
+dropping the repeats, using :meth:`~DataFrame.groupby` on the index is a common
+trick. For example, we'll resolve duplicates by taking the average of all rows
+with the same label.
+
+.. ipython:: python
+
+   df2.groupby(level=0).mean()
+
+.. _duplicates.disallow:
+
+Disallowing Duplicate Labels
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. versionadded:: 1.2.0
+
+As noted above, handling duplicates is an important feature when reading in raw
+data. That said, you may want to avoid introducing duplicates as part of a data
+processing pipeline (from methods like :meth:`pandas.concat`,
+:meth:`~DataFrame.rename`, etc.). Both :class:`Series` and :class:`DataFrame`
+can *disallow* duplicate labels by calling ``.set_flags(allows_duplicate_labels=False)``
+(the default is to allow them). If there are duplicate labels, an exception
+will be raised.
+
+.. ipython:: python
+   :okexcept:
+
+   pd.Series(
+       [0, 1, 2],
+       index=['a', 'b', 'b']
+   ).set_flags(allows_duplicate_labels=False)
+
+This applies to both row and column labels for a :class:`DataFrame`
+
+.. ipython:: python
+   :okexcept:
+
+   pd.DataFrame(
+       [[0, 1, 2], [3, 4, 5]], columns=["A", "B", "B"],
+   ).set_flags(allows_duplicate_labels=False)
+
+This attribute can be checked or set with :attr:`~DataFrame.flags.allows_duplicate_labels`,
+which indicates whether that object can have duplicate labels.
+
+.. ipython:: python
+
+   df = (
+       pd.DataFrame({"A": [0, 1, 2, 3]},
+                    index=['x', 'y', 'X', 'Y'])
+       .set_flags(allows_duplicate_labels=False)
+   )
+   df
+   df.flags.allows_duplicate_labels
+
+:meth:`DataFrame.set_flags` can be used to return a new ``DataFrame`` with attributes
+like ``allows_duplicate_labels`` set to some value
+
+.. ipython:: python
+
+   df2 = df.set_flags(allows_duplicate_labels=True)
+   df2.flags.allows_duplicate_labels
+
+The new ``DataFrame`` returned is a view on the same data as the old ``DataFrame``.
+Or the property can just be set directly on the same object
+
+.. ipython:: python
+
+   df2.flags.allows_duplicate_labels = False
+   df2.flags.allows_duplicate_labels
+
+When processing raw, messy data you might initially read in the messy data
+(which potentially has duplicate labels), deduplicate, and then disallow duplicates
+going forward, to ensure that your data pipeline doesn't introduce duplicates.
+
+.. code-block:: python
+
+   >>> raw = pd.read_csv("...")
+   >>> deduplicated = raw.groupby(level=0).first()  # remove duplicates
+   >>> deduplicated.flags.allows_duplicate_labels = False  # disallow going forward
+
+Setting ``allows_duplicate_labels=False`` on a ``Series`` or ``DataFrame`` with duplicate
+labels, or performing an operation that introduces duplicate labels on a ``Series`` or
+``DataFrame`` that disallows duplicates, will raise an
+:class:`errors.DuplicateLabelError`.
+
+.. ipython:: python
+   :okexcept:
+
+   df.rename(str.upper)
+
+This error message contains the labels that are duplicated, and the numeric positions
+of all the duplicates (including the "original") in the ``Series`` or ``DataFrame``.
+
+Duplicate Label Propagation
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+In general, disallowing duplicates is "sticky". It's preserved through
+operations.
+
+.. ipython:: python
+   :okexcept:
+
+   s1 = pd.Series(0, index=['a', 'b']).set_flags(allows_duplicate_labels=False)
+   s1
+   s1.head().rename({"a": "b"})
+
+.. warning::
+
+   This is an experimental feature. Currently, many methods fail to
+   propagate the ``allows_duplicate_labels`` value. In future versions
+   it is expected that every method taking or returning one or more
+   DataFrame or Series objects will propagate ``allows_duplicate_labels``.
diff --git a/doc/source/user_guide/index.rst b/doc/source/user_guide/index.rst
index 8226e72779588..2fc9e066e6712 100644
--- a/doc/source/user_guide/index.rst
+++ b/doc/source/user_guide/index.rst
@@ -33,6 +33,7 @@ Further information on any specific method can be obtained in the
    reshaping
    text
    missing_data
+   duplicates
    categorical
    integer_na
    boolean
diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index 9c8ee10a8a0af..7c083b95b21f3 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -13,6 +13,53 @@ including other versions of pandas.
 Enhancements
 ~~~~~~~~~~~~
 
+.. _whatsnew_120.duplicate_labels:
+
+Optionally disallow duplicate labels
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+:class:`Series` and :class:`DataFrame` can now be created with the ``allows_duplicate_labels=False`` flag to
+control whether the index or columns can contain duplicate labels (:issue:`28394`). This can be used to
+prevent accidental introduction of duplicate labels, which can affect downstream operations.
+
+By default, duplicates continue to be allowed
+
+.. ipython:: python
+
+   pd.Series([1, 2], index=['a', 'a'])
+
+.. ipython:: python
+   :okexcept:
+
+   pd.Series([1, 2], index=['a', 'a']).set_flags(allows_duplicate_labels=False)
+
+Pandas will propagate the ``allows_duplicate_labels`` property through many operations.
+
+.. ipython:: python
+   :okexcept:
+
+   a = (
+       pd.Series([1, 2], index=['a', 'b'])
+       .set_flags(allows_duplicate_labels=False)
+   )
+   a
+   # An operation introducing duplicates
+   a.reindex(['a', 'b', 'a'])
+
+.. warning::
+
+   This is an experimental feature.
Currently, many methods fail to + propagate the ``allows_duplicate_labels`` value. In future versions + it is expected that every method taking or returning one or more + DataFrame or Series objects will propagate ``allows_duplicate_labels``. + +See :ref:`duplicates` for more. + +The ``allows_duplicate_labels`` flag is stored in the new :attr:`DataFrame.flags` +attribute. This stores global attributes that apply to the *pandas object*. This +differs from :attr:`DataFrame.attrs`, which stores information that applies to +the dataset. + Passing arguments to fsspec backends ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -53,6 +100,8 @@ For example: Other enhancements ^^^^^^^^^^^^^^^^^^ + +- Added :meth:`~DataFrame.set_flags` for setting table-wide flags on a ``Series`` or ``DataFrame`` (:issue:`28394`) - :class:`Index` with object dtype supports division and multiplication (:issue:`34160`) - - diff --git a/pandas/__init__.py b/pandas/__init__.py index 36576da74c75d..2737bcd8f9ccf 100644 --- a/pandas/__init__.py +++ b/pandas/__init__.py @@ -100,6 +100,7 @@ to_datetime, to_timedelta, # misc + Flags, Grouper, factorize, unique, diff --git a/pandas/_testing.py b/pandas/_testing.py index b402b040d9268..04d36749a3d8c 100644 --- a/pandas/_testing.py +++ b/pandas/_testing.py @@ -1225,6 +1225,7 @@ def assert_series_equal( check_categorical=True, check_category_order=True, check_freq=True, + check_flags=True, rtol=1.0e-5, atol=1.0e-8, obj="Series", @@ -1271,6 +1272,11 @@ def assert_series_equal( .. versionadded:: 1.0.2 check_freq : bool, default True Whether to check the `freq` attribute on a DatetimeIndex or TimedeltaIndex. + check_flags : bool, default True + Whether to check the `flags` attribute. + + .. versionadded:: 1.2.0 + rtol : float, default 1e-5 Relative tolerance. Only used when check_exact is False. @@ -1307,6 +1313,9 @@ def assert_series_equal( msg2 = f"{len(right)}, {right.index}" raise_assert_detail(obj, "Series length are different", msg1, msg2) + if check_flags: + assert left.flags == right.flags, f"{repr(left.flags)} != {repr(right.flags)}" + # index comparison assert_index_equal( left.index, @@ -1429,6 +1438,7 @@ def assert_frame_equal( check_categorical=True, check_like=False, check_freq=True, + check_flags=True, rtol=1.0e-5, atol=1.0e-8, obj="DataFrame", @@ -1490,6 +1500,8 @@ def assert_frame_equal( (same as in columns) - same labels must be with the same data. check_freq : bool, default True Whether to check the `freq` attribute on a DatetimeIndex or TimedeltaIndex. + check_flags : bool, default True + Whether to check the `flags` attribute. rtol : float, default 1e-5 Relative tolerance. Only used when check_exact is False. 
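# Illustrative sketch (editor's note, not part of the patch): what the new
# ``check_flags`` argument shown above does. Two objects with equal data but
# different flags now compare unequal by default; pass ``check_flags=False``
# to opt out. Assumes a pandas build with this change; ``pandas._testing``
# is private API.
import pandas as pd
import pandas._testing as tm

left = pd.Series([1, 2])
right = pd.Series([1, 2]).set_flags(allows_duplicate_labels=False)

tm.assert_series_equal(left, right, check_flags=False)  # passes: data match
# tm.assert_series_equal(left, right) would raise AssertionError here,
# because left.flags != right.flags.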
@@ -1563,6 +1575,9 @@ def assert_frame_equal( if check_like: left, right = left.reindex_like(right), right + if check_flags: + assert left.flags == right.flags, f"{repr(left.flags)} != {repr(right.flags)}" + # index comparison assert_index_equal( left.index, diff --git a/pandas/core/api.py b/pandas/core/api.py index b0b65f9d0be34..348e9206d6e19 100644 --- a/pandas/core/api.py +++ b/pandas/core/api.py @@ -26,6 +26,7 @@ ) from pandas.core.arrays.string_ import StringDtype from pandas.core.construction import array +from pandas.core.flags import Flags from pandas.core.groupby import Grouper, NamedAgg from pandas.core.indexes.api import ( CategoricalIndex, diff --git a/pandas/core/flags.py b/pandas/core/flags.py new file mode 100644 index 0000000000000..15966d8ddce2a --- /dev/null +++ b/pandas/core/flags.py @@ -0,0 +1,113 @@ +import weakref + + +class Flags: + """ + Flags that apply to pandas objects. + + .. versionadded:: 1.2.0 + + Parameters + ---------- + obj : Series or DataFrame + The object these flags are associated with + allows_duplicate_labels : bool, default True + Whether to allow duplicate labels in this object. By default, + duplicate labels are permitted. Setting this to ``False`` will + cause an :class:`errors.DuplicateLabelError` to be raised when + `index` (or columns for DataFrame) is not unique, or any + subsequent operation on introduces duplicates. + See :ref:`duplicates.disallow` for more. + + .. warning:: + + This is an experimental feature. Currently, many methods fail to + propagate the ``allows_duplicate_labels`` value. In future versions + it is expected that every method taking or returning one or more + DataFrame or Series objects will propagate ``allows_duplicate_labels``. + + Notes + ----- + Attributes can be set in two ways + + >>> df = pd.DataFrame() + >>> df.flags + + >>> df.flags.allows_duplicate_labels = False + >>> df.flags + + + >>> df.flags['allows_duplicate_labels'] = True + >>> df.flags + + """ + + _keys = {"allows_duplicate_labels"} + + def __init__(self, obj, *, allows_duplicate_labels): + self._allows_duplicate_labels = allows_duplicate_labels + self._obj = weakref.ref(obj) + + @property + def allows_duplicate_labels(self) -> bool: + """ + Whether this object allows duplicate labels. + + Setting ``allows_duplicate_labels=False`` ensures that the + index (and columns of a DataFrame) are unique. Most methods + that accept and return a Series or DataFrame will propagate + the value of ``allows_duplicate_labels``. + + See :ref:`duplicates` for more. + + See Also + -------- + DataFrame.attrs : Set global metadata on this object. + DataFrame.set_flags : Set global flags on this object. + + Examples + -------- + >>> df = pd.DataFrame({"A": [1, 2]}, index=['a', 'a']) + >>> df.allows_duplicate_labels + True + >>> df.allows_duplicate_labels = False + Traceback (most recent call last): + ... + pandas.errors.DuplicateLabelError: Index has duplicates. + positions + label + a [0, 1] + """ + return self._allows_duplicate_labels + + @allows_duplicate_labels.setter + def allows_duplicate_labels(self, value: bool): + value = bool(value) + obj = self._obj() + if obj is None: + raise ValueError("This flag's object has been deleted.") + + if not value: + for ax in obj.axes: + ax._maybe_check_unique() + + self._allows_duplicate_labels = value + + def __getitem__(self, key): + if key not in self._keys: + raise KeyError(key) + + return getattr(self, key) + + def __setitem__(self, key, value): + if key not in self._keys: + raise ValueError(f"Unknown flag {key}. 
Must be one of {self._keys}") + setattr(self, key, value) + + def __repr__(self): + return f"" + + def __eq__(self, other): + if isinstance(other, type(self)): + return self.allows_duplicate_labels == other.allows_duplicate_labels + return False diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 7832547685567..b4c12b9e52f56 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -458,7 +458,9 @@ def __init__( if isinstance(data, BlockManager): if index is None and columns is None and dtype is None and copy is False: # GH#33357 fastpath - NDFrame.__init__(self, data) + NDFrame.__init__( + self, data, + ) return mgr = self._init_mgr( @@ -3659,6 +3661,11 @@ def insert(self, loc, column, value, allow_duplicates=False) -> None: value : int, Series, or array-like allow_duplicates : bool, optional """ + if allow_duplicates and not self.flags.allows_duplicate_labels: + raise ValueError( + "Cannot specify 'allow_duplicates=True' when " + "'self.flags.allows_duplicate_labels' is False." + ) self._ensure_valid_index(value) value = self._sanitize_column(column, value, broadcast=False) self._mgr.insert(loc, column, value, allow_duplicates=allow_duplicates) @@ -4559,6 +4566,7 @@ def set_index( 4 16 10 2014 31 """ inplace = validate_bool_kwarg(inplace, "inplace") + self._check_inplace_and_allows_duplicate_labels(inplace) if not isinstance(keys, list): keys = [keys] @@ -4804,6 +4812,7 @@ class max type monkey mammal NaN jump """ inplace = validate_bool_kwarg(inplace, "inplace") + self._check_inplace_and_allows_duplicate_labels(inplace) if inplace: new_obj = self else: diff --git a/pandas/core/generic.py b/pandas/core/generic.py index fd924c964c1e1..c9eb4a34683f8 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -94,6 +94,7 @@ from pandas.core.base import PandasObject, SelectionMixin import pandas.core.common as com from pandas.core.construction import create_series_with_explicit_dtype +from pandas.core.flags import Flags from pandas.core.indexes.api import Index, MultiIndex, RangeIndex, ensure_index from pandas.core.indexes.datetimes import DatetimeIndex from pandas.core.indexes.period import Period, PeriodIndex @@ -188,6 +189,7 @@ class NDFrame(PandasObject, SelectionMixin, indexing.IndexingMixin): "_metadata", "__array_struct__", "__array_interface__", + "_flags", ] _internal_names_set: Set[str] = set(_internal_names) _accessors: Set[str] = set() @@ -217,6 +219,7 @@ def __init__( else: attrs = dict(attrs) object.__setattr__(self, "_attrs", attrs) + object.__setattr__(self, "_flags", Flags(self, allows_duplicate_labels=True)) @classmethod def _init_mgr(cls, mgr, axes, dtype=None, copy: bool = False) -> BlockManager: @@ -237,15 +240,20 @@ def _init_mgr(cls, mgr, axes, dtype=None, copy: bool = False) -> BlockManager: return mgr # ---------------------------------------------------------------------- + # attrs and flags @property def attrs(self) -> Dict[Optional[Hashable], Any]: """ - Dictionary of global attributes on this object. + Dictionary of global attributes of this dataset. .. warning:: attrs is experimental and may change without warning. + + See Also + -------- + DataFrame.flags """ if self._attrs is None: self._attrs = {} @@ -255,6 +263,96 @@ def attrs(self) -> Dict[Optional[Hashable], Any]: def attrs(self, value: Mapping[Optional[Hashable], Any]) -> None: self._attrs = dict(value) + @property + def flags(self) -> Flags: + """ + Get the properties associated with this pandas object. 
+ + The available flags are + + * :attr:`Flags.allows_duplicate_labels` + + See Also + -------- + Flags + DataFrame.attrs + + Notes + ----- + "Flags" differ from "metadata". Flags reflect properties of the + pandas object (the Series or DataFrame). Metadata refer to properties + of the dataset, and should be stored in :attr:`DataFrame.attrs`. + + Examples + -------- + >>> df = pd.DataFrame({"A": [1, 2]}) + >>> df.flags + + + Flags can be get or set using ``.`` + + >>> df.flags.allows_duplicate_labels + True + >>> df.flags.allows_duplicate_labels = False + + Or by slicing with a key + + >>> df.flags["allows_duplicate_labels"] + False + >>> df.flags["allows_duplicate_labels"] = True + """ + return self._flags + + def set_flags( + self: FrameOrSeries, + *, + copy: bool = False, + allows_duplicate_labels: Optional[bool] = None, + ) -> FrameOrSeries: + """ + Return a new object with updated flags. + + Parameters + ---------- + allows_duplicate_labels : bool, optional + Whether the returned object allows duplicate labels. + + Returns + ------- + Series or DataFrame + The same type as the caller. + + See Also + -------- + DataFrame.attrs : Global metadata applying to this dataset. + DataFrame.flags : Global flags applying to this object. + + Notes + ----- + This method returns a new object that's a view on the same data + as the input. Mutating the input or the output values will be reflected + in the other. + + This method is intended to be used in method chains. + + "Flags" differ from "metadata". Flags reflect properties of the + pandas object (the Series or DataFrame). Metadata refer to properties + of the dataset, and should be stored in :attr:`DataFrame.attrs`. + + Examples + -------- + >>> df = pd.DataFrame({"A": [1, 2]}) + >>> df.flags.allows_duplicate_labels + True + >>> df2 = df.set_flags(allows_duplicate_labels=False) + >>> df2.flags.allows_duplicate_labels + False + """ + df = self.copy(deep=copy) + if allows_duplicate_labels is not None: + df.flags["allows_duplicate_labels"] = allows_duplicate_labels + return df + @classmethod def _validate_dtype(cls, dtype): """ validate the passed dtype """ @@ -557,6 +655,11 @@ def set_axis(self, labels, axis: Axis = 0, inplace: bool = False): -------- %(klass)s.rename_axis : Alter the name of the index%(see_also_sub)s. """ + self._check_inplace_and_allows_duplicate_labels(inplace) + return self._set_axis_nocheck(labels, axis, inplace) + + def _set_axis_nocheck(self, labels, axis: Axis, inplace: bool): + # NDFrame.rename with inplace=False calls set_axis(inplace=True) on a copy. 
if inplace: setattr(self, self._get_axis_name(axis), labels) else: @@ -926,6 +1029,7 @@ def rename( else: index = mapper + self._check_inplace_and_allows_duplicate_labels(inplace) result = self if inplace else self.copy(deep=copy) for axis_no, replacements in enumerate((index, columns)): @@ -950,7 +1054,7 @@ def rename( raise KeyError(f"{missing_labels} not found in axis") new_index = ax._transform_index(f, level) - result.set_axis(new_index, axis=axis_no, inplace=True) + result._set_axis_nocheck(new_index, axis=axis_no, inplace=True) result._clear_item_cache() if inplace: @@ -1828,11 +1932,11 @@ def __getstate__(self) -> Dict[str, Any]: _typ=self._typ, _metadata=self._metadata, attrs=self.attrs, + _flags={k: self.flags[k] for k in self.flags._keys}, **meta, ) def __setstate__(self, state): - if isinstance(state, BlockManager): self._mgr = state elif isinstance(state, dict): @@ -1843,6 +1947,8 @@ def __setstate__(self, state): if typ is not None: attrs = state.get("_attrs", {}) object.__setattr__(self, "_attrs", attrs) + flags = state.get("_flags", dict(allows_duplicate_labels=True)) + object.__setattr__(self, "_flags", Flags(self, **flags)) # set in the order of internal names # to avoid definitional recursion @@ -1850,7 +1956,7 @@ def __setstate__(self, state): # defined meta = set(self._internal_names + self._metadata) for k in list(meta): - if k in state: + if k in state and k != "_flags": v = state[k] object.__setattr__(self, k, v) @@ -3802,6 +3908,13 @@ def __delitem__(self, key) -> None: # ---------------------------------------------------------------------- # Unsorted + def _check_inplace_and_allows_duplicate_labels(self, inplace): + if inplace and not self.flags.allows_duplicate_labels: + raise ValueError( + "Cannot specify 'inplace=True' when " + "'self.flags.allows_duplicate_labels' is False." + ) + def get(self, key, default=None): """ Get item from object for given key (ex: DataFrame column). @@ -5163,10 +5276,19 @@ def __finalize__( if isinstance(other, NDFrame): for name in other.attrs: self.attrs[name] = other.attrs[name] + + self.flags.allows_duplicate_labels = other.flags.allows_duplicate_labels # For subclasses using _metadata. for name in self._metadata: assert isinstance(name, str) object.__setattr__(self, name, getattr(other, name, None)) + + if method == "concat": + allows_duplicate_labels = all( + x.flags.allows_duplicate_labels for x in other.objs + ) + self.flags.allows_duplicate_labels = allows_duplicate_labels + return self def __getattr__(self, name: str): diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 48b02fc525cc1..65b5dfb6df911 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -27,7 +27,7 @@ from pandas._typing import AnyArrayLike, Dtype, DtypeObj, Label from pandas.compat import set_function_name from pandas.compat.numpy import function as nv -from pandas.errors import InvalidIndexError +from pandas.errors import DuplicateLabelError, InvalidIndexError from pandas.util._decorators import Appender, Substitution, cache_readonly, doc from pandas.core.dtypes import concat as _concat @@ -488,6 +488,52 @@ def _simple_new(cls, values, name: Label = None): def _constructor(self): return type(self) + def _maybe_check_unique(self): + """ + Check that an Index has no duplicates. + + This is typically only called via + `NDFrame.flags.allows_duplicate_labels.setter` when it's set to + True (duplicates aren't allowed). + + Raises + ------ + DuplicateLabelError + When the index is not unique. 
+ """ + if not self.is_unique: + msg = """Index has duplicates.""" + duplicates = self._format_duplicate_message() + msg += "\n{}".format(duplicates) + + raise DuplicateLabelError(msg) + + def _format_duplicate_message(self): + """ + Construct the DataFrame for a DuplicateLabelError. + + This returns a DataFrame indicating the labels and positions + of duplicates in an index. This should only be called when it's + already known that duplicates are present. + + Examples + -------- + >>> idx = pd.Index(['a', 'b', 'a']) + >>> idx._format_duplicate_message() + positions + label + a [0, 2] + """ + from pandas import Series + + duplicates = self[self.duplicated(keep="first")].unique() + assert len(duplicates) + + out = Series(np.arange(len(self))).groupby(self).agg(list)[duplicates] + if self.nlevels == 1: + out = out.rename_axis("label") + return out.to_frame(name="positions") + # -------------------------------------------------------------------- # Index Internals Methods diff --git a/pandas/core/series.py b/pandas/core/series.py index a8a2d300fa168..9d84ce4b9ab2e 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -201,7 +201,7 @@ class Series(base.IndexOpsMixin, generic.NDFrame): # Constructors def __init__( - self, data=None, index=None, dtype=None, name=None, copy=False, fastpath=False + self, data=None, index=None, dtype=None, name=None, copy=False, fastpath=False, ): if ( @@ -211,7 +211,9 @@ def __init__( and copy is False ): # GH#33357 called with just the SingleBlockManager - NDFrame.__init__(self, data) + NDFrame.__init__( + self, data, + ) self.name = name return @@ -330,7 +332,9 @@ def __init__( data = SingleBlockManager.from_array(data, index) - generic.NDFrame.__init__(self, data) + generic.NDFrame.__init__( + self, data, + ) self.name = name self._set_axis(0, index, fastpath=True) diff --git a/pandas/errors/__init__.py b/pandas/errors/__init__.py index 6ac3004d29996..15389ca2c3e61 100644 --- a/pandas/errors/__init__.py +++ b/pandas/errors/__init__.py @@ -202,6 +202,27 @@ class NumbaUtilError(Exception): """ +class DuplicateLabelError(ValueError): + """ + Error raised when an operation would introduce duplicate labels. + + .. versionadded:: 1.2.0 + + Examples + -------- + >>> s = pd.Series([0, 1, 2], index=['a', 'b', 'c']).set_flags( + ... allows_duplicate_labels=False + ... ) + >>> s.reindex(['a', 'a', 'b']) + Traceback (most recent call last): + ... + DuplicateLabelError: Index has duplicates. + positions + label + a [0, 1] + """ + + class InvalidIndexError(Exception): """ Exception raised when attemping to use an invalid index key. 
diff --git a/pandas/tests/api/test_api.py b/pandas/tests/api/test_api.py index 1d25336cd3b70..54da13c3c620b 100644 --- a/pandas/tests/api/test_api.py +++ b/pandas/tests/api/test_api.py @@ -61,6 +61,7 @@ class TestPDApi(Base): "ExcelFile", "ExcelWriter", "Float64Index", + "Flags", "Grouper", "HDFStore", "Index", diff --git a/pandas/tests/base/test_misc.py b/pandas/tests/base/test_misc.py index 78a830c7f43d8..9523fba953ad0 100644 --- a/pandas/tests/base/test_misc.py +++ b/pandas/tests/base/test_misc.py @@ -99,7 +99,7 @@ def test_ndarray_compat_properties(index_or_series_obj): assert getattr(obj, p, None) is not None # deprecated properties - for p in ["flags", "strides", "itemsize", "base", "data"]: + for p in ["strides", "itemsize", "base", "data"]: assert not hasattr(obj, p) msg = "can only convert an array of size 1 to a Python scalar" @@ -116,6 +116,7 @@ def test_ndarray_compat_properties(index_or_series_obj): @pytest.mark.skipif(PYPY, reason="not relevant for PyPy") def test_memory_usage(index_or_series_obj): obj = index_or_series_obj + res = obj.memory_usage() res_deep = obj.memory_usage(deep=True) diff --git a/pandas/tests/frame/test_api.py b/pandas/tests/frame/test_api.py index 0716cf5e27119..b1c31a6f90133 100644 --- a/pandas/tests/frame/test_api.py +++ b/pandas/tests/frame/test_api.py @@ -553,6 +553,33 @@ def test_attrs(self): result = df.rename(columns=str) assert result.attrs == {"version": 1} + @pytest.mark.parametrize("allows_duplicate_labels", [True, False, None]) + def test_set_flags(self, allows_duplicate_labels): + df = pd.DataFrame({"A": [1, 2]}) + result = df.set_flags(allows_duplicate_labels=allows_duplicate_labels) + if allows_duplicate_labels is None: + # We don't update when it's not provided + assert result.flags.allows_duplicate_labels is True + else: + assert result.flags.allows_duplicate_labels is allows_duplicate_labels + + # We made a copy + assert df is not result + + # We didn't mutate df + assert df.flags.allows_duplicate_labels is True + + # But we didn't copy data + result.iloc[0, 0] = 0 + assert df.iloc[0, 0] == 0 + + # Now we do copy. 
+ result = df.set_flags( + copy=True, allows_duplicate_labels=allows_duplicate_labels + ) + result.iloc[0, 0] = 10 + assert df.iloc[0, 0] == 0 + def test_cache_on_copy(self): # GH 31784 _item_cache not cleared on copy causes incorrect reads after updates df = DataFrame({"a": [1]}) diff --git a/pandas/tests/generic/test_duplicate_labels.py b/pandas/tests/generic/test_duplicate_labels.py new file mode 100644 index 0000000000000..97468e1f10a8b --- /dev/null +++ b/pandas/tests/generic/test_duplicate_labels.py @@ -0,0 +1,450 @@ +"""Tests dealing with the NDFrame.allows_duplicates.""" +import operator + +import numpy as np +import pytest + +import pandas as pd +import pandas._testing as tm + +not_implemented = pytest.mark.xfail(reason="Not implemented.") + +# ---------------------------------------------------------------------------- +# Preservation + + +class TestPreserves: + @pytest.mark.parametrize( + "cls, data", + [ + (pd.Series, np.array([])), + (pd.Series, [1, 2]), + (pd.DataFrame, {}), + (pd.DataFrame, {"A": [1, 2]}), + ], + ) + def test_construction_ok(self, cls, data): + result = cls(data) + assert result.flags.allows_duplicate_labels is True + + result = cls(data).set_flags(allows_duplicate_labels=False) + assert result.flags.allows_duplicate_labels is False + + @pytest.mark.parametrize( + "func", + [ + operator.itemgetter(["a"]), + operator.methodcaller("add", 1), + operator.methodcaller("rename", str.upper), + operator.methodcaller("rename", "name"), + pytest.param(operator.methodcaller("abs"), marks=not_implemented), + # TODO: test np.abs + ], + ) + def test_preserved_series(self, func): + s = pd.Series([0, 1], index=["a", "b"]).set_flags(allows_duplicate_labels=False) + assert func(s).flags.allows_duplicate_labels is False + + @pytest.mark.parametrize( + "other", [pd.Series(0, index=["a", "b", "c"]), pd.Series(0, index=["a", "b"])] + ) + # TODO: frame + @not_implemented + def test_align(self, other): + s = pd.Series([0, 1], index=["a", "b"]).set_flags(allows_duplicate_labels=False) + a, b = s.align(other) + assert a.flags.allows_duplicate_labels is False + assert b.flags.allows_duplicate_labels is False + + def test_preserved_frame(self): + df = pd.DataFrame({"A": [1, 2], "B": [3, 4]}, index=["a", "b"]).set_flags( + allows_duplicate_labels=False + ) + assert df.loc[["a"]].flags.allows_duplicate_labels is False + assert df.loc[:, ["A", "B"]].flags.allows_duplicate_labels is False + + @not_implemented + def test_to_frame(self): + s = pd.Series(dtype=float).set_flags(allows_duplicate_labels=False) + assert s.to_frame().flags.allows_duplicate_labels is False + + @pytest.mark.parametrize("func", ["add", "sub"]) + @pytest.mark.parametrize( + "frame", [False, pytest.param(True, marks=not_implemented)] + ) + @pytest.mark.parametrize("other", [1, pd.Series([1, 2], name="A")]) + def test_binops(self, func, other, frame): + df = pd.Series([1, 2], name="A", index=["a", "b"]).set_flags( + allows_duplicate_labels=False + ) + if frame: + df = df.to_frame() + if isinstance(other, pd.Series) and frame: + other = other.to_frame() + func = operator.methodcaller(func, other) + assert df.flags.allows_duplicate_labels is False + assert func(df).flags.allows_duplicate_labels is False + + @not_implemented + def test_preserve_getitem(self): + df = pd.DataFrame({"A": [1, 2]}).set_flags(allows_duplicate_labels=False) + assert df[["A"]].flags.allows_duplicate_labels is False + assert df["A"].flags.allows_duplicate_labels is False + assert df.loc[0].flags.allows_duplicate_labels is False + assert 
df.loc[[0]].flags.allows_duplicate_labels is False + assert df.loc[0, ["A"]].flags.allows_duplicate_labels is False + + @pytest.mark.xfail(reason="Unclear behavior.") + def test_ndframe_getitem_caching_issue(self): + # NDFrame.__getitem__ will cache the first df['A']. May need to + # invalidate that cache? Update the cached entries? + df = pd.DataFrame({"A": [0]}).set_flags(allows_duplicate_labels=False) + assert df["A"].flags.allows_duplicate_labels is False + df.flags.allows_duplicate_labels = True + assert df["A"].flags.allows_duplicate_labels is True + + @pytest.mark.parametrize( + "objs, kwargs", + [ + # Series + ( + [ + pd.Series(1, index=["a", "b"]).set_flags( + allows_duplicate_labels=False + ), + pd.Series(2, index=["c", "d"]).set_flags( + allows_duplicate_labels=False + ), + ], + {}, + ), + ( + [ + pd.Series(1, index=["a", "b"]).set_flags( + allows_duplicate_labels=False + ), + pd.Series(2, index=["a", "b"]).set_flags( + allows_duplicate_labels=False + ), + ], + {"ignore_index": True}, + ), + ( + [ + pd.Series(1, index=["a", "b"]).set_flags( + allows_duplicate_labels=False + ), + pd.Series(2, index=["a", "b"]).set_flags( + allows_duplicate_labels=False + ), + ], + {"axis": 1}, + ), + # Frame + ( + [ + pd.DataFrame({"A": [1, 2]}, index=["a", "b"]).set_flags( + allows_duplicate_labels=False + ), + pd.DataFrame({"A": [1, 2]}, index=["c", "d"]).set_flags( + allows_duplicate_labels=False + ), + ], + {}, + ), + ( + [ + pd.DataFrame({"A": [1, 2]}, index=["a", "b"]).set_flags( + allows_duplicate_labels=False + ), + pd.DataFrame({"A": [1, 2]}, index=["a", "b"]).set_flags( + allows_duplicate_labels=False + ), + ], + {"ignore_index": True}, + ), + ( + [ + pd.DataFrame({"A": [1, 2]}, index=["a", "b"]).set_flags( + allows_duplicate_labels=False + ), + pd.DataFrame({"B": [1, 2]}, index=["a", "b"]).set_flags( + allows_duplicate_labels=False + ), + ], + {"axis": 1}, + ), + # Series / Frame + ( + [ + pd.DataFrame({"A": [1, 2]}, index=["a", "b"]).set_flags( + allows_duplicate_labels=False + ), + pd.Series([1, 2], index=["a", "b"], name="B",).set_flags( + allows_duplicate_labels=False, + ), + ], + {"axis": 1}, + ), + ], + ) + def test_concat(self, objs, kwargs): + result = pd.concat(objs, **kwargs) + assert result.flags.allows_duplicate_labels is False + + @pytest.mark.parametrize( + "left, right, kwargs, expected", + [ + # false false false + pytest.param( + pd.DataFrame({"A": [0, 1]}, index=["a", "b"]).set_flags( + allows_duplicate_labels=False + ), + pd.DataFrame({"B": [0, 1]}, index=["a", "d"]).set_flags( + allows_duplicate_labels=False + ), + dict(left_index=True, right_index=True), + False, + marks=not_implemented, + ), + # false true false + pytest.param( + pd.DataFrame({"A": [0, 1]}, index=["a", "b"]).set_flags( + allows_duplicate_labels=False + ), + pd.DataFrame({"B": [0, 1]}, index=["a", "d"]), + dict(left_index=True, right_index=True), + False, + marks=not_implemented, + ), + # true true true + ( + pd.DataFrame({"A": [0, 1]}, index=["a", "b"]), + pd.DataFrame({"B": [0, 1]}, index=["a", "d"]), + dict(left_index=True, right_index=True), + True, + ), + ], + ) + def test_merge(self, left, right, kwargs, expected): + result = pd.merge(left, right, **kwargs) + assert result.flags.allows_duplicate_labels is expected + + @not_implemented + def test_groupby(self): + # XXX: This is under tested + # TODO: + # - apply + # - transform + # - Should passing a grouper that disallows duplicates propagate? 
+ df = pd.DataFrame({"A": [1, 2, 3]}).set_flags(allows_duplicate_labels=False) + result = df.groupby([0, 0, 1]).agg("count") + assert result.flags.allows_duplicate_labels is False + + @pytest.mark.parametrize("frame", [True, False]) + @not_implemented + def test_window(self, frame): + df = pd.Series( + 1, + index=pd.date_range("2000", periods=12), + name="A", + allows_duplicate_labels=False, + ) + if frame: + df = df.to_frame() + assert df.rolling(3).mean().flags.allows_duplicate_labels is False + assert df.ewm(3).mean().flags.allows_duplicate_labels is False + assert df.expanding(3).mean().flags.allows_duplicate_labels is False + + +# ---------------------------------------------------------------------------- +# Raises + + +class TestRaises: + @pytest.mark.parametrize( + "cls, axes", + [ + (pd.Series, {"index": ["a", "a"], "dtype": float}), + (pd.DataFrame, {"index": ["a", "a"]}), + (pd.DataFrame, {"index": ["a", "a"], "columns": ["b", "b"]}), + (pd.DataFrame, {"columns": ["b", "b"]}), + ], + ) + def test_set_flags_with_duplicates(self, cls, axes): + result = cls(**axes) + assert result.flags.allows_duplicate_labels is True + + with pytest.raises(pd.errors.DuplicateLabelError): + cls(**axes).set_flags(allows_duplicate_labels=False) + + @pytest.mark.parametrize( + "data", + [ + pd.Series(index=[0, 0], dtype=float), + pd.DataFrame(index=[0, 0]), + pd.DataFrame(columns=[0, 0]), + ], + ) + def test_setting_allows_duplicate_labels_raises(self, data): + with pytest.raises(pd.errors.DuplicateLabelError): + data.flags.allows_duplicate_labels = False + + assert data.flags.allows_duplicate_labels is True + + @pytest.mark.parametrize( + "func", [operator.methodcaller("append", pd.Series(0, index=["a", "b"]))] + ) + def test_series_raises(self, func): + s = pd.Series([0, 1], index=["a", "b"]).set_flags(allows_duplicate_labels=False) + with pytest.raises(pd.errors.DuplicateLabelError): + func(s) + + @pytest.mark.parametrize( + "getter, target", + [ + (operator.itemgetter(["A", "A"]), None), + # loc + (operator.itemgetter(["a", "a"]), "loc"), + pytest.param( + operator.itemgetter(("a", ["A", "A"])), "loc", marks=not_implemented + ), + pytest.param( + operator.itemgetter((["a", "a"], "A")), "loc", marks=not_implemented + ), + # iloc + (operator.itemgetter([0, 0]), "iloc"), + pytest.param( + operator.itemgetter((0, [0, 0])), "iloc", marks=not_implemented + ), + pytest.param( + operator.itemgetter(([0, 0], 0)), "iloc", marks=not_implemented + ), + ], + ) + def test_getitem_raises(self, getter, target): + df = pd.DataFrame({"A": [1, 2], "B": [3, 4]}, index=["a", "b"]).set_flags( + allows_duplicate_labels=False + ) + if target: + # df, df.loc, or df.iloc + target = getattr(df, target) + else: + target = df + + with pytest.raises(pd.errors.DuplicateLabelError): + getter(target) + + @pytest.mark.parametrize( + "objs, kwargs", + [ + ( + [ + pd.Series(1, index=[0, 1], name="a").set_flags( + allows_duplicate_labels=False + ), + pd.Series(2, index=[0, 1], name="a").set_flags( + allows_duplicate_labels=False + ), + ], + {"axis": 1}, + ) + ], + ) + def test_concat_raises(self, objs, kwargs): + with pytest.raises(pd.errors.DuplicateLabelError): + pd.concat(objs, **kwargs) + + @not_implemented + def test_merge_raises(self): + a = pd.DataFrame({"A": [0, 1, 2]}, index=["a", "b", "c"]).set_flags( + allows_duplicate_labels=False + ) + b = pd.DataFrame({"B": [0, 1, 2]}, index=["a", "b", "b"]) + with pytest.raises(pd.errors.DuplicateLabelError): + pd.merge(a, b, left_index=True, right_index=True) + + 
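# Illustrative sketch (editor's note, not part of the patch): the propagation
# rule the ``test_concat`` / ``test_concat_raises`` cases above exercise. Per
# the ``__finalize__`` change in pandas/core/generic.py, a concat result
# allows duplicate labels only when every input does.
import pandas as pd

a = pd.Series(1, index=["a", "b"]).set_flags(allows_duplicate_labels=False)
b = pd.Series(2, index=["c", "d"]).set_flags(allows_duplicate_labels=False)

out = pd.concat([a, b])
assert out.flags.allows_duplicate_labels is False  # the flag survives concat

# Had both inputs shared the same name, pd.concat([a, b], axis=1) would
# instead raise pd.errors.DuplicateLabelError: the result's columns repeat.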
+@pytest.mark.parametrize( + "idx", + [ + pd.Index([1, 1]), + pd.Index(["a", "a"]), + pd.Index([1.1, 1.1]), + pd.PeriodIndex([pd.Period("2000", "D")] * 2), + pd.DatetimeIndex([pd.Timestamp("2000")] * 2), + pd.TimedeltaIndex([pd.Timedelta("1D")] * 2), + pd.CategoricalIndex(["a", "a"]), + pd.IntervalIndex([pd.Interval(0, 1)] * 2), + pd.MultiIndex.from_tuples([("a", 1), ("a", 1)]), + ], + ids=lambda x: type(x).__name__, +) +def test_raises_basic(idx): + with pytest.raises(pd.errors.DuplicateLabelError): + pd.Series(1, index=idx).set_flags(allows_duplicate_labels=False) + + with pytest.raises(pd.errors.DuplicateLabelError): + pd.DataFrame({"A": [1, 1]}, index=idx).set_flags(allows_duplicate_labels=False) + + with pytest.raises(pd.errors.DuplicateLabelError): + pd.DataFrame([[1, 2]], columns=idx).set_flags(allows_duplicate_labels=False) + + +def test_format_duplicate_labels_message(): + idx = pd.Index(["a", "b", "a", "b", "c"]) + result = idx._format_duplicate_message() + expected = pd.DataFrame( + {"positions": [[0, 2], [1, 3]]}, index=pd.Index(["a", "b"], name="label") + ) + tm.assert_frame_equal(result, expected) + + +def test_format_duplicate_labels_message_multi(): + idx = pd.MultiIndex.from_product([["A"], ["a", "b", "a", "b", "c"]]) + result = idx._format_duplicate_message() + expected = pd.DataFrame( + {"positions": [[0, 2], [1, 3]]}, + index=pd.MultiIndex.from_product([["A"], ["a", "b"]]), + ) + tm.assert_frame_equal(result, expected) + + +def test_dataframe_insert_raises(): + df = pd.DataFrame({"A": [1, 2]}).set_flags(allows_duplicate_labels=False) + with pytest.raises(ValueError, match="Cannot specify"): + df.insert(0, "A", [3, 4], allow_duplicates=True) + + +@pytest.mark.parametrize( + "method, frame_only", + [ + (operator.methodcaller("set_index", "A", inplace=True), True), + (operator.methodcaller("set_axis", ["A", "B"], inplace=True), False), + (operator.methodcaller("reset_index", inplace=True), True), + (operator.methodcaller("rename", lambda x: x, inplace=True), False), + ], +) +def test_inplace_raises(method, frame_only): + df = pd.DataFrame({"A": [0, 0], "B": [1, 2]}).set_flags( + allows_duplicate_labels=False + ) + s = df["A"] + s.flags.allows_duplicate_labels = False + msg = "Cannot specify" + + with pytest.raises(ValueError, match=msg): + method(df) + if not frame_only: + with pytest.raises(ValueError, match=msg): + method(s) + + +def test_pickle(): + a = pd.Series([1, 2]).set_flags(allows_duplicate_labels=False) + b = tm.round_trip_pickle(a) + tm.assert_series_equal(a, b) + + a = pd.DataFrame({"A": []}).set_flags(allows_duplicate_labels=False) + b = tm.round_trip_pickle(a) + tm.assert_frame_equal(a, b) diff --git a/pandas/tests/generic/test_generic.py b/pandas/tests/generic/test_generic.py index 5e66925a38ec6..23bb673586768 100644 --- a/pandas/tests/generic/test_generic.py +++ b/pandas/tests/generic/test_generic.py @@ -887,3 +887,13 @@ def test_axis_numbers_deprecated(self, box): obj = box(dtype=object) with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): obj._AXIS_NUMBERS + + @pytest.mark.parametrize("as_frame", [True, False]) + def test_flags_identity(self, as_frame): + s = pd.Series([1, 2]) + if as_frame: + s = s.to_frame() + + assert s.flags is s.flags + s2 = s.copy() + assert s2.flags is not s.flags diff --git a/pandas/tests/series/test_api.py b/pandas/tests/series/test_api.py index d81e8a4f82ffb..a69c0ee75eaba 100644 --- a/pandas/tests/series/test_api.py +++ b/pandas/tests/series/test_api.py @@ -524,6 +524,32 @@ def test_attrs(self): result = s + 
1 assert result.attrs == {"version": 1} + @pytest.mark.parametrize("allows_duplicate_labels", [True, False, None]) + def test_set_flags(self, allows_duplicate_labels): + df = pd.Series([1, 2]) + result = df.set_flags(allows_duplicate_labels=allows_duplicate_labels) + if allows_duplicate_labels is None: + # We don't update when it's not provided + assert result.flags.allows_duplicate_labels is True + else: + assert result.flags.allows_duplicate_labels is allows_duplicate_labels + + # We made a copy + assert df is not result + # We didn't mutate df + assert df.flags.allows_duplicate_labels is True + + # But we didn't copy data + result.iloc[0] = 0 + assert df.iloc[0] == 0 + + # Now we do copy. + result = df.set_flags( + copy=True, allows_duplicate_labels=allows_duplicate_labels + ) + result.iloc[0] = 10 + assert df.iloc[0] == 0 + class TestCategoricalSeries: @pytest.mark.parametrize( diff --git a/pandas/tests/test_flags.py b/pandas/tests/test_flags.py new file mode 100644 index 0000000000000..f6e3ae4980afb --- /dev/null +++ b/pandas/tests/test_flags.py @@ -0,0 +1,48 @@ +import pytest + +import pandas as pd + + +class TestFlags: + def test_equality(self): + a = pd.DataFrame().set_flags(allows_duplicate_labels=True).flags + b = pd.DataFrame().set_flags(allows_duplicate_labels=False).flags + + assert a == a + assert b == b + assert a != b + assert a != 2 + + def test_set(self): + df = pd.DataFrame().set_flags(allows_duplicate_labels=True) + a = df.flags + a.allows_duplicate_labels = False + assert a.allows_duplicate_labels is False + a["allows_duplicate_labels"] = True + assert a.allows_duplicate_labels is True + + def test_repr(self): + a = repr(pd.DataFrame({"A"}).set_flags(allows_duplicate_labels=True).flags) + assert a == "" + a = repr(pd.DataFrame({"A"}).set_flags(allows_duplicate_labels=False).flags) + assert a == "" + + def test_obj_ref(self): + df = pd.DataFrame() + flags = df.flags + del df + with pytest.raises(ValueError, match="object has been deleted"): + flags.allows_duplicate_labels = True + + def test_getitem(self): + df = pd.DataFrame() + flags = df.flags + assert flags["allows_duplicate_labels"] is True + flags["allows_duplicate_labels"] = False + assert flags["allows_duplicate_labels"] is False + + with pytest.raises(KeyError): + flags["a"] + + with pytest.raises(ValueError): + flags["a"] = 10 diff --git a/pandas/tests/util/test_assert_frame_equal.py b/pandas/tests/util/test_assert_frame_equal.py index 3aa3c64923b14..5174ff005b5fb 100644 --- a/pandas/tests/util/test_assert_frame_equal.py +++ b/pandas/tests/util/test_assert_frame_equal.py @@ -268,3 +268,18 @@ def test_assert_frame_equal_ignore_extension_dtype_mismatch(right_dtype): left = pd.DataFrame({"a": [1, 2, 3]}, dtype="Int64") right = pd.DataFrame({"a": [1, 2, 3]}, dtype=right_dtype) tm.assert_frame_equal(left, right, check_dtype=False) + + +def test_allows_duplicate_labels(): + left = pd.DataFrame() + right = pd.DataFrame().set_flags(allows_duplicate_labels=False) + tm.assert_frame_equal(left, left) + tm.assert_frame_equal(right, right) + tm.assert_frame_equal(left, right, check_flags=False) + tm.assert_frame_equal(right, left, check_flags=False) + + with pytest.raises(AssertionError, match=" Date: Wed, 2 Sep 2020 23:06:18 -0400 Subject: [PATCH 409/632] BUG/ENH: compression for google cloud storage in to_csv (#35681) --- doc/source/whatsnew/v1.2.0.rst | 2 + pandas/_typing.py | 29 +++++++- pandas/core/frame.py | 13 ++-- pandas/core/generic.py | 2 + pandas/io/common.py | 104 ++++++++++++++++++++++------ 
pandas/io/excel/_base.py | 4 +- pandas/io/feather_format.py | 23 +++--- pandas/io/formats/csvs.py | 11 +-- pandas/io/json/_json.py | 19 +++-- pandas/io/orc.py | 4 +- pandas/io/parquet.py | 14 ++-- pandas/io/parsers.py | 11 +-- pandas/io/pickle.py | 28 +++++--- pandas/io/sas/sas7bdat.py | 2 +- pandas/io/sas/sas_xport.py | 9 +-- pandas/io/sas/sasreader.py | 16 +++-- pandas/io/stata.py | 15 ++-- pandas/tests/io/test_common.py | 43 ++++++++---- pandas/tests/io/test_compression.py | 10 +++ pandas/tests/io/test_gcs.py | 92 ++++++++++++++++++------ 20 files changed, 321 insertions(+), 130 deletions(-) diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index 7c083b95b21f3..b07351d05defb 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -290,6 +290,8 @@ I/O - In :meth:`read_csv` `float_precision='round_trip'` now handles `decimal` and `thousands` parameters (:issue:`35365`) - :meth:`to_pickle` and :meth:`read_pickle` were closing user-provided file objects (:issue:`35679`) - :meth:`to_csv` passes compression arguments for `'gzip'` always to `gzip.GzipFile` (:issue:`28103`) +- :meth:`to_csv` did not support zip compression for binary file object not having a filename (:issue: `35058`) +- :meth:`to_csv` and :meth:`read_csv` did not honor `compression` and `encoding` for path-like objects that are internally converted to file-like objects (:issue:`35677`, :issue:`26124`, and :issue:`32392`) Plotting ^^^^^^^^ diff --git a/pandas/_typing.py b/pandas/_typing.py index 1b972030ef5a5..f8af92e07c674 100644 --- a/pandas/_typing.py +++ b/pandas/_typing.py @@ -1,4 +1,6 @@ +from dataclasses import dataclass from datetime import datetime, timedelta, tzinfo +from io import IOBase from pathlib import Path from typing import ( IO, @@ -8,6 +10,7 @@ Callable, Collection, Dict, + Generic, Hashable, List, Mapping, @@ -62,7 +65,8 @@ "ExtensionDtype", str, np.dtype, Type[Union[str, float, int, complex, bool]] ] DtypeObj = Union[np.dtype, "ExtensionDtype"] -FilePathOrBuffer = Union[str, Path, IO[AnyStr]] +FilePathOrBuffer = Union[str, Path, IO[AnyStr], IOBase] +FileOrBuffer = Union[str, IO[AnyStr], IOBase] # FrameOrSeriesUnion means either a DataFrame or a Series. E.g. # `def func(a: FrameOrSeriesUnion) -> FrameOrSeriesUnion: ...` means that if a Series @@ -114,3 +118,26 @@ # compression keywords and compression CompressionDict = Mapping[str, Optional[Union[str, int, bool]]] CompressionOptions = Optional[Union[str, CompressionDict]] + + +# let's bind types +ModeVar = TypeVar("ModeVar", str, None, Optional[str]) +EncodingVar = TypeVar("EncodingVar", str, None, Optional[str]) + + +@dataclass +class IOargs(Generic[ModeVar, EncodingVar]): + """ + Return value of io/common.py:get_filepath_or_buffer. + + Note (copy&past from io/parsers): + filepath_or_buffer can be Union[FilePathOrBuffer, s3fs.S3File, gcsfs.GCSFile] + though mypy handling of conditional imports is difficult. 
+ See https://github.com/python/mypy/issues/1297 + """ + + filepath_or_buffer: FileOrBuffer + encoding: EncodingVar + compression: CompressionOptions + should_close: bool + mode: Union[ModeVar, str] diff --git a/pandas/core/frame.py b/pandas/core/frame.py index b4c12b9e52f56..c48bec9b670ad 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -2284,14 +2284,11 @@ def to_markdown( result = tabulate.tabulate(self, **kwargs) if buf is None: return result - buf, _, _, should_close = get_filepath_or_buffer( - buf, mode=mode, storage_options=storage_options - ) - assert buf is not None # Help mypy. - assert not isinstance(buf, str) - buf.writelines(result) - if should_close: - buf.close() + ioargs = get_filepath_or_buffer(buf, mode=mode, storage_options=storage_options) + assert not isinstance(ioargs.filepath_or_buffer, str) + ioargs.filepath_or_buffer.writelines(result) + if ioargs.should_close: + ioargs.filepath_or_buffer.close() return None @deprecate_kwarg(old_arg_name="fname", new_arg_name="path") diff --git a/pandas/core/generic.py b/pandas/core/generic.py index c9eb4a34683f8..e22f9567ee955 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -4,6 +4,7 @@ from datetime import timedelta import functools import gc +from io import StringIO import json import operator import pickle @@ -3357,6 +3358,7 @@ def to_csv( formatter.save() if path_or_buf is None: + assert isinstance(formatter.path_or_buf, StringIO) return formatter.path_or_buf.getvalue() return None diff --git a/pandas/io/common.py b/pandas/io/common.py index d1305c9cabe0e..97dbc7f1031a2 100644 --- a/pandas/io/common.py +++ b/pandas/io/common.py @@ -27,12 +27,17 @@ uses_params, uses_relative, ) +import warnings import zipfile from pandas._typing import ( CompressionDict, CompressionOptions, + EncodingVar, + FileOrBuffer, FilePathOrBuffer, + IOargs, + ModeVar, StorageOptions, ) from pandas.compat import _get_lzma_file, _import_lzma @@ -69,9 +74,7 @@ def is_url(url) -> bool: return parse_url(url).scheme in _VALID_URLS -def _expand_user( - filepath_or_buffer: FilePathOrBuffer[AnyStr], -) -> FilePathOrBuffer[AnyStr]: +def _expand_user(filepath_or_buffer: FileOrBuffer[AnyStr]) -> FileOrBuffer[AnyStr]: """ Return the argument with an initial component of ~ or ~user replaced by that user's home directory. @@ -101,7 +104,7 @@ def validate_header_arg(header) -> None: def stringify_path( filepath_or_buffer: FilePathOrBuffer[AnyStr], -) -> FilePathOrBuffer[AnyStr]: +) -> FileOrBuffer[AnyStr]: """ Attempt to convert a path-like object to a string. 
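# Illustrative sketch (editor's note, not part of the patch): callers now
# unpack the IOargs dataclass instead of the old 4-tuple return value.
# get_filepath_or_buffer is internal API; the behavior shown follows the
# local-path branch of this diff.
import pandas.io.common as icom

ioargs = icom.get_filepath_or_buffer("~/data.csv", encoding="utf-8", mode="r")
print(ioargs.filepath_or_buffer)  # "~" expanded to the user's home directory
print(ioargs.should_close)        # False: no new handle was opened here
if ioargs.should_close:           # the cleanup pattern used by the io modules
    ioargs.filepath_or_buffer.close()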
@@ -134,9 +137,9 @@ def stringify_path( # "__fspath__" [union-attr] # error: Item "IO[bytes]" of "Union[str, Path, IO[bytes]]" has no # attribute "__fspath__" [union-attr] - return filepath_or_buffer.__fspath__() # type: ignore[union-attr] + filepath_or_buffer = filepath_or_buffer.__fspath__() # type: ignore[union-attr] elif isinstance(filepath_or_buffer, pathlib.Path): - return str(filepath_or_buffer) + filepath_or_buffer = str(filepath_or_buffer) return _expand_user(filepath_or_buffer) @@ -162,13 +165,13 @@ def is_fsspec_url(url: FilePathOrBuffer) -> bool: ) -def get_filepath_or_buffer( +def get_filepath_or_buffer( # type: ignore[assignment] filepath_or_buffer: FilePathOrBuffer, - encoding: Optional[str] = None, + encoding: EncodingVar = None, compression: CompressionOptions = None, - mode: Optional[str] = None, + mode: ModeVar = None, storage_options: StorageOptions = None, -): +) -> IOargs[ModeVar, EncodingVar]: """ If the filepath_or_buffer is a url, translate and return the buffer. Otherwise passthrough. @@ -191,14 +194,35 @@ def get_filepath_or_buffer( .. versionadded:: 1.2.0 - Returns - ------- - Tuple[FilePathOrBuffer, str, CompressionOptions, bool] - Tuple containing the filepath or buffer, the encoding, the compression - and should_close. + ..versionchange:: 1.2.0 + + Returns the dataclass IOargs. """ filepath_or_buffer = stringify_path(filepath_or_buffer) + # bz2 and xz do not write the byte order mark for utf-16 and utf-32 + # print a warning when writing such files + compression_method = infer_compression( + filepath_or_buffer, get_compression_method(compression)[0] + ) + if ( + mode + and "w" in mode + and compression_method in ["bz2", "xz"] + and encoding in ["utf-16", "utf-32"] + ): + warnings.warn( + f"{compression} will not write the byte order mark for {encoding}", + UnicodeWarning, + ) + + # Use binary mode when converting path-like objects to file-like objects (fsspec) + # except when text mode is explicitly requested. The original mode is returned if + # fsspec is not used. 
+ fsspec_mode = mode or "rb" + if "t" not in fsspec_mode and "b" not in fsspec_mode: + fsspec_mode += "b" + if isinstance(filepath_or_buffer, str) and is_url(filepath_or_buffer): # TODO: fsspec can also handle HTTP via requests, but leaving this unchanged if storage_options: @@ -212,7 +236,13 @@ def get_filepath_or_buffer( compression = "gzip" reader = BytesIO(req.read()) req.close() - return reader, encoding, compression, True + return IOargs( + filepath_or_buffer=reader, + encoding=encoding, + compression=compression, + should_close=True, + mode=fsspec_mode, + ) if is_fsspec_url(filepath_or_buffer): assert isinstance( @@ -244,7 +274,7 @@ def get_filepath_or_buffer( try: file_obj = fsspec.open( - filepath_or_buffer, mode=mode or "rb", **(storage_options or {}) + filepath_or_buffer, mode=fsspec_mode, **(storage_options or {}) ).open() # GH 34626 Reads from Public Buckets without Credentials needs anon=True except tuple(err_types_to_retry_with_anon): @@ -255,23 +285,41 @@ def get_filepath_or_buffer( storage_options = dict(storage_options) storage_options["anon"] = True file_obj = fsspec.open( - filepath_or_buffer, mode=mode or "rb", **(storage_options or {}) + filepath_or_buffer, mode=fsspec_mode, **(storage_options or {}) ).open() - return file_obj, encoding, compression, True + return IOargs( + filepath_or_buffer=file_obj, + encoding=encoding, + compression=compression, + should_close=True, + mode=fsspec_mode, + ) elif storage_options: raise ValueError( "storage_options passed with file object or non-fsspec file path" ) if isinstance(filepath_or_buffer, (str, bytes, mmap.mmap)): - return _expand_user(filepath_or_buffer), None, compression, False + return IOargs( + filepath_or_buffer=_expand_user(filepath_or_buffer), + encoding=encoding, + compression=compression, + should_close=False, + mode=mode, + ) if not is_file_like(filepath_or_buffer): msg = f"Invalid file path or buffer object type: {type(filepath_or_buffer)}" raise ValueError(msg) - return filepath_or_buffer, None, compression, False + return IOargs( + filepath_or_buffer=filepath_or_buffer, + encoding=encoding, + compression=compression, + should_close=False, + mode=mode, + ) def file_path_to_url(path: str) -> str: @@ -452,6 +500,15 @@ def get_handle( need_text_wrapping = (BufferedIOBase, RawIOBase, S3File) except ImportError: need_text_wrapping = (BufferedIOBase, RawIOBase) + # fsspec is an optional dependency. If it is available, add its file-object + # class to the list of classes that need text wrapping. If fsspec is too old and is + # needed, get_filepath_or_buffer would already have thrown an exception. 
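# Illustrative sketch (editor's note, not part of the patch): the new
# ``elif storage_options:`` branch above rejects storage_options for anything
# fsspec cannot handle, such as an in-memory buffer.
import io

import pandas.io.common as icom

buf = io.BytesIO(b"a,b\n1,2\n")
try:
    icom.get_filepath_or_buffer(buf, storage_options={"anon": True})
except ValueError as err:
    print(err)  # storage_options passed with file object or non-fsspec file path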
+ try: + from fsspec.spec import AbstractFileSystem + + need_text_wrapping = (*need_text_wrapping, AbstractFileSystem) + except ImportError: + pass handles: List[Union[IO, _MMapWrapper]] = list() f = path_or_buf @@ -583,12 +640,15 @@ def __init__( self.archive_name = archive_name kwargs_zip: Dict[str, Any] = {"compression": zipfile.ZIP_DEFLATED} kwargs_zip.update(kwargs) - super().__init__(file, mode, **kwargs_zip) + super().__init__(file, mode, **kwargs_zip) # type: ignore[arg-type] def write(self, data): archive_name = self.filename if self.archive_name is not None: archive_name = self.archive_name + if archive_name is None: + # ZipFile needs a non-empty string + archive_name = "zip" super().writestr(archive_name, data) @property diff --git a/pandas/io/excel/_base.py b/pandas/io/excel/_base.py index ead36c95556b1..9bc1d7fedcb31 100644 --- a/pandas/io/excel/_base.py +++ b/pandas/io/excel/_base.py @@ -352,9 +352,9 @@ def __init__(self, filepath_or_buffer, storage_options: StorageOptions = None): if is_url(filepath_or_buffer): filepath_or_buffer = BytesIO(urlopen(filepath_or_buffer).read()) elif not isinstance(filepath_or_buffer, (ExcelFile, self._workbook_class)): - filepath_or_buffer, _, _, _ = get_filepath_or_buffer( + filepath_or_buffer = get_filepath_or_buffer( filepath_or_buffer, storage_options=storage_options - ) + ).filepath_or_buffer if isinstance(filepath_or_buffer, self._workbook_class): self.book = filepath_or_buffer diff --git a/pandas/io/feather_format.py b/pandas/io/feather_format.py index fb606b5ec8aef..a98eebe1c6a2a 100644 --- a/pandas/io/feather_format.py +++ b/pandas/io/feather_format.py @@ -34,9 +34,7 @@ def to_feather(df: DataFrame, path, storage_options: StorageOptions = None, **kw import_optional_dependency("pyarrow") from pyarrow import feather - path, _, _, should_close = get_filepath_or_buffer( - path, mode="wb", storage_options=storage_options - ) + ioargs = get_filepath_or_buffer(path, mode="wb", storage_options=storage_options) if not isinstance(df, DataFrame): raise ValueError("feather only support IO with DataFrames") @@ -74,7 +72,11 @@ def to_feather(df: DataFrame, path, storage_options: StorageOptions = None, **kw if df.columns.inferred_type not in valid_types: raise ValueError("feather must have string column names") - feather.write_feather(df, path, **kwargs) + feather.write_feather(df, ioargs.filepath_or_buffer, **kwargs) + + if ioargs.should_close: + assert not isinstance(ioargs.filepath_or_buffer, str) + ioargs.filepath_or_buffer.close() def read_feather( @@ -122,14 +124,15 @@ def read_feather( import_optional_dependency("pyarrow") from pyarrow import feather - path, _, _, should_close = get_filepath_or_buffer( - path, storage_options=storage_options - ) + ioargs = get_filepath_or_buffer(path, storage_options=storage_options) - df = feather.read_feather(path, columns=columns, use_threads=bool(use_threads)) + df = feather.read_feather( + ioargs.filepath_or_buffer, columns=columns, use_threads=bool(use_threads) + ) # s3fs only validates the credentials when the file is closed. 
- if should_close: - path.close() + if ioargs.should_close: + assert not isinstance(ioargs.filepath_or_buffer, str) + ioargs.filepath_or_buffer.close() return df diff --git a/pandas/io/formats/csvs.py b/pandas/io/formats/csvs.py index c462a96da7133..270caec022fef 100644 --- a/pandas/io/formats/csvs.py +++ b/pandas/io/formats/csvs.py @@ -62,14 +62,19 @@ def __init__( # Extract compression mode as given, if dict compression, self.compression_args = get_compression_method(compression) + self.compression = infer_compression(path_or_buf, compression) - self.path_or_buf, _, _, self.should_close = get_filepath_or_buffer( + ioargs = get_filepath_or_buffer( path_or_buf, encoding=encoding, - compression=compression, + compression=self.compression, mode=mode, storage_options=storage_options, ) + self.path_or_buf = ioargs.filepath_or_buffer + self.should_close = ioargs.should_close + self.mode = ioargs.mode + self.sep = sep self.na_rep = na_rep self.float_format = float_format @@ -78,12 +83,10 @@ def __init__( self.header = header self.index = index self.index_label = index_label - self.mode = mode if encoding is None: encoding = "utf-8" self.encoding = encoding self.errors = errors - self.compression = infer_compression(self.path_or_buf, compression) if quoting is None: quoting = csvlib.QUOTE_MINIMAL diff --git a/pandas/io/json/_json.py b/pandas/io/json/_json.py index fe5e172655ae1..7a3b76ff7e3d0 100644 --- a/pandas/io/json/_json.py +++ b/pandas/io/json/_json.py @@ -58,12 +58,14 @@ def to_json( ) if path_or_buf is not None: - path_or_buf, _, _, should_close = get_filepath_or_buffer( + ioargs = get_filepath_or_buffer( path_or_buf, compression=compression, mode="wt", storage_options=storage_options, ) + path_or_buf = ioargs.filepath_or_buffer + should_close = ioargs.should_close if lines and orient != "records": raise ValueError("'lines' keyword only valid when 'orient' is records") @@ -102,6 +104,8 @@ def to_json( fh.write(s) finally: fh.close() + for handle in handles: + handle.close() elif path_or_buf is None: return s else: @@ -615,7 +619,7 @@ def read_json( compression_method, compression = get_compression_method(compression) compression_method = infer_compression(path_or_buf, compression_method) compression = dict(compression, method=compression_method) - filepath_or_buffer, _, compression, should_close = get_filepath_or_buffer( + ioargs = get_filepath_or_buffer( path_or_buf, encoding=encoding, compression=compression, @@ -623,7 +627,7 @@ def read_json( ) json_reader = JsonReader( - filepath_or_buffer, + ioargs.filepath_or_buffer, orient=orient, typ=typ, dtype=dtype, @@ -633,10 +637,10 @@ def read_json( numpy=numpy, precise_float=precise_float, date_unit=date_unit, - encoding=encoding, + encoding=ioargs.encoding, lines=lines, chunksize=chunksize, - compression=compression, + compression=ioargs.compression, nrows=nrows, ) @@ -644,8 +648,9 @@ def read_json( return json_reader result = json_reader.read() - if should_close: - filepath_or_buffer.close() + if ioargs.should_close: + assert not isinstance(ioargs.filepath_or_buffer, str) + ioargs.filepath_or_buffer.close() return result diff --git a/pandas/io/orc.py b/pandas/io/orc.py index b556732e4d116..f1b1aa6a43cb5 100644 --- a/pandas/io/orc.py +++ b/pandas/io/orc.py @@ -50,7 +50,7 @@ def read_orc( import pyarrow.orc - path, _, _, _ = get_filepath_or_buffer(path) - orc_file = pyarrow.orc.ORCFile(path) + ioargs = get_filepath_or_buffer(path) + orc_file = pyarrow.orc.ORCFile(ioargs.filepath_or_buffer) result = orc_file.read(columns=columns, 
**kwargs).to_pandas() return result diff --git a/pandas/io/parquet.py b/pandas/io/parquet.py index f2ce2f056ce82..07f2078931687 100644 --- a/pandas/io/parquet.py +++ b/pandas/io/parquet.py @@ -9,7 +9,7 @@ from pandas import DataFrame, get_option -from pandas.io.common import _expand_user, get_filepath_or_buffer, is_fsspec_url +from pandas.io.common import get_filepath_or_buffer, is_fsspec_url, stringify_path def get_engine(engine: str) -> "BaseImpl": @@ -113,7 +113,7 @@ def write( raise ValueError( "storage_options passed with file object or non-fsspec file path" ) - path = _expand_user(path) + path = stringify_path(path) if partition_cols is not None: # writes to multiple files under the given path self.api.parquet.write_to_dataset( @@ -143,10 +143,12 @@ def read( ) fs = kwargs.pop("filesystem", None) should_close = False - path = _expand_user(path) + path = stringify_path(path) if not fs: - path, _, _, should_close = get_filepath_or_buffer(path) + ioargs = get_filepath_or_buffer(path) + path = ioargs.filepath_or_buffer + should_close = ioargs.should_close kwargs["use_pandas_metadata"] = True result = self.api.parquet.read_table( @@ -205,7 +207,7 @@ def write( raise ValueError( "storage_options passed with file object or non-fsspec file path" ) - path, _, _, _ = get_filepath_or_buffer(path) + path = get_filepath_or_buffer(path).filepath_or_buffer with catch_warnings(record=True): self.api.write( @@ -228,7 +230,7 @@ def read( ).open() parquet_file = self.api.ParquetFile(path, open_with=open_with) else: - path, _, _, _ = get_filepath_or_buffer(path) + path = get_filepath_or_buffer(path).filepath_or_buffer parquet_file = self.api.ParquetFile(path) return parquet_file.to_pandas(columns=columns, **kwargs) diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py index 9ad527684120e..c6ef5221e7ead 100644 --- a/pandas/io/parsers.py +++ b/pandas/io/parsers.py @@ -432,10 +432,10 @@ def _read(filepath_or_buffer: FilePathOrBuffer, kwds): # Union[FilePathOrBuffer, s3fs.S3File, gcsfs.GCSFile] # though mypy handling of conditional imports is difficult. # See https://github.com/python/mypy/issues/1297 - fp_or_buf, _, compression, should_close = get_filepath_or_buffer( + ioargs = get_filepath_or_buffer( filepath_or_buffer, encoding, compression, storage_options=storage_options ) - kwds["compression"] = compression + kwds["compression"] = ioargs.compression if kwds.get("date_parser", None) is not None: if isinstance(kwds["parse_dates"], bool): @@ -450,7 +450,7 @@ def _read(filepath_or_buffer: FilePathOrBuffer, kwds): _validate_names(kwds.get("names", None)) # Create the parser. 
- parser = TextFileReader(fp_or_buf, **kwds) + parser = TextFileReader(ioargs.filepath_or_buffer, **kwds) if chunksize or iterator: return parser @@ -460,9 +460,10 @@ def _read(filepath_or_buffer: FilePathOrBuffer, kwds): finally: parser.close() - if should_close: + if ioargs.should_close: + assert not isinstance(ioargs.filepath_or_buffer, str) try: - fp_or_buf.close() + ioargs.filepath_or_buffer.close() except ValueError: pass diff --git a/pandas/io/pickle.py b/pandas/io/pickle.py index fc1d2e385cf72..857a2d1b69be4 100644 --- a/pandas/io/pickle.py +++ b/pandas/io/pickle.py @@ -86,15 +86,18 @@ def to_pickle( >>> import os >>> os.remove("./dummy.pkl") """ - fp_or_buf, _, compression, should_close = get_filepath_or_buffer( + ioargs = get_filepath_or_buffer( filepath_or_buffer, compression=compression, mode="wb", storage_options=storage_options, ) - if not isinstance(fp_or_buf, str) and compression == "infer": + compression = ioargs.compression + if not isinstance(ioargs.filepath_or_buffer, str) and compression == "infer": compression = None - f, fh = get_handle(fp_or_buf, "wb", compression=compression, is_text=False) + f, fh = get_handle( + ioargs.filepath_or_buffer, "wb", compression=compression, is_text=False + ) if protocol < 0: protocol = pickle.HIGHEST_PROTOCOL try: @@ -105,9 +108,10 @@ def to_pickle( f.close() for _f in fh: _f.close() - if should_close: + if ioargs.should_close: + assert not isinstance(ioargs.filepath_or_buffer, str) try: - fp_or_buf.close() + ioargs.filepath_or_buffer.close() except ValueError: pass @@ -189,12 +193,15 @@ def read_pickle( >>> import os >>> os.remove("./dummy.pkl") """ - fp_or_buf, _, compression, should_close = get_filepath_or_buffer( + ioargs = get_filepath_or_buffer( filepath_or_buffer, compression=compression, storage_options=storage_options ) - if not isinstance(fp_or_buf, str) and compression == "infer": + compression = ioargs.compression + if not isinstance(ioargs.filepath_or_buffer, str) and compression == "infer": compression = None - f, fh = get_handle(fp_or_buf, "rb", compression=compression, is_text=False) + f, fh = get_handle( + ioargs.filepath_or_buffer, "rb", compression=compression, is_text=False + ) # 1) try standard library Pickle # 2) try pickle_compat (older pandas version) to handle subclass changes @@ -222,8 +229,9 @@ def read_pickle( f.close() for _f in fh: _f.close() - if should_close: + if ioargs.should_close: + assert not isinstance(ioargs.filepath_or_buffer, str) try: - fp_or_buf.close() + ioargs.filepath_or_buffer.close() except ValueError: pass diff --git a/pandas/io/sas/sas7bdat.py b/pandas/io/sas/sas7bdat.py index 3d9be7c15726b..76dac39d1889f 100644 --- a/pandas/io/sas/sas7bdat.py +++ b/pandas/io/sas/sas7bdat.py @@ -137,7 +137,7 @@ def __init__( self._current_row_on_page_index = 0 self._current_row_in_file_index = 0 - self._path_or_buf, _, _, _ = get_filepath_or_buffer(path_or_buf) + self._path_or_buf = get_filepath_or_buffer(path_or_buf).filepath_or_buffer if isinstance(self._path_or_buf, str): self._path_or_buf = open(self._path_or_buf, "rb") self.handle = self._path_or_buf diff --git a/pandas/io/sas/sas_xport.py b/pandas/io/sas/sas_xport.py index 6cf248b748107..e4d9324ce5130 100644 --- a/pandas/io/sas/sas_xport.py +++ b/pandas/io/sas/sas_xport.py @@ -253,12 +253,9 @@ def __init__( self._chunksize = chunksize if isinstance(filepath_or_buffer, str): - ( - filepath_or_buffer, - encoding, - compression, - should_close, - ) = get_filepath_or_buffer(filepath_or_buffer, encoding=encoding) + filepath_or_buffer = 
get_filepath_or_buffer( + filepath_or_buffer, encoding=encoding + ).filepath_or_buffer if isinstance(filepath_or_buffer, (str, bytes)): self.filepath_or_buffer = open(filepath_or_buffer, "rb") diff --git a/pandas/io/sas/sasreader.py b/pandas/io/sas/sasreader.py index fffdebda8c87a..ae9457a8e3147 100644 --- a/pandas/io/sas/sasreader.py +++ b/pandas/io/sas/sasreader.py @@ -109,22 +109,26 @@ def read_sas( else: raise ValueError("unable to infer format of SAS file") - filepath_or_buffer, _, _, should_close = get_filepath_or_buffer( - filepath_or_buffer, encoding - ) + ioargs = get_filepath_or_buffer(filepath_or_buffer, encoding) reader: ReaderBase if format.lower() == "xport": from pandas.io.sas.sas_xport import XportReader reader = XportReader( - filepath_or_buffer, index=index, encoding=encoding, chunksize=chunksize + ioargs.filepath_or_buffer, + index=index, + encoding=ioargs.encoding, + chunksize=chunksize, ) elif format.lower() == "sas7bdat": from pandas.io.sas.sas7bdat import SAS7BDATReader reader = SAS7BDATReader( - filepath_or_buffer, index=index, encoding=encoding, chunksize=chunksize + ioargs.filepath_or_buffer, + index=index, + encoding=ioargs.encoding, + chunksize=chunksize, ) else: raise ValueError("unknown SAS format") @@ -134,6 +138,6 @@ def read_sas( data = reader.read() - if should_close: + if ioargs.should_close: reader.close() return data diff --git a/pandas/io/stata.py b/pandas/io/stata.py index ec3819f1673a8..0074ebc4decb0 100644 --- a/pandas/io/stata.py +++ b/pandas/io/stata.py @@ -1069,9 +1069,9 @@ def __init__( self._native_byteorder = _set_endianness(sys.byteorder) path_or_buf = stringify_path(path_or_buf) if isinstance(path_or_buf, str): - path_or_buf, encoding, _, should_close = get_filepath_or_buffer( + path_or_buf = get_filepath_or_buffer( path_or_buf, storage_options=storage_options - ) + ).filepath_or_buffer if isinstance(path_or_buf, (str, bytes)): self.path_or_buf = open(path_or_buf, "rb") @@ -1979,11 +1979,16 @@ def _open_file_binary_write( compression_typ, compression_args = get_compression_method(compression) compression_typ = infer_compression(fname, compression_typ) compression = dict(compression_args, method=compression_typ) - path_or_buf, _, compression, _ = get_filepath_or_buffer( + ioargs = get_filepath_or_buffer( fname, mode="wb", compression=compression, storage_options=storage_options, ) - f, _ = get_handle(path_or_buf, "wb", compression=compression, is_text=False) - return f, True, compression + f, _ = get_handle( + ioargs.filepath_or_buffer, + "wb", + compression=ioargs.compression, + is_text=False, + ) + return f, True, ioargs.compression else: raise TypeError("fname must be a binary file, buffer or path-like.") diff --git a/pandas/tests/io/test_common.py b/pandas/tests/io/test_common.py index 5ce2233bc0cd0..85a12a13d19fb 100644 --- a/pandas/tests/io/test_common.py +++ b/pandas/tests/io/test_common.py @@ -105,21 +105,21 @@ def test_infer_compression_from_path(self, extension, expected, path_type): compression = icom.infer_compression(path, compression="infer") assert compression == expected - def test_get_filepath_or_buffer_with_path(self): - filename = "~/sometest" - filepath_or_buffer, _, _, should_close = icom.get_filepath_or_buffer(filename) - assert filepath_or_buffer != filename - assert os.path.isabs(filepath_or_buffer) - assert os.path.expanduser(filename) == filepath_or_buffer - assert not should_close + @pytest.mark.parametrize("path_type", [str, CustomFSPath, Path]) + def test_get_filepath_or_buffer_with_path(self, path_type): + # 
ignore LocalPath: it creates strange paths: /absolute/~/sometest + filename = path_type("~/sometest") + ioargs = icom.get_filepath_or_buffer(filename) + assert ioargs.filepath_or_buffer != filename + assert os.path.isabs(ioargs.filepath_or_buffer) + assert os.path.expanduser(filename) == ioargs.filepath_or_buffer + assert not ioargs.should_close def test_get_filepath_or_buffer_with_buffer(self): input_buffer = StringIO() - filepath_or_buffer, _, _, should_close = icom.get_filepath_or_buffer( - input_buffer - ) - assert filepath_or_buffer == input_buffer - assert not should_close + ioargs = icom.get_filepath_or_buffer(input_buffer) + assert ioargs.filepath_or_buffer == input_buffer + assert not ioargs.should_close def test_iterator(self): reader = pd.read_csv(StringIO(self.data1), chunksize=1) @@ -389,6 +389,25 @@ def test_binary_mode(self): df.to_csv(path, mode="w+b") tm.assert_frame_equal(df, pd.read_csv(path, index_col=0)) + @pytest.mark.parametrize("encoding", ["utf-16", "utf-32"]) + @pytest.mark.parametrize("compression_", ["bz2", "xz"]) + def test_warning_missing_utf_bom(self, encoding, compression_): + """ + bz2 and xz do not write the byte order mark (BOM) for utf-16/32. + + https://stackoverflow.com/questions/55171439 + + GH 35681 + """ + df = tm.makeDataFrame() + with tm.ensure_clean() as path: + with tm.assert_produces_warning(UnicodeWarning): + df.to_csv(path, compression=compression_, encoding=encoding) + + # reading should fail (otherwise we wouldn't need the warning) + with pytest.raises(Exception): + pd.read_csv(path, compression=compression_, encoding=encoding) + def test_is_fsspec_url(): assert icom.is_fsspec_url("gcs://pandas/somethingelse.com") diff --git a/pandas/tests/io/test_compression.py b/pandas/tests/io/test_compression.py index bc14b485f75e5..31e9ad4cf4416 100644 --- a/pandas/tests/io/test_compression.py +++ b/pandas/tests/io/test_compression.py @@ -124,6 +124,8 @@ def test_compression_binary(compression_only): GH22555 """ df = tm.makeDataFrame() + + # with a file with tm.ensure_clean() as path: with open(path, mode="wb") as file: df.to_csv(file, mode="wb", compression=compression_only) @@ -132,6 +134,14 @@ def test_compression_binary(compression_only): df, pd.read_csv(path, index_col=0, compression=compression_only) ) + # with BytesIO + file = io.BytesIO() + df.to_csv(file, mode="wb", compression=compression_only) + file.seek(0) # file shouldn't be closed + tm.assert_frame_equal( + df, pd.read_csv(file, index_col=0, compression=compression_only) + ) + def test_gzip_reproducibility_file_name(): """ diff --git a/pandas/tests/io/test_gcs.py b/pandas/tests/io/test_gcs.py index eacf4fa08545d..18b5743a3375a 100644 --- a/pandas/tests/io/test_gcs.py +++ b/pandas/tests/io/test_gcs.py @@ -9,12 +9,32 @@ from pandas.util import _test_decorators as td -@td.skip_if_no("gcsfs") -def test_read_csv_gcs(monkeypatch): +@pytest.fixture +def gcs_buffer(monkeypatch): + """Emulate GCS using a binary buffer.""" from fsspec import AbstractFileSystem, registry registry.target.clear() # noqa # remove state + gcs_buffer = BytesIO() + gcs_buffer.close = lambda: True + + class MockGCSFileSystem(AbstractFileSystem): + def open(*args, **kwargs): + gcs_buffer.seek(0) + return gcs_buffer + + monkeypatch.setattr("gcsfs.GCSFileSystem", MockGCSFileSystem) + + return gcs_buffer + + +@td.skip_if_no("gcsfs") +def test_read_csv_gcs(gcs_buffer): + from fsspec import registry + + registry.target.clear() # noqa # remove state + df1 = DataFrame( { "int": [1, 3], @@ -24,21 +44,19 @@ def 
test_read_csv_gcs(monkeypatch): } ) - class MockGCSFileSystem(AbstractFileSystem): - def open(*args, **kwargs): - return BytesIO(df1.to_csv(index=False).encode()) + gcs_buffer.write(df1.to_csv(index=False).encode()) - monkeypatch.setattr("gcsfs.GCSFileSystem", MockGCSFileSystem) df2 = read_csv("gs://test/test.csv", parse_dates=["dt"]) tm.assert_frame_equal(df1, df2) @td.skip_if_no("gcsfs") -def test_to_csv_gcs(monkeypatch): - from fsspec import AbstractFileSystem, registry +def test_to_csv_gcs(gcs_buffer): + from fsspec import registry registry.target.clear() # noqa # remove state + df1 = DataFrame( { "int": [1, 3], @@ -47,29 +65,57 @@ def test_to_csv_gcs(monkeypatch): "dt": date_range("2018-06-18", periods=2), } ) - s = BytesIO() - s.close = lambda: True - - class MockGCSFileSystem(AbstractFileSystem): - def open(*args, **kwargs): - s.seek(0) - return s - monkeypatch.setattr("gcsfs.GCSFileSystem", MockGCSFileSystem) df1.to_csv("gs://test/test.csv", index=True) - def mock_get_filepath_or_buffer(*args, **kwargs): - return BytesIO(df1.to_csv(index=True).encode()), None, None, False - - monkeypatch.setattr( - "pandas.io.common.get_filepath_or_buffer", mock_get_filepath_or_buffer - ) - df2 = read_csv("gs://test/test.csv", parse_dates=["dt"], index_col=0) tm.assert_frame_equal(df1, df2) +@td.skip_if_no("gcsfs") +@pytest.mark.parametrize("encoding", ["utf-8", "cp1251"]) +def test_to_csv_compression_encoding_gcs(gcs_buffer, compression_only, encoding): + """ + Compression and encoding should work with GCS. + + GH 35677 (to_csv, compression), GH 26124 (to_csv, encoding), and + GH 32392 (read_csv, encoding) + """ + from fsspec import registry + + registry.target.clear() # noqa # remove state + df = tm.makeDataFrame() + + # reference of compressed and encoded file + compression = {"method": compression_only} + if compression_only == "gzip": + compression["mtime"] = 1 # be reproducible + buffer = BytesIO() + df.to_csv(buffer, compression=compression, encoding=encoding, mode="wb") + + # write compressed file with explicit compression + path_gcs = "gs://test/test.csv" + df.to_csv(path_gcs, compression=compression, encoding=encoding) + assert gcs_buffer.getvalue() == buffer.getvalue() + read_df = read_csv( + path_gcs, index_col=0, compression=compression_only, encoding=encoding + ) + tm.assert_frame_equal(df, read_df) + + # write compressed file with implicit compression + if compression_only == "gzip": + compression_only = "gz" + compression["method"] = "infer" + path_gcs += f".{compression_only}" + df.to_csv( + path_gcs, compression=compression, encoding=encoding, + ) + assert gcs_buffer.getvalue() == buffer.getvalue() + read_df = read_csv(path_gcs, index_col=0, compression="infer", encoding=encoding) + tm.assert_frame_equal(df, read_df) + + @td.skip_if_no("fastparquet") @td.skip_if_no("gcsfs") def test_to_parquet_gcs_new_file(monkeypatch, tmpdir): From 309018c7ce7e9e31708f58af971ed92823921493 Mon Sep 17 00:00:00 2001 From: Simon Hawkins Date: Thu, 3 Sep 2020 11:20:51 +0100 Subject: [PATCH 410/632] CI: MyPy fixup (#36085) --- pandas/io/common.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/pandas/io/common.py b/pandas/io/common.py index 97dbc7f1031a2..9328f90ce67a3 100644 --- a/pandas/io/common.py +++ b/pandas/io/common.py @@ -165,11 +165,16 @@ def is_fsspec_url(url: FilePathOrBuffer) -> bool: ) -def get_filepath_or_buffer( # type: ignore[assignment] +# https://github.com/python/mypy/issues/8708 +# error: Incompatible default for argument "encoding" (default has 
type "None", +# argument has type "str") +# error: Incompatible default for argument "mode" (default has type "None", +# argument has type "str") +def get_filepath_or_buffer( filepath_or_buffer: FilePathOrBuffer, - encoding: EncodingVar = None, + encoding: EncodingVar = None, # type: ignore[assignment] compression: CompressionOptions = None, - mode: ModeVar = None, + mode: ModeVar = None, # type: ignore[assignment] storage_options: StorageOptions = None, ) -> IOargs[ModeVar, EncodingVar]: """ From f1d9497d00f9c6e2bb72ec8c2637b19ef64713f1 Mon Sep 17 00:00:00 2001 From: Albert Villanova del Moral <8515462+albertvillanova@users.noreply.github.com> Date: Thu, 3 Sep 2020 18:14:09 +0200 Subject: [PATCH 411/632] Update contributing_docstring.rst (#36087) --- doc/source/development/contributing_docstring.rst | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/doc/source/development/contributing_docstring.rst b/doc/source/development/contributing_docstring.rst index 0c780ad5f5847..33f30e1d97512 100644 --- a/doc/source/development/contributing_docstring.rst +++ b/doc/source/development/contributing_docstring.rst @@ -32,18 +32,18 @@ The next example gives an idea of what a docstring looks like: Parameters ---------- num1 : int - First number to add + First number to add. num2 : int - Second number to add + Second number to add. Returns ------- int - The sum of `num1` and `num2` + The sum of `num1` and `num2`. See Also -------- - subtract : Subtract one integer from another + subtract : Subtract one integer from another. Examples -------- @@ -998,4 +998,4 @@ mapping function names to docstrings. Wherever possible, we prefer using See ``pandas.core.generic.NDFrame.fillna`` for an example template, and ``pandas.core.series.Series.fillna`` and ``pandas.core.generic.frame.fillna`` -for the filled versions. \ No newline at end of file +for the filled versions. From a0c8168f87bfb191cb7a139464d06293cf4ebd65 Mon Sep 17 00:00:00 2001 From: Simon Hawkins Date: Thu, 3 Sep 2020 17:29:46 +0100 Subject: [PATCH 412/632] DOC: minor fixes to whatsnew\v1.1.2.rst (#36086) --- doc/source/whatsnew/v1.1.2.rst | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/doc/source/whatsnew/v1.1.2.rst b/doc/source/whatsnew/v1.1.2.rst index c740c7b3882c9..ac9fe9d2fca26 100644 --- a/doc/source/whatsnew/v1.1.2.rst +++ b/doc/source/whatsnew/v1.1.2.rst @@ -30,9 +30,9 @@ Bug fixes - Bug in :meth:`DataFrame.eval` with ``object`` dtype column binary operations (:issue:`35794`) - Bug in :class:`Series` constructor raising a ``TypeError`` when constructing sparse datetime64 dtypes (:issue:`35762`) - Bug in :meth:`DataFrame.apply` with ``result_type="reduce"`` returning with incorrect index (:issue:`35683`) -- Bug in :meth:`DateTimeIndex.format` and :meth:`PeriodIndex.format` with ``name=True`` setting the first item to ``"None"`` where it should bw ``""`` (:issue:`35712`) +- Bug in :meth:`DateTimeIndex.format` and :meth:`PeriodIndex.format` with ``name=True`` setting the first item to ``"None"`` where it should be ``""`` (:issue:`35712`) - Bug in :meth:`Float64Index.__contains__` incorrectly raising ``TypeError`` instead of returning ``False`` (:issue:`35788`) -- Bug in :class:`DataFrame` indexing returning an incorrect :class:`Series` in some cases when the series has been altered and a cache not invalidated (:issue:`36051`) +- Bug in :class:`DataFrame` indexing returning an incorrect :class:`Series` in some cases when the series has been altered and a cache not invalidated (:issue:`33675`) .. 
--------------------------------------------------------------------------- @@ -40,7 +40,7 @@ Bug fixes Other ~~~~~ -- :meth:`factorize` now supports ``na_sentinel=None`` to include NaN in the uniques of the values and remove ``dropna`` keyword which was unintentionally exposed to public facing API in 1.1 version from :meth:`factorize`(:issue:`35667`) +- :meth:`factorize` now supports ``na_sentinel=None`` to include NaN in the uniques of the values and remove ``dropna`` keyword which was unintentionally exposed to public facing API in 1.1 version from :meth:`factorize` (:issue:`35667`) .. --------------------------------------------------------------------------- From 8b38239783e9d51dc46428384cb748a3150e758b Mon Sep 17 00:00:00 2001 From: Sarthak Vineet Kumar Date: Thu, 3 Sep 2020 22:03:46 +0530 Subject: [PATCH 413/632] CLN remove unnecessary trailing commas in pandas/io (#36052) --- pandas/io/sas/sas_xport.py | 2 +- pandas/io/stata.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pandas/io/sas/sas_xport.py b/pandas/io/sas/sas_xport.py index e4d9324ce5130..1a4ba544f5d59 100644 --- a/pandas/io/sas/sas_xport.py +++ b/pandas/io/sas/sas_xport.py @@ -244,7 +244,7 @@ class XportReader(ReaderBase, abc.Iterator): __doc__ = _xport_reader_doc def __init__( - self, filepath_or_buffer, index=None, encoding="ISO-8859-1", chunksize=None, + self, filepath_or_buffer, index=None, encoding="ISO-8859-1", chunksize=None ): self._encoding = encoding diff --git a/pandas/io/stata.py b/pandas/io/stata.py index 0074ebc4decb0..34d520004cc65 100644 --- a/pandas/io/stata.py +++ b/pandas/io/stata.py @@ -1980,7 +1980,7 @@ def _open_file_binary_write( compression_typ = infer_compression(fname, compression_typ) compression = dict(compression_args, method=compression_typ) ioargs = get_filepath_or_buffer( - fname, mode="wb", compression=compression, storage_options=storage_options, + fname, mode="wb", compression=compression, storage_options=storage_options ) f, _ = get_handle( ioargs.filepath_or_buffer, From f3408ea660f27b304589f3567ae90263165645b7 Mon Sep 17 00:00:00 2001 From: Simon Hawkins Date: Thu, 3 Sep 2020 17:35:04 +0100 Subject: [PATCH 414/632] DOC: add mypy version to whatsnew\v1.2.0.rst (#36090) --- doc/source/whatsnew/v1.2.0.rst | 2 ++ 1 file changed, 2 insertions(+) diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index b07351d05defb..e65daa439a225 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -136,6 +136,8 @@ If installed, we now require: +-----------------+-----------------+----------+---------+ | pytest (dev) | 5.0.1 | | X | +-----------------+-----------------+----------+---------+ +| mypy (dev) | 0.782 | | X | ++-----------------+-----------------+----------+---------+ For `optional libraries `_ the general recommendation is to use the latest version. The following table lists the lowest version per library that is currently being tested throughout the development of pandas. From 497ede8065e81065636908fecd31432a91c1ff64 Mon Sep 17 00:00:00 2001 From: timhunderwood <43515959+timhunderwood@users.noreply.github.com> Date: Thu, 3 Sep 2020 17:58:36 +0100 Subject: [PATCH 415/632] DOC: Add Notes about difference to numpy behaviour for ddof in std() GH35985 (#35986) * DOC: Add Notes about difference to numpy behaviour for ddof. GH35985. * remove trailing whitespace. * Update pandas/core/generic.py wording change. 
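For reference, a minimal sketch of the behaviour the added Note documents (illustrative only, not part of the patch; the sample values are made up):

    import numpy as np
    import pandas as pd

    s = pd.Series([1.0, 2.0, 3.0, 4.0])
    s.std()               # ~1.291, sample std with the pandas default ddof=1
    np.std(s.to_numpy())  # ~1.118, population std with the numpy default ddof=0
    s.std(ddof=0)         # ~1.118, matches numpy.std
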
Co-authored-by: Daniel Saxton <2658661+dsaxton@users.noreply.github.com> * Make wording simpler and remove reference to normalization. * Make wording simpler and remove reference to normalization. Co-authored-by: Daniel Saxton <2658661+dsaxton@users.noreply.github.com> --- pandas/core/generic.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/pandas/core/generic.py b/pandas/core/generic.py index e22f9567ee955..6c8780a0fc186 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -10832,7 +10832,12 @@ def _doc_parms(cls): Returns ------- -%(name1)s or %(name2)s (if level specified)\n""" +%(name1)s or %(name2)s (if level specified) + +Notes +----- +To have the same behaviour as `numpy.std`, use `ddof=0` (instead of the +default `ddof=1`)\n""" _bool_doc = """ %(desc)s From b80dcbcdc9aac4e4ce33205c4742f2d838880135 Mon Sep 17 00:00:00 2001 From: Jeet Parekh <12874561+jeet-parekh@users.noreply.github.com> Date: Fri, 4 Sep 2020 19:58:15 +0530 Subject: [PATCH 416/632] BUG: groupby and agg on read-only array gives ValueError: buffer source array is read-only (#36061) --- doc/source/whatsnew/v1.1.2.rst | 2 +- pandas/_libs/groupby.pyx | 32 ++++++++------- pandas/tests/groupby/aggregate/test_cython.py | 41 +++++++++++++++++++ 3 files changed, 60 insertions(+), 15 deletions(-) diff --git a/doc/source/whatsnew/v1.1.2.rst b/doc/source/whatsnew/v1.1.2.rst index ac9fe9d2fca26..7195f3d7a3885 100644 --- a/doc/source/whatsnew/v1.1.2.rst +++ b/doc/source/whatsnew/v1.1.2.rst @@ -18,7 +18,7 @@ Fixed regressions - Fix regression in updating a column inplace (e.g. using ``df['col'].fillna(.., inplace=True)``) (:issue:`35731`) - Performance regression for :meth:`RangeIndex.format` (:issue:`35712`) - Regression in :meth:`DataFrame.replace` where a ``TypeError`` would be raised when attempting to replace elements of type :class:`Interval` (:issue:`35931`) -- +- Fixed regression in :meth:`DataFrameGroupBy.agg` where a ``ValueError: buffer source array is read-only`` would be raised when the underlying array is read-only (:issue:`36014`) .. 
--------------------------------------------------------------------------- diff --git a/pandas/_libs/groupby.pyx b/pandas/_libs/groupby.pyx index 38cb973d6dde9..a83634aad3ce2 100644 --- a/pandas/_libs/groupby.pyx +++ b/pandas/_libs/groupby.pyx @@ -229,7 +229,7 @@ def group_cumprod_float64(float64_t[:, :] out, @cython.boundscheck(False) @cython.wraparound(False) def group_cumsum(numeric[:, :] out, - numeric[:, :] values, + ndarray[numeric, ndim=2] values, const int64_t[:] labels, int ngroups, is_datetimelike, @@ -472,7 +472,7 @@ ctypedef fused complexfloating_t: @cython.boundscheck(False) def _group_add(complexfloating_t[:, :] out, int64_t[:] counts, - complexfloating_t[:, :] values, + ndarray[complexfloating_t, ndim=2] values, const int64_t[:] labels, Py_ssize_t min_count=0): """ @@ -483,8 +483,9 @@ def _group_add(complexfloating_t[:, :] out, complexfloating_t val, count complexfloating_t[:, :] sumx int64_t[:, :] nobs + Py_ssize_t len_values = len(values), len_labels = len(labels) - if len(values) != len(labels): + if len_values != len_labels: raise ValueError("len(index) != len(labels)") nobs = np.zeros((out).shape, dtype=np.int64) @@ -530,7 +531,7 @@ group_add_complex128 = _group_add['double complex'] @cython.boundscheck(False) def _group_prod(floating[:, :] out, int64_t[:] counts, - floating[:, :] values, + ndarray[floating, ndim=2] values, const int64_t[:] labels, Py_ssize_t min_count=0): """ @@ -541,8 +542,9 @@ def _group_prod(floating[:, :] out, floating val, count floating[:, :] prodx int64_t[:, :] nobs + Py_ssize_t len_values = len(values), len_labels = len(labels) - if not len(values) == len(labels): + if len_values != len_labels: raise ValueError("len(index) != len(labels)") nobs = np.zeros((out).shape, dtype=np.int64) @@ -582,7 +584,7 @@ group_prod_float64 = _group_prod['double'] @cython.cdivision(True) def _group_var(floating[:, :] out, int64_t[:] counts, - floating[:, :] values, + ndarray[floating, ndim=2] values, const int64_t[:] labels, Py_ssize_t min_count=-1, int64_t ddof=1): @@ -591,10 +593,11 @@ def _group_var(floating[:, :] out, floating val, ct, oldmean floating[:, :] mean int64_t[:, :] nobs + Py_ssize_t len_values = len(values), len_labels = len(labels) assert min_count == -1, "'min_count' only used in add and prod" - if not len(values) == len(labels): + if len_values != len_labels: raise ValueError("len(index) != len(labels)") nobs = np.zeros((out).shape, dtype=np.int64) @@ -639,7 +642,7 @@ group_var_float64 = _group_var['double'] @cython.boundscheck(False) def _group_mean(floating[:, :] out, int64_t[:] counts, - floating[:, :] values, + ndarray[floating, ndim=2] values, const int64_t[:] labels, Py_ssize_t min_count=-1): cdef: @@ -647,10 +650,11 @@ def _group_mean(floating[:, :] out, floating val, count floating[:, :] sumx int64_t[:, :] nobs + Py_ssize_t len_values = len(values), len_labels = len(labels) assert min_count == -1, "'min_count' only used in add and prod" - if not len(values) == len(labels): + if len_values != len_labels: raise ValueError("len(index) != len(labels)") nobs = np.zeros((out).shape, dtype=np.int64) @@ -689,7 +693,7 @@ group_mean_float64 = _group_mean['double'] @cython.boundscheck(False) def _group_ohlc(floating[:, :] out, int64_t[:] counts, - floating[:, :] values, + ndarray[floating, ndim=2] values, const int64_t[:] labels, Py_ssize_t min_count=-1): """ @@ -740,7 +744,7 @@ group_ohlc_float64 = _group_ohlc['double'] @cython.boundscheck(False) @cython.wraparound(False) def group_quantile(ndarray[float64_t] out, - numeric[:] values, + 
ndarray[numeric, ndim=1] values, ndarray[int64_t] labels, ndarray[uint8_t] mask, float64_t q, @@ -1072,7 +1076,7 @@ def group_nth(rank_t[:, :] out, @cython.boundscheck(False) @cython.wraparound(False) def group_rank(float64_t[:, :] out, - rank_t[:, :] values, + ndarray[rank_t, ndim=2] values, const int64_t[:] labels, int ngroups, bint is_datetimelike, object ties_method="average", @@ -1424,7 +1428,7 @@ def group_min(groupby_t[:, :] out, @cython.boundscheck(False) @cython.wraparound(False) def group_cummin(groupby_t[:, :] out, - groupby_t[:, :] values, + ndarray[groupby_t, ndim=2] values, const int64_t[:] labels, int ngroups, bint is_datetimelike): @@ -1484,7 +1488,7 @@ def group_cummin(groupby_t[:, :] out, @cython.boundscheck(False) @cython.wraparound(False) def group_cummax(groupby_t[:, :] out, - groupby_t[:, :] values, + ndarray[groupby_t, ndim=2] values, const int64_t[:] labels, int ngroups, bint is_datetimelike): diff --git a/pandas/tests/groupby/aggregate/test_cython.py b/pandas/tests/groupby/aggregate/test_cython.py index 5ddda264642de..87ebd8b5a27fb 100644 --- a/pandas/tests/groupby/aggregate/test_cython.py +++ b/pandas/tests/groupby/aggregate/test_cython.py @@ -236,3 +236,44 @@ def test_cython_with_timestamp_and_nat(op, data): result = df.groupby("a").aggregate(op) tm.assert_frame_equal(expected, result) + + +@pytest.mark.parametrize( + "agg", + [ + "min", + "max", + "count", + "sum", + "prod", + "var", + "mean", + "median", + "ohlc", + "cumprod", + "cumsum", + "shift", + "any", + "all", + "quantile", + "first", + "last", + "rank", + "cummin", + "cummax", + ], +) +def test_read_only_buffer_source_agg(agg): + # https://github.com/pandas-dev/pandas/issues/36014 + df = DataFrame( + { + "sepal_length": [5.1, 4.9, 4.7, 4.6, 5.0], + "species": ["setosa", "setosa", "setosa", "setosa", "setosa"], + } + ) + df._mgr.blocks[0].values.flags.writeable = False + + result = df.groupby(["species"]).agg({"sepal_length": agg}) + expected = df.copy().groupby(["species"]).agg({"sepal_length": agg}) + + tm.assert_equal(result, expected) From b53dc8f8c0c9b1f5d958cd412289d96ce0532785 Mon Sep 17 00:00:00 2001 From: Fangchen Li Date: Fri, 4 Sep 2020 09:32:41 -0500 Subject: [PATCH 417/632] CLN: use IS64 instead of is_platform_32bit #36108 (#36109) --- pandas/_libs/missing.pyx | 4 ++-- pandas/compat/__init__.py | 24 +------------------ pandas/tests/frame/test_api.py | 5 ++-- .../indexes/interval/test_interval_tree.py | 8 +++---- pandas/tests/indexing/test_coercion.py | 4 ++-- pandas/tests/io/formats/test_format.py | 4 ++-- pandas/tests/io/json/test_pandas.py | 8 +++---- pandas/tests/io/json/test_ujson.py | 12 ++++------ pandas/tests/test_algos.py | 6 ++--- pandas/util/_test_decorators.py | 4 ++-- 10 files changed, 26 insertions(+), 53 deletions(-) diff --git a/pandas/_libs/missing.pyx b/pandas/_libs/missing.pyx index 771e8053ac9be..abf38265ddc6d 100644 --- a/pandas/_libs/missing.pyx +++ b/pandas/_libs/missing.pyx @@ -18,7 +18,7 @@ from pandas._libs.tslibs.nattype cimport ( from pandas._libs.tslibs.np_datetime cimport get_datetime64_value, get_timedelta64_value from pandas._libs.ops_dispatch import maybe_dispatch_ufunc_to_dunder_op -from pandas.compat import is_platform_32bit +from pandas.compat import IS64 cdef: float64_t INF = np.inf @@ -26,7 +26,7 @@ cdef: int64_t NPY_NAT = util.get_nat() - bint is_32bit = is_platform_32bit() + bint is_32bit = not IS64 cpdef bint checknull(object val): diff --git a/pandas/compat/__init__.py b/pandas/compat/__init__.py index ab2835932c95d..f2018a5c01711 100644 --- 
a/pandas/compat/__init__.py +++ b/pandas/compat/__init__.py @@ -8,7 +8,6 @@ * platform checker """ import platform -import struct import sys import warnings @@ -20,14 +19,6 @@ IS64 = sys.maxsize > 2 ** 32 -# ---------------------------------------------------------------------------- -# functions largely based / taken from the six module - -# Much of the code in this module comes from Benjamin Peterson's six library. -# The license for this library can be found in LICENSES/SIX and the code can be -# found at https://bitbucket.org/gutworth/six - - def set_function_name(f: F, name: str, cls) -> F: """ Bind the name/qualname attributes of the function. @@ -38,7 +29,6 @@ def set_function_name(f: F, name: str, cls) -> F: return f -# https://github.com/pandas-dev/pandas/pull/9123 def is_platform_little_endian() -> bool: """ Checking if the running platform is little endian. @@ -72,7 +62,7 @@ def is_platform_linux() -> bool: bool True if the running platform is linux. """ - return sys.platform == "linux2" + return sys.platform == "linux" def is_platform_mac() -> bool: @@ -87,18 +77,6 @@ def is_platform_mac() -> bool: return sys.platform == "darwin" -def is_platform_32bit() -> bool: - """ - Checking if the running platform is 32-bit. - - Returns - ------- - bool - True if the running platform is 32-bit. - """ - return struct.calcsize("P") * 8 < 64 - - def _import_lzma(): """ Importing the `lzma` module. diff --git a/pandas/tests/frame/test_api.py b/pandas/tests/frame/test_api.py index b1c31a6f90133..8b5d0c7ade56c 100644 --- a/pandas/tests/frame/test_api.py +++ b/pandas/tests/frame/test_api.py @@ -6,11 +6,12 @@ import numpy as np import pytest +from pandas.compat import IS64, is_platform_windows import pandas.util._test_decorators as td from pandas.util._test_decorators import async_mark, skip_if_no import pandas as pd -from pandas import Categorical, DataFrame, Series, compat, date_range, timedelta_range +from pandas import Categorical, DataFrame, Series, date_range, timedelta_range import pandas._testing as tm @@ -254,7 +255,7 @@ def test_itertuples(self, float_frame): assert list(dfaa.itertuples()) == [(0, 1, 1), (1, 2, 2), (2, 3, 3)] # repr with int on 32-bit/windows - if not (compat.is_platform_windows() or compat.is_platform_32bit()): + if not (is_platform_windows() or not IS64): assert ( repr(list(df.itertuples(name=None))) == "[(0, 1, 4), (1, 2, 5), (2, 3, 6)]" diff --git a/pandas/tests/indexes/interval/test_interval_tree.py b/pandas/tests/indexes/interval/test_interval_tree.py index 476ec1dd10b4b..ab6eac482211d 100644 --- a/pandas/tests/indexes/interval/test_interval_tree.py +++ b/pandas/tests/indexes/interval/test_interval_tree.py @@ -4,8 +4,8 @@ import pytest from pandas._libs.interval import IntervalTree +from pandas.compat import IS64 -from pandas import compat import pandas._testing as tm @@ -14,9 +14,7 @@ def skipif_32bit(param): Skip parameters in a parametrize on 32bit systems. Specifically used here to skip leaf_size parameters related to GH 23440. 
""" - marks = pytest.mark.skipif( - compat.is_platform_32bit(), reason="GH 23440: int type mismatch on 32bit" - ) + marks = pytest.mark.skipif(not IS64, reason="GH 23440: int type mismatch on 32bit") return pytest.param(param, marks=marks) @@ -181,7 +179,7 @@ def test_is_overlapping_trivial(self, closed, left, right): tree = IntervalTree(left, right, closed=closed) assert tree.is_overlapping is False - @pytest.mark.skipif(compat.is_platform_32bit(), reason="GH 23440") + @pytest.mark.skipif(not IS64, reason="GH 23440") def test_construction_overflow(self): # GH 25485 left, right = np.arange(101, dtype="int64"), [np.iinfo(np.int64).max] * 101 diff --git a/pandas/tests/indexing/test_coercion.py b/pandas/tests/indexing/test_coercion.py index 1512c88a68778..1c5f00ff754a4 100644 --- a/pandas/tests/indexing/test_coercion.py +++ b/pandas/tests/indexing/test_coercion.py @@ -5,7 +5,7 @@ import numpy as np import pytest -import pandas.compat as compat +from pandas.compat import IS64, is_platform_windows import pandas as pd import pandas._testing as tm @@ -1041,7 +1041,7 @@ def test_replace_series(self, how, to_key, from_key): from_key == "complex128" and to_key in ("int64", "float64") ): - if compat.is_platform_32bit() or compat.is_platform_windows(): + if not IS64 or is_platform_windows(): pytest.skip(f"32-bit platform buggy: {from_key} -> {to_key}") # Expected: do not downcast by replacement diff --git a/pandas/tests/io/formats/test_format.py b/pandas/tests/io/formats/test_format.py index 1bbfe4d7d74af..22942ed75d0f3 100644 --- a/pandas/tests/io/formats/test_format.py +++ b/pandas/tests/io/formats/test_format.py @@ -18,7 +18,7 @@ import pytest import pytz -from pandas.compat import is_platform_32bit, is_platform_windows +from pandas.compat import IS64, is_platform_windows import pandas.util._test_decorators as td import pandas as pd @@ -41,7 +41,7 @@ import pandas.io.formats.format as fmt import pandas.io.formats.printing as printing -use_32bit_repr = is_platform_windows() or is_platform_32bit() +use_32bit_repr = is_platform_windows() or not IS64 @pytest.fixture(params=["string", "pathlike", "buffer"]) diff --git a/pandas/tests/io/json/test_pandas.py b/pandas/tests/io/json/test_pandas.py index 2022abbaee323..59d64e1a6e909 100644 --- a/pandas/tests/io/json/test_pandas.py +++ b/pandas/tests/io/json/test_pandas.py @@ -9,7 +9,7 @@ import numpy as np import pytest -from pandas.compat import is_platform_32bit, is_platform_windows +from pandas.compat import IS64, is_platform_windows import pandas.util._test_decorators as td import pandas as pd @@ -154,7 +154,7 @@ def test_roundtrip_intframe(self, orient, convert_axes, numpy, dtype, int_frame) expected = int_frame if ( numpy - and (is_platform_32bit() or is_platform_windows()) + and (not IS64 or is_platform_windows()) and not dtype and orient != "split" ): @@ -361,9 +361,7 @@ def test_frame_infinity(self, orient, inf, dtype): result = read_json(df.to_json(), dtype=dtype) assert np.isnan(result.iloc[0, 2]) - @pytest.mark.skipif( - is_platform_32bit(), reason="not compliant on 32-bit, xref #15865" - ) + @pytest.mark.skipif(not IS64, reason="not compliant on 32-bit, xref #15865") @pytest.mark.parametrize( "value,precision,expected_val", [ diff --git a/pandas/tests/io/json/test_ujson.py b/pandas/tests/io/json/test_ujson.py index f969cbca9f427..e2007e07c572a 100644 --- a/pandas/tests/io/json/test_ujson.py +++ b/pandas/tests/io/json/test_ujson.py @@ -15,7 +15,7 @@ import pandas._libs.json as ujson from pandas._libs.tslib import Timestamp -import pandas.compat as 
compat +from pandas.compat import IS64, is_platform_windows from pandas import DataFrame, DatetimeIndex, Index, NaT, Series, Timedelta, date_range import pandas._testing as tm @@ -53,7 +53,7 @@ def get_int32_compat_dtype(numpy, orient): # See GH#32527 dtype = np.int64 if not ((numpy is None or orient == "index") or (numpy is True and orient is None)): - if compat.is_platform_windows(): + if is_platform_windows(): dtype = np.int32 else: dtype = np.intp @@ -62,9 +62,7 @@ def get_int32_compat_dtype(numpy, orient): class TestUltraJSONTests: - @pytest.mark.skipif( - compat.is_platform_32bit(), reason="not compliant on 32-bit, xref #15865" - ) + @pytest.mark.skipif(not IS64, reason="not compliant on 32-bit, xref #15865") def test_encode_decimal(self): sut = decimal.Decimal("1337.1337") encoded = ujson.encode(sut, double_precision=15) @@ -561,7 +559,7 @@ def test_encode_long_conversion(self): assert long_input == ujson.decode(output) @pytest.mark.parametrize("bigNum", [sys.maxsize + 1, -(sys.maxsize + 2)]) - @pytest.mark.xfail(not compat.IS64, reason="GH-35288") + @pytest.mark.xfail(not IS64, reason="GH-35288") def test_dumps_ints_larger_than_maxsize(self, bigNum): # GH34395 bigNum = sys.maxsize + 1 @@ -703,7 +701,7 @@ def test_int_array(self, any_int_dtype): tm.assert_numpy_array_equal(arr_input, arr_output) def test_int_max(self, any_int_dtype): - if any_int_dtype in ("int64", "uint64") and compat.is_platform_32bit(): + if any_int_dtype in ("int64", "uint64") and not IS64: pytest.skip("Cannot test 64-bit integer on 32-bit platform") klass = np.dtype(any_int_dtype).type diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py index 59c6a5d53e7bb..72a679d980641 100644 --- a/pandas/tests/test_algos.py +++ b/pandas/tests/test_algos.py @@ -8,6 +8,7 @@ from pandas._libs import algos as libalgos, hashtable as ht from pandas._libs.groupby import group_var_float32, group_var_float64 +from pandas.compat import IS64 from pandas.compat.numpy import np_array_datetime64_compat import pandas.util._test_decorators as td @@ -29,7 +30,6 @@ IntervalIndex, Series, Timestamp, - compat, ) import pandas._testing as tm import pandas.core.algorithms as algos @@ -1137,7 +1137,7 @@ def test_dropna(self): ) # 32-bit linux has a different ordering - if not compat.is_platform_32bit(): + if IS64: result = Series([10.3, 5.0, 5.0, None]).value_counts(dropna=False) expected = Series([2, 1, 1], index=[5.0, 10.3, np.nan]) tm.assert_series_equal(result, expected) @@ -1170,7 +1170,7 @@ def test_value_counts_uint64(self): result = algos.value_counts(arr) # 32-bit linux has a different ordering - if not compat.is_platform_32bit(): + if IS64: tm.assert_series_equal(result, expected) diff --git a/pandas/util/_test_decorators.py b/pandas/util/_test_decorators.py index ca7b99492bbf7..78facd6694635 100644 --- a/pandas/util/_test_decorators.py +++ b/pandas/util/_test_decorators.py @@ -31,7 +31,7 @@ def test_foo(): import numpy as np import pytest -from pandas.compat import is_platform_32bit, is_platform_windows +from pandas.compat import IS64, is_platform_windows from pandas.compat._optional import import_optional_dependency from pandas.compat.numpy import _np_version @@ -180,7 +180,7 @@ def skip_if_no(package: str, min_version: Optional[str] = None): _skip_if_no_mpl(), reason="Missing matplotlib dependency" ) skip_if_mpl = pytest.mark.skipif(not _skip_if_no_mpl(), reason="matplotlib is present") -skip_if_32bit = pytest.mark.skipif(is_platform_32bit(), reason="skipping for 32 bit") +skip_if_32bit = pytest.mark.skipif(not IS64, 
reason="skipping for 32 bit") skip_if_windows = pytest.mark.skipif(is_platform_windows(), reason="Running on Windows") skip_if_windows_python_3 = pytest.mark.skipif( is_platform_windows(), reason="not used on win32" From 9ad63636cad78b6cfbbdb240c1dfcb72158eeb69 Mon Sep 17 00:00:00 2001 From: Joris Van den Bossche Date: Fri, 4 Sep 2020 17:17:09 +0200 Subject: [PATCH 418/632] REGR: ensure closed attribute of IntervalIndex is preserved in pickle roundtrip (#36118) --- doc/source/whatsnew/v1.1.2.rst | 3 ++- pandas/core/indexes/interval.py | 2 +- pandas/tests/indexes/interval/test_interval.py | 7 +++++++ 3 files changed, 10 insertions(+), 2 deletions(-) diff --git a/doc/source/whatsnew/v1.1.2.rst b/doc/source/whatsnew/v1.1.2.rst index 7195f3d7a3885..232d0c4b4bbcd 100644 --- a/doc/source/whatsnew/v1.1.2.rst +++ b/doc/source/whatsnew/v1.1.2.rst @@ -18,8 +18,9 @@ Fixed regressions - Fix regression in updating a column inplace (e.g. using ``df['col'].fillna(.., inplace=True)``) (:issue:`35731`) - Performance regression for :meth:`RangeIndex.format` (:issue:`35712`) - Regression in :meth:`DataFrame.replace` where a ``TypeError`` would be raised when attempting to replace elements of type :class:`Interval` (:issue:`35931`) +- Fix regression in pickle roundtrip of the ``closed`` attribute of :class:`IntervalIndex` (:issue:`35658`) - Fixed regression in :meth:`DataFrameGroupBy.agg` where a ``ValueError: buffer source array is read-only`` would be raised when the underlying array is read-only (:issue:`36014`) - +- .. --------------------------------------------------------------------------- diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index 08f9bd51de77b..419ff81a2a478 100644 --- a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -193,7 +193,7 @@ def func(intvidx_self, other, sort=False): class IntervalIndex(IntervalMixin, ExtensionIndex): _typ = "intervalindex" _comparables = ["name"] - _attributes = ["name"] + _attributes = ["name", "closed"] # we would like our indexing holder to defer to us _defer_to_indexing = True diff --git a/pandas/tests/indexes/interval/test_interval.py b/pandas/tests/indexes/interval/test_interval.py index 2755b186f3eae..a20e542b1edd7 100644 --- a/pandas/tests/indexes/interval/test_interval.py +++ b/pandas/tests/indexes/interval/test_interval.py @@ -874,6 +874,13 @@ def test_get_value_non_scalar_errors(self, key): with tm.assert_produces_warning(FutureWarning): idx.get_value(s, key) + @pytest.mark.parametrize("closed", ["left", "right", "both"]) + def test_pickle_round_trip_closed(self, closed): + # https://github.com/pandas-dev/pandas/issues/35658 + idx = IntervalIndex.from_tuples([(1, 2), (2, 3)], closed=closed) + result = tm.round_trip_pickle(idx) + tm.assert_index_equal(result, idx) + def test_dir(): # GH#27571 dir(interval_index) should not raise From b8e4a0902d5472b27bccc1491f94fe79137e8f33 Mon Sep 17 00:00:00 2001 From: Simon Hawkins Date: Fri, 4 Sep 2020 18:03:45 +0100 Subject: [PATCH 419/632] TYP: misc fixes for numpy types 2 (#36099) --- pandas/core/dtypes/cast.py | 2 +- pandas/core/dtypes/common.py | 5 ++--- pandas/core/reshape/merge.py | 2 +- 3 files changed, 4 insertions(+), 5 deletions(-) diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index e6b4cb598989b..1489e08d82bf0 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -651,7 +651,7 @@ def infer_dtype_from_scalar(val, pandas_dtype: bool = False) -> Tuple[DtypeObj, If False, scalar belongs to pandas 
extension types is inferred as object """ - dtype = np.dtype(object) + dtype: DtypeObj = np.dtype(object) # a 1-element ndarray if isinstance(val, np.ndarray): diff --git a/pandas/core/dtypes/common.py b/pandas/core/dtypes/common.py index 1e70ff90fcd44..0bf032725547e 100644 --- a/pandas/core/dtypes/common.py +++ b/pandas/core/dtypes/common.py @@ -108,7 +108,7 @@ def ensure_str(value: Union[bytes, Any]) -> str: return value -def ensure_int_or_float(arr: ArrayLike, copy: bool = False) -> np.array: +def ensure_int_or_float(arr: ArrayLike, copy: bool = False) -> np.ndarray: """ Ensure that an dtype array of some integer dtype has an int64 dtype if possible. @@ -1388,8 +1388,7 @@ def is_bool_dtype(arr_or_dtype) -> bool: # guess this return arr_or_dtype.is_object and arr_or_dtype.inferred_type == "boolean" elif is_extension_array_dtype(arr_or_dtype): - dtype = getattr(arr_or_dtype, "dtype", arr_or_dtype) - return dtype._is_boolean + return getattr(arr_or_dtype, "dtype", arr_or_dtype)._is_boolean return issubclass(dtype.type, np.bool_) diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py index 01e20f49917ac..602ff226f8878 100644 --- a/pandas/core/reshape/merge.py +++ b/pandas/core/reshape/merge.py @@ -1870,7 +1870,7 @@ def _right_outer_join(x, y, max_groups): def _factorize_keys( lk: ArrayLike, rk: ArrayLike, sort: bool = True, how: str = "inner" -) -> Tuple[np.array, np.array, int]: +) -> Tuple[np.ndarray, np.ndarray, int]: """ Encode left and right keys as enumerated types. From 1a21836a9ade84cc95fca9e0dce42eb495b36384 Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Fri, 4 Sep 2020 10:34:49 -0700 Subject: [PATCH 420/632] TYP: io (#36120) --- pandas/io/excel/_util.py | 12 +++++++----- pandas/io/formats/css.py | 9 ++++++--- setup.cfg | 3 --- 3 files changed, 13 insertions(+), 11 deletions(-) diff --git a/pandas/io/excel/_util.py b/pandas/io/excel/_util.py index 285aeaf7d4c6e..a4b5b61734ab7 100644 --- a/pandas/io/excel/_util.py +++ b/pandas/io/excel/_util.py @@ -1,3 +1,5 @@ +from typing import List + from pandas.compat._optional import import_optional_dependency from pandas.core.dtypes.common import is_integer, is_list_like @@ -56,7 +58,7 @@ def get_writer(engine_name): raise ValueError(f"No Excel writer '{engine_name}'") from err -def _excel2num(x): +def _excel2num(x: str) -> int: """ Convert Excel column name like 'AB' to 0-based column index. @@ -88,7 +90,7 @@ def _excel2num(x): return index - 1 -def _range2cols(areas): +def _range2cols(areas: str) -> List[int]: """ Convert comma separated list of column names and ranges to indices. @@ -109,12 +111,12 @@ def _range2cols(areas): >>> _range2cols('A,C,Z:AB') [0, 2, 25, 26, 27] """ - cols = [] + cols: List[int] = [] for rng in areas.split(","): if ":" in rng: - rng = rng.split(":") - cols.extend(range(_excel2num(rng[0]), _excel2num(rng[1]) + 1)) + rngs = rng.split(":") + cols.extend(range(_excel2num(rngs[0]), _excel2num(rngs[1]) + 1)) else: cols.append(_excel2num(rng)) diff --git a/pandas/io/formats/css.py b/pandas/io/formats/css.py index 4d6f03489725f..2e9ee192a1182 100644 --- a/pandas/io/formats/css.py +++ b/pandas/io/formats/css.py @@ -3,6 +3,7 @@ """ import re +from typing import Optional import warnings @@ -93,6 +94,7 @@ def __call__(self, declarations_str, inherited=None): props[prop] = val # 2. 
resolve relative font size + font_size: Optional[float] if props.get("font-size"): if "font-size" in inherited: em_pt = inherited["font-size"] @@ -173,10 +175,11 @@ def _error(): warnings.warn(f"Unhandled size: {repr(in_val)}", CSSWarning) return self.size_to_pt("1!!default", conversions=conversions) - try: - val, unit = re.match(r"^(\S*?)([a-zA-Z%!].*)", in_val).groups() - except AttributeError: + match = re.match(r"^(\S*?)([a-zA-Z%!].*)", in_val) + if match is None: return _error() + + val, unit = match.groups() if val == "": # hack for 'large' etc. val = 1 diff --git a/setup.cfg b/setup.cfg index 29c731848de8e..e7d7df7ff19a2 100644 --- a/setup.cfg +++ b/setup.cfg @@ -279,9 +279,6 @@ check_untyped_defs=False [mypy-pandas.io.formats.console] check_untyped_defs=False -[mypy-pandas.io.formats.css] -check_untyped_defs=False - [mypy-pandas.io.formats.csvs] check_untyped_defs=False From 9dc1fd7f90a9264dd0242c09b73e0c8e485fc4ba Mon Sep 17 00:00:00 2001 From: Asish Mahapatra Date: Fri, 4 Sep 2020 16:29:25 -0400 Subject: [PATCH 421/632] BUG: incorrect year returned in isocalendar for certain dates (#36050) --- doc/source/whatsnew/v1.1.2.rst | 1 + pandas/_libs/tslibs/ccalendar.pyx | 4 ++-- pandas/tests/series/test_datetime_values.py | 3 +++ pandas/tests/tslibs/test_ccalendar.py | 15 +++++++++++++++ 4 files changed, 21 insertions(+), 2 deletions(-) diff --git a/doc/source/whatsnew/v1.1.2.rst b/doc/source/whatsnew/v1.1.2.rst index 232d0c4b4bbcd..39850905f60fa 100644 --- a/doc/source/whatsnew/v1.1.2.rst +++ b/doc/source/whatsnew/v1.1.2.rst @@ -33,6 +33,7 @@ Bug fixes - Bug in :meth:`DataFrame.apply` with ``result_type="reduce"`` returning with incorrect index (:issue:`35683`) - Bug in :meth:`DateTimeIndex.format` and :meth:`PeriodIndex.format` with ``name=True`` setting the first item to ``"None"`` where it should be ``""`` (:issue:`35712`) - Bug in :meth:`Float64Index.__contains__` incorrectly raising ``TypeError`` instead of returning ``False`` (:issue:`35788`) +- Bug in :meth:`Series.dt.isocalendar` and :meth:`DatetimeIndex.isocalendar` that returned incorrect year for certain dates (:issue:`36032`) - Bug in :class:`DataFrame` indexing returning an incorrect :class:`Series` in some cases when the series has been altered and a cache not invalidated (:issue:`33675`) .. 
--------------------------------------------------------------------------- diff --git a/pandas/_libs/tslibs/ccalendar.pyx b/pandas/_libs/tslibs/ccalendar.pyx index 6cce2f5e1fd95..d8c83daa661a3 100644 --- a/pandas/_libs/tslibs/ccalendar.pyx +++ b/pandas/_libs/tslibs/ccalendar.pyx @@ -201,10 +201,10 @@ cpdef iso_calendar_t get_iso_calendar(int year, int month, int day) nogil: iso_week = 1 iso_year = year - if iso_week == 1 and doy > 7: + if iso_week == 1 and month == 12: iso_year += 1 - elif iso_week >= 52 and doy < 7: + elif iso_week >= 52 and month == 1: iso_year -= 1 return iso_year, iso_week, dow + 1 diff --git a/pandas/tests/series/test_datetime_values.py b/pandas/tests/series/test_datetime_values.py index d2ad9c8c398ea..723bd303b1974 100644 --- a/pandas/tests/series/test_datetime_values.py +++ b/pandas/tests/series/test_datetime_values.py @@ -682,6 +682,9 @@ def test_setitem_with_different_tz(self): [[pd.NaT], [[np.NaN, np.NaN, np.NaN]]], [["2019-12-31", "2019-12-29"], [[2020, 1, 2], [2019, 52, 7]]], [["2010-01-01", pd.NaT], [[2009, 53, 5], [np.NaN, np.NaN, np.NaN]]], + # see GH#36032 + [["2016-01-08", "2016-01-04"], [[2016, 1, 5], [2016, 1, 1]]], + [["2016-01-07", "2016-01-01"], [[2016, 1, 4], [2015, 53, 5]]], ], ) def test_isocalendar(self, input_series, expected_output): diff --git a/pandas/tests/tslibs/test_ccalendar.py b/pandas/tests/tslibs/test_ccalendar.py index aab86d3a2df69..1ff700fdc23a3 100644 --- a/pandas/tests/tslibs/test_ccalendar.py +++ b/pandas/tests/tslibs/test_ccalendar.py @@ -1,10 +1,13 @@ from datetime import date, datetime +from hypothesis import given, strategies as st import numpy as np import pytest from pandas._libs.tslibs import ccalendar +import pandas as pd + @pytest.mark.parametrize( "date_tuple,expected", @@ -48,3 +51,15 @@ def test_dt_correct_iso_8601_year_week_and_day(input_date_tuple, expected_iso_tu expected_from_date_isocalendar = date(*input_date_tuple).isocalendar() assert result == expected_from_date_isocalendar assert result == expected_iso_tuple + + +@given( + st.datetimes( + min_value=pd.Timestamp.min.to_pydatetime(warn=False), + max_value=pd.Timestamp.max.to_pydatetime(warn=False), + ) +) +def test_isocalendar(dt): + expected = dt.isocalendar() + result = ccalendar.get_iso_calendar(dt.year, dt.month, dt.day) + assert result == expected From 7d736418a907aab3e32f6a9e25fef9b47b1740f5 Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Fri, 4 Sep 2020 13:47:12 -0700 Subject: [PATCH 422/632] REF: use BlockManager.apply for DataFrameGroupBy.count (#35924) --- pandas/core/groupby/generic.py | 22 +++++++++++++--------- 1 file changed, 13 insertions(+), 9 deletions(-) diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index 537feace59fcb..53edd056a6802 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -77,7 +77,7 @@ from pandas.core.groupby.numba_ import generate_numba_func, split_for_numba from pandas.core.indexes.api import Index, MultiIndex, all_indexes_same import pandas.core.indexes.base as ibase -from pandas.core.internals import BlockManager, make_block +from pandas.core.internals import BlockManager from pandas.core.series import Series from pandas.core.util.numba_ import NUMBA_FUNC_CACHE, maybe_use_numba @@ -1750,20 +1750,24 @@ def count(self) -> DataFrame: ids, _, ngroups = self.grouper.group_info mask = ids != -1 - # TODO(2DEA): reshape would not be necessary with 2D EAs - vals = ((mask & ~isna(blk.values).reshape(blk.shape)) for blk in data.blocks) - locs = (blk.mgr_locs for blk in 
data.blocks) + def hfunc(bvalues: ArrayLike) -> ArrayLike: + # TODO(2DEA): reshape would not be necessary with 2D EAs + if bvalues.ndim == 1: + # EA + masked = mask & ~isna(bvalues).reshape(1, -1) + else: + masked = mask & ~isna(bvalues) - counted = ( - lib.count_level_2d(x, labels=ids, max_bin=ngroups, axis=1) for x in vals - ) - blocks = [make_block(val, placement=loc) for val, loc in zip(counted, locs)] + counted = lib.count_level_2d(masked, labels=ids, max_bin=ngroups, axis=1) + return counted + + new_mgr = data.apply(hfunc) # If we are grouping on categoricals we want unobserved categories to # return zero, rather than the default of NaN which the reindexing in # _wrap_agged_blocks() returns. GH 35028 with com.temp_setattr(self, "observed", True): - result = self._wrap_agged_blocks(blocks, items=data.items) + result = self._wrap_agged_blocks(new_mgr.blocks, items=data.items) return self._reindex_output(result, fill_value=0) From fadb72cf5ef8489e409d4d33625bd16a76fa7a42 Mon Sep 17 00:00:00 2001 From: Joris Van den Bossche Date: Fri, 4 Sep 2020 22:48:43 +0200 Subject: [PATCH 423/632] REGR: fix consolidation/cache issue with take operation (#36114) --- doc/source/whatsnew/v1.1.2.rst | 1 + pandas/core/generic.py | 2 ++ pandas/tests/frame/test_block_internals.py | 23 ++++++++++++++++++++++ 3 files changed, 26 insertions(+) diff --git a/doc/source/whatsnew/v1.1.2.rst b/doc/source/whatsnew/v1.1.2.rst index 39850905f60fa..d1a66256454ca 100644 --- a/doc/source/whatsnew/v1.1.2.rst +++ b/doc/source/whatsnew/v1.1.2.rst @@ -17,6 +17,7 @@ Fixed regressions - Regression in :meth:`DatetimeIndex.intersection` incorrectly raising ``AssertionError`` when intersecting against a list (:issue:`35876`) - Fix regression in updating a column inplace (e.g. using ``df['col'].fillna(.., inplace=True)``) (:issue:`35731`) - Performance regression for :meth:`RangeIndex.format` (:issue:`35712`) +- Fix regression in invalid cache after an indexing operation; this can manifest when setting which does not update the data (:issue:`35521`) - Regression in :meth:`DataFrame.replace` where a ``TypeError`` would be raised when attempting to replace elements of type :class:`Interval` (:issue:`35931`) - Fix regression in pickle roundtrip of the ``closed`` attribute of :class:`IntervalIndex` (:issue:`35658`) - Fixed regression in :meth:`DataFrameGroupBy.agg` where a ``ValueError: buffer source array is read-only`` would be raised when the underlying array is read-only (:issue:`36014`) diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 6c8780a0fc186..2af323ccc1dd3 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -3534,6 +3534,8 @@ class max_speed nv.validate_take(tuple(), kwargs) + self._consolidate_inplace() + new_data = self._mgr.take( indices, axis=self._get_block_manager_axis(axis), verify=True ) diff --git a/pandas/tests/frame/test_block_internals.py b/pandas/tests/frame/test_block_internals.py index 00cfa6265934f..4a85da72bc8b1 100644 --- a/pandas/tests/frame/test_block_internals.py +++ b/pandas/tests/frame/test_block_internals.py @@ -658,3 +658,26 @@ def test_update_inplace_sets_valid_block_values(): # smoketest for OP bug from GH#35731 assert df.isnull().sum().sum() == 0 + + +def test_nonconsolidated_item_cache_take(): + # https://github.com/pandas-dev/pandas/issues/35521 + + # create non-consolidated dataframe with object dtype columns + df = pd.DataFrame() + df["col1"] = pd.Series(["a"], dtype=object) + df["col2"] = pd.Series([0], dtype=object) + + # access column (item 
cache) + df["col1"] == "A" + # take operation + # (regression was that this consolidated but didn't reset item cache, + # resulting in an invalid cache and the .at operation not working properly) + df[df["col2"] == 0] + + # now setting value should update actual dataframe + df.at[0, "col1"] = "A" + + expected = pd.DataFrame({"col1": ["A"], "col2": [0]}, dtype=object) + tm.assert_frame_equal(df, expected) + assert df.at[0, "col1"] == "A" From bc0e5073bfbc2a054870b98b280b353b467156fe Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Fri, 4 Sep 2020 13:51:21 -0700 Subject: [PATCH 424/632] De-privatize (#36107) --- pandas/_libs/indexing.pyx | 2 +- pandas/_libs/tslibs/parsing.pyx | 4 ++-- pandas/core/arrays/datetimes.py | 4 ++-- pandas/core/arrays/timedeltas.py | 8 ++++---- pandas/core/common.py | 4 ++-- pandas/core/computation/common.py | 2 +- pandas/core/computation/ops.py | 6 +++--- pandas/core/computation/pytables.py | 8 ++++---- pandas/core/dtypes/common.py | 16 ++++++++-------- pandas/core/dtypes/inference.py | 8 ++++---- pandas/core/dtypes/missing.py | 8 ++++---- pandas/core/groupby/ops.py | 8 +++----- pandas/core/indexes/timedeltas.py | 4 ++-- pandas/core/indexing.py | 10 +++++----- pandas/core/internals/blocks.py | 14 +++++++------- pandas/core/internals/concat.py | 4 ++-- pandas/core/internals/managers.py | 6 +++--- pandas/core/nanops.py | 6 +++--- pandas/core/reshape/melt.py | 4 ++-- pandas/core/reshape/util.py | 4 ++-- pandas/core/tools/datetimes.py | 8 ++++---- pandas/io/formats/format.py | 8 ++++---- pandas/tests/dtypes/test_common.py | 8 ++++---- pandas/tests/tslibs/test_parsing.py | 10 +++++----- 24 files changed, 81 insertions(+), 83 deletions(-) diff --git a/pandas/_libs/indexing.pyx b/pandas/_libs/indexing.pyx index f9aedeb8ad93e..7966fe8d4f045 100644 --- a/pandas/_libs/indexing.pyx +++ b/pandas/_libs/indexing.pyx @@ -1,4 +1,4 @@ -cdef class _NDFrameIndexerBase: +cdef class NDFrameIndexerBase: """ A base class for _NDFrameIndexer for fast instantiation and attribute access. """ diff --git a/pandas/_libs/tslibs/parsing.pyx b/pandas/_libs/tslibs/parsing.pyx index 7478179df3b75..aeb1be121bc9e 100644 --- a/pandas/_libs/tslibs/parsing.pyx +++ b/pandas/_libs/tslibs/parsing.pyx @@ -771,7 +771,7 @@ class _timelex: _DATEUTIL_LEXER_SPLIT = _timelex.split -def _format_is_iso(f) -> bint: +def format_is_iso(f: str) -> bint: """ Does format match the iso8601 set that can be handled by the C parser? 
Generally of form YYYY-MM-DDTHH:MM:SS - date separator can be different @@ -789,7 +789,7 @@ def _format_is_iso(f) -> bint: return False -def _guess_datetime_format( +def guess_datetime_format( dt_str, bint dayfirst=False, dt_str_parse=du_parse, diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index 8b2bb7832b5d0..1bea3a9eb137e 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -602,9 +602,9 @@ def astype(self, dtype, copy=True): # Rendering Methods def _format_native_types(self, na_rep="NaT", date_format=None, **kwargs): - from pandas.io.formats.format import _get_format_datetime64_from_values + from pandas.io.formats.format import get_format_datetime64_from_values - fmt = _get_format_datetime64_from_values(self, date_format) + fmt = get_format_datetime64_from_values(self, date_format) return tslib.format_array_from_datetime( self.asi8.ravel(), tz=self.tz, format=fmt, na_rep=na_rep diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py index 3e21d01355dda..2d694c469b3a9 100644 --- a/pandas/core/arrays/timedeltas.py +++ b/pandas/core/arrays/timedeltas.py @@ -379,14 +379,14 @@ def median( # Rendering Methods def _formatter(self, boxed=False): - from pandas.io.formats.format import _get_format_timedelta64 + from pandas.io.formats.format import get_format_timedelta64 - return _get_format_timedelta64(self, box=True) + return get_format_timedelta64(self, box=True) def _format_native_types(self, na_rep="NaT", date_format=None, **kwargs): - from pandas.io.formats.format import _get_format_timedelta64 + from pandas.io.formats.format import get_format_timedelta64 - formatter = _get_format_timedelta64(self._data, na_rep) + formatter = get_format_timedelta64(self._data, na_rep) return np.array([formatter(x) for x in self._data.ravel()]).reshape(self.shape) # ---------------------------------------------------------------- diff --git a/pandas/core/common.py b/pandas/core/common.py index 6fd4700ab7f3f..279d512e5a046 100644 --- a/pandas/core/common.py +++ b/pandas/core/common.py @@ -31,7 +31,7 @@ ABCIndexClass, ABCSeries, ) -from pandas.core.dtypes.inference import _iterable_not_string +from pandas.core.dtypes.inference import iterable_not_string from pandas.core.dtypes.missing import isna, isnull, notnull # noqa @@ -61,7 +61,7 @@ def flatten(l): flattened : generator """ for el in l: - if _iterable_not_string(el): + if iterable_not_string(el): for s in flatten(el): yield s else: diff --git a/pandas/core/computation/common.py b/pandas/core/computation/common.py index 327ec21c3c11c..8a9583c465f50 100644 --- a/pandas/core/computation/common.py +++ b/pandas/core/computation/common.py @@ -5,7 +5,7 @@ from pandas._config import get_option -def _ensure_decoded(s): +def ensure_decoded(s): """ If we have bytes, decode them to unicode. 
""" diff --git a/pandas/core/computation/ops.py b/pandas/core/computation/ops.py index e55df1e1d8155..b2144c45c6323 100644 --- a/pandas/core/computation/ops.py +++ b/pandas/core/computation/ops.py @@ -15,7 +15,7 @@ from pandas.core.dtypes.common import is_list_like, is_scalar import pandas.core.common as com -from pandas.core.computation.common import _ensure_decoded, result_type_many +from pandas.core.computation.common import ensure_decoded, result_type_many from pandas.core.computation.scope import _DEFAULT_GLOBALS from pandas.io.formats.printing import pprint_thing, pprint_thing_encoded @@ -466,7 +466,7 @@ def stringify(value): v = rhs.value if isinstance(v, (int, float)): v = stringify(v) - v = Timestamp(_ensure_decoded(v)) + v = Timestamp(ensure_decoded(v)) if v.tz is not None: v = v.tz_convert("UTC") self.rhs.update(v) @@ -475,7 +475,7 @@ def stringify(value): v = lhs.value if isinstance(v, (int, float)): v = stringify(v) - v = Timestamp(_ensure_decoded(v)) + v = Timestamp(ensure_decoded(v)) if v.tz is not None: v = v.tz_convert("UTC") self.lhs.update(v) diff --git a/pandas/core/computation/pytables.py b/pandas/core/computation/pytables.py index f1b11a6869c2b..8dd7c1a22d0ae 100644 --- a/pandas/core/computation/pytables.py +++ b/pandas/core/computation/pytables.py @@ -14,7 +14,7 @@ import pandas as pd import pandas.core.common as com from pandas.core.computation import expr, ops, scope as _scope -from pandas.core.computation.common import _ensure_decoded +from pandas.core.computation.common import ensure_decoded from pandas.core.computation.expr import BaseExprVisitor from pandas.core.computation.ops import UndefinedVariableError, is_term from pandas.core.construction import extract_array @@ -189,12 +189,12 @@ def stringify(value): encoder = pprint_thing return encoder(value) - kind = _ensure_decoded(self.kind) - meta = _ensure_decoded(self.meta) + kind = ensure_decoded(self.kind) + meta = ensure_decoded(self.meta) if kind == "datetime64" or kind == "datetime": if isinstance(v, (int, float)): v = stringify(v) - v = _ensure_decoded(v) + v = ensure_decoded(v) v = Timestamp(v) if v.tz is not None: v = v.tz_convert("UTC") diff --git a/pandas/core/dtypes/common.py b/pandas/core/dtypes/common.py index 0bf032725547e..6ad46eb967275 100644 --- a/pandas/core/dtypes/common.py +++ b/pandas/core/dtypes/common.py @@ -635,8 +635,8 @@ def is_dtype_equal(source, target) -> bool: False """ try: - source = _get_dtype(source) - target = _get_dtype(target) + source = get_dtype(source) + target = get_dtype(target) return source == target except (TypeError, AttributeError): @@ -984,10 +984,10 @@ def is_datetime64_ns_dtype(arr_or_dtype) -> bool: if arr_or_dtype is None: return False try: - tipo = _get_dtype(arr_or_dtype) + tipo = get_dtype(arr_or_dtype) except TypeError: if is_datetime64tz_dtype(arr_or_dtype): - tipo = _get_dtype(arr_or_dtype.dtype) + tipo = get_dtype(arr_or_dtype.dtype) else: return False return tipo == DT64NS_DTYPE or getattr(tipo, "base", None) == DT64NS_DTYPE @@ -1372,7 +1372,7 @@ def is_bool_dtype(arr_or_dtype) -> bool: if arr_or_dtype is None: return False try: - dtype = _get_dtype(arr_or_dtype) + dtype = get_dtype(arr_or_dtype) except TypeError: return False @@ -1557,13 +1557,13 @@ def _is_dtype(arr_or_dtype, condition) -> bool: if arr_or_dtype is None: return False try: - dtype = _get_dtype(arr_or_dtype) + dtype = get_dtype(arr_or_dtype) except (TypeError, ValueError, UnicodeEncodeError): return False return condition(dtype) -def _get_dtype(arr_or_dtype) -> DtypeObj: +def 
get_dtype(arr_or_dtype) -> DtypeObj: """ Get the dtype instance associated with an array or dtype object. @@ -1694,7 +1694,7 @@ def infer_dtype_from_object(dtype): try: return infer_dtype_from_object(getattr(np, dtype)) except (AttributeError, TypeError): - # Handles cases like _get_dtype(int) i.e., + # Handles cases like get_dtype(int) i.e., # Python objects that are valid dtypes # (unlike user-defined types, in general) # diff --git a/pandas/core/dtypes/inference.py b/pandas/core/dtypes/inference.py index d1607b5ede6c3..329c4445b05bc 100644 --- a/pandas/core/dtypes/inference.py +++ b/pandas/core/dtypes/inference.py @@ -68,7 +68,7 @@ def is_number(obj) -> bool: return isinstance(obj, (Number, np.number)) -def _iterable_not_string(obj) -> bool: +def iterable_not_string(obj) -> bool: """ Check if the object is an iterable but not a string. @@ -83,11 +83,11 @@ def _iterable_not_string(obj) -> bool: Examples -------- - >>> _iterable_not_string([1, 2, 3]) + >>> iterable_not_string([1, 2, 3]) True - >>> _iterable_not_string("foo") + >>> iterable_not_string("foo") False - >>> _iterable_not_string(1) + >>> iterable_not_string(1) False """ return isinstance(obj, abc.Iterable) and not isinstance(obj, str) diff --git a/pandas/core/dtypes/missing.py b/pandas/core/dtypes/missing.py index f59bb31af2828..163500525dbd8 100644 --- a/pandas/core/dtypes/missing.py +++ b/pandas/core/dtypes/missing.py @@ -338,7 +338,7 @@ def notna(obj): notnull = notna -def _isna_compat(arr, fill_value=np.nan) -> bool: +def isna_compat(arr, fill_value=np.nan) -> bool: """ Parameters ---------- @@ -496,7 +496,7 @@ def array_equals(left: ArrayLike, right: ArrayLike) -> bool: return array_equivalent(left, right, dtype_equal=True) -def _infer_fill_value(val): +def infer_fill_value(val): """ infer the fill value for the nan/NaT from the provided scalar/ndarray/list-like if we are a NaT, return the correct dtyped @@ -516,11 +516,11 @@ def _infer_fill_value(val): return np.nan -def _maybe_fill(arr, fill_value=np.nan): +def maybe_fill(arr, fill_value=np.nan): """ if we have a compatible fill_value and arr dtype, then fill """ - if _isna_compat(arr, fill_value): + if isna_compat(arr, fill_value): arr.fill(fill_value) return arr diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py index 4dd5b7f30e7f0..c076b6e2e181b 100644 --- a/pandas/core/groupby/ops.py +++ b/pandas/core/groupby/ops.py @@ -37,7 +37,7 @@ is_timedelta64_dtype, needs_i8_conversion, ) -from pandas.core.dtypes.missing import _maybe_fill, isna +from pandas.core.dtypes.missing import isna, maybe_fill import pandas.core.algorithms as algorithms from pandas.core.base import SelectionMixin @@ -524,13 +524,11 @@ def _cython_operation( codes, _, _ = self.group_info if kind == "aggregate": - result = _maybe_fill( - np.empty(out_shape, dtype=out_dtype), fill_value=np.nan - ) + result = maybe_fill(np.empty(out_shape, dtype=out_dtype), fill_value=np.nan) counts = np.zeros(self.ngroups, dtype=np.int64) result = self._aggregate(result, counts, values, codes, func, min_count) elif kind == "transform": - result = _maybe_fill( + result = maybe_fill( np.empty_like(values, dtype=out_dtype), fill_value=np.nan ) diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py index dccc8369c5366..85c8396dfd1fe 100644 --- a/pandas/core/indexes/timedeltas.py +++ b/pandas/core/indexes/timedeltas.py @@ -177,9 +177,9 @@ def _simple_new(cls, values: TimedeltaArray, name: Label = None): @property def _formatter_func(self): - from pandas.io.formats.format import 
_get_format_timedelta64 + from pandas.io.formats.format import get_format_timedelta64 - return _get_format_timedelta64(self, box=True) + return get_format_timedelta64(self, box=True) # ------------------------------------------------------------------- diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index dd81823055390..cfb17b9498a36 100644 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -4,7 +4,7 @@ from pandas._config.config import option_context -from pandas._libs.indexing import _NDFrameIndexerBase +from pandas._libs.indexing import NDFrameIndexerBase from pandas._libs.lib import item_from_zerodim from pandas.errors import AbstractMethodError, InvalidIndexError from pandas.util._decorators import doc @@ -22,7 +22,7 @@ ) from pandas.core.dtypes.concat import concat_compat from pandas.core.dtypes.generic import ABCDataFrame, ABCMultiIndex, ABCSeries -from pandas.core.dtypes.missing import _infer_fill_value, isna +from pandas.core.dtypes.missing import infer_fill_value, isna import pandas.core.common as com from pandas.core.construction import array as pd_array @@ -583,7 +583,7 @@ def iat(self) -> "_iAtIndexer": return _iAtIndexer("iat", self) -class _LocationIndexer(_NDFrameIndexerBase): +class _LocationIndexer(NDFrameIndexerBase): _valid_types: str axis = None @@ -1604,7 +1604,7 @@ def _setitem_with_indexer(self, indexer, value): return # add a new item with the dtype setup - self.obj[key] = _infer_fill_value(value) + self.obj[key] = infer_fill_value(value) new_indexer = convert_from_missing_indexer_tuple( indexer, self.obj.axes @@ -2017,7 +2017,7 @@ def _align_frame(self, indexer, df: ABCDataFrame): raise ValueError("Incompatible indexer with DataFrame") -class _ScalarAccessIndexer(_NDFrameIndexerBase): +class _ScalarAccessIndexer(NDFrameIndexerBase): """ Access scalars quickly. 
""" diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index ad388ef3f53b0..b2305736f9d46 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -56,7 +56,7 @@ ABCPandasArray, ABCSeries, ) -from pandas.core.dtypes.missing import _isna_compat, is_valid_nat_for_dtype, isna +from pandas.core.dtypes.missing import is_valid_nat_for_dtype, isna, isna_compat import pandas.core.algorithms as algos from pandas.core.array_algos.transforms import shift @@ -487,7 +487,7 @@ def _maybe_downcast(self, blocks: List["Block"], downcast=None) -> List["Block"] ): return blocks - return _extend_blocks([b.downcast(downcast) for b in blocks]) + return extend_blocks([b.downcast(downcast) for b in blocks]) def downcast(self, dtypes=None): """ try to downcast each item to the dict of dtypes if present """ @@ -2474,7 +2474,7 @@ def _maybe_downcast(self, blocks: List["Block"], downcast=None) -> List["Block"] return blocks # split and convert the blocks - return _extend_blocks([b.convert(datetime=True, numeric=False) for b in blocks]) + return extend_blocks([b.convert(datetime=True, numeric=False) for b in blocks]) def _can_hold_element(self, element: Any) -> bool: return True @@ -2503,7 +2503,7 @@ def replace(self, to_replace, value, inplace=False, regex=False, convert=True): result = b._replace_single( to_rep, v, inplace=inplace, regex=regex, convert=convert ) - result_blocks = _extend_blocks(result, result_blocks) + result_blocks = extend_blocks(result, result_blocks) blocks = result_blocks return result_blocks @@ -2514,7 +2514,7 @@ def replace(self, to_replace, value, inplace=False, regex=False, convert=True): result = b._replace_single( to_rep, value, inplace=inplace, regex=regex, convert=convert ) - result_blocks = _extend_blocks(result, result_blocks) + result_blocks = extend_blocks(result, result_blocks) blocks = result_blocks return result_blocks @@ -2769,7 +2769,7 @@ def make_block(values, placement, klass=None, ndim=None, dtype=None): # ----------------------------------------------------------------- -def _extend_blocks(result, blocks=None): +def extend_blocks(result, blocks=None): """ return a new extended blocks, given the result """ if blocks is None: blocks = [] @@ -2860,7 +2860,7 @@ def _putmask_smart(v: np.ndarray, mask: np.ndarray, n) -> np.ndarray: else: # make sure that we have a nullable type # if we have nulls - if not _isna_compat(v, nn[0]): + if not isna_compat(v, nn[0]): pass elif not (is_float_dtype(nn.dtype) or is_integer_dtype(nn.dtype)): # only compare integers/floats diff --git a/pandas/core/internals/concat.py b/pandas/core/internals/concat.py index 88839d2211f81..b45f0890cafa4 100644 --- a/pandas/core/internals/concat.py +++ b/pandas/core/internals/concat.py @@ -10,7 +10,7 @@ from pandas.core.dtypes.cast import maybe_promote from pandas.core.dtypes.common import ( - _get_dtype, + get_dtype, is_categorical_dtype, is_datetime64_dtype, is_datetime64tz_dtype, @@ -200,7 +200,7 @@ def dtype(self): if not self.needs_filling: return self.block.dtype else: - return _get_dtype(maybe_promote(self.block.dtype, self.block.fill_value)[0]) + return get_dtype(maybe_promote(self.block.dtype, self.block.fill_value)[0]) @cache_readonly def is_na(self): diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py index 2e3098d94afcb..753b949f7c802 100644 --- a/pandas/core/internals/managers.py +++ b/pandas/core/internals/managers.py @@ -54,8 +54,8 @@ DatetimeTZBlock, ExtensionBlock, ObjectValuesExtensionBlock, - 
_extend_blocks, _safe_reshape, + extend_blocks, get_block_type, make_block, ) @@ -406,7 +406,7 @@ def apply( if not ignore_failures: raise continue - result_blocks = _extend_blocks(applied, result_blocks) + result_blocks = extend_blocks(applied, result_blocks) if ignore_failures: return self._combine(result_blocks) @@ -1868,7 +1868,7 @@ def _consolidate(blocks): merged_blocks = _merge_blocks( list(group_blocks), dtype=dtype, can_consolidate=_can_consolidate ) - new_blocks = _extend_blocks(merged_blocks, new_blocks) + new_blocks = extend_blocks(merged_blocks, new_blocks) return new_blocks diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py index e3f16a3ef4f90..6fdde22a1c514 100644 --- a/pandas/core/nanops.py +++ b/pandas/core/nanops.py @@ -13,7 +13,7 @@ from pandas.core.dtypes.cast import _int64_max, maybe_upcast_putmask from pandas.core.dtypes.common import ( - _get_dtype, + get_dtype, is_any_int_dtype, is_bool_dtype, is_complex, @@ -678,7 +678,7 @@ def _get_counts_nanvar( count : scalar or array d : scalar or array """ - dtype = _get_dtype(dtype) + dtype = get_dtype(dtype) count = _get_counts(value_counts, mask, axis, dtype=dtype) d = count - dtype.type(ddof) @@ -1234,7 +1234,7 @@ def _get_counts( ------- count : scalar or array """ - dtype = _get_dtype(dtype) + dtype = get_dtype(dtype) if axis is None: if mask is not None: n = mask.size - mask.sum() diff --git a/pandas/core/reshape/melt.py b/pandas/core/reshape/melt.py index 8724f7674f0c8..33ce5ed49b9c2 100644 --- a/pandas/core/reshape/melt.py +++ b/pandas/core/reshape/melt.py @@ -14,7 +14,7 @@ import pandas.core.common as com from pandas.core.indexes.api import Index, MultiIndex from pandas.core.reshape.concat import concat -from pandas.core.reshape.util import _tile_compat +from pandas.core.reshape.util import tile_compat from pandas.core.shared_docs import _shared_docs from pandas.core.tools.numeric import to_numeric @@ -136,7 +136,7 @@ def melt( result = frame._constructor(mdata, columns=mcolumns) if not ignore_index: - result.index = _tile_compat(frame.index, K) + result.index = tile_compat(frame.index, K) return result diff --git a/pandas/core/reshape/util.py b/pandas/core/reshape/util.py index 6949270317f7c..a1bf3f8ee4119 100644 --- a/pandas/core/reshape/util.py +++ b/pandas/core/reshape/util.py @@ -48,10 +48,10 @@ def cartesian_product(X): # if any factor is empty, the cartesian product is empty b = np.zeros_like(cumprodX) - return [_tile_compat(np.repeat(x, b[i]), np.product(a[i])) for i, x in enumerate(X)] + return [tile_compat(np.repeat(x, b[i]), np.product(a[i])) for i, x in enumerate(X)] -def _tile_compat(arr, num: int): +def tile_compat(arr, num: int): """ Index compat for np.tile. 
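A rough usage sketch of the renames so far in this patch (illustrative only, not part of the committed diff; it assumes a build with this commit applied and mirrors the test updates further below):

import numpy as np

from pandas._libs.tslibs import parsing
from pandas.core.dtypes.common import get_dtype

# guess_datetime_format (formerly _guess_datetime_format) infers a strftime
# format from a sample string; the dayfirst case is taken directly from
# pandas/tests/tslibs/test_parsing.py at the end of this patch.
assert parsing.guess_datetime_format("01/01/2011", dayfirst=True) == "%d/%m/%Y"

# get_dtype (formerly _get_dtype) resolves its argument to a dtype instance,
# as exercised in pandas/tests/dtypes/test_common.py.
assert get_dtype(np.int64) == np.dtype("int64")
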
diff --git a/pandas/core/tools/datetimes.py b/pandas/core/tools/datetimes.py index 8fcc5f74ea897..09a53d5a10ae6 100644 --- a/pandas/core/tools/datetimes.py +++ b/pandas/core/tools/datetimes.py @@ -20,8 +20,8 @@ from pandas._libs.tslibs import Timestamp, conversion, parsing from pandas._libs.tslibs.parsing import ( # noqa DateParseError, - _format_is_iso, - _guess_datetime_format, + format_is_iso, + guess_datetime_format, ) from pandas._libs.tslibs.strptime import array_strptime from pandas._typing import ArrayLike, Label, Timezone @@ -73,7 +73,7 @@ def _guess_datetime_format_for_array(arr, **kwargs): # Try to guess the format based on the first non-NaN element non_nan_elements = notna(arr).nonzero()[0] if len(non_nan_elements): - return _guess_datetime_format(arr[non_nan_elements[0]], **kwargs) + return guess_datetime_format(arr[non_nan_elements[0]], **kwargs) def should_cache( @@ -387,7 +387,7 @@ def _convert_listlike_datetimes( # datetime strings, so in those cases don't use the inferred # format because this path makes process slower in this # special case - format_is_iso8601 = _format_is_iso(format) + format_is_iso8601 = format_is_iso(format) if format_is_iso8601: require_iso8601 = not infer_datetime_format format = None diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py index 461ef6823918e..3d441f6e737bc 100644 --- a/pandas/io/formats/format.py +++ b/pandas/io/formats/format.py @@ -1473,7 +1473,7 @@ def _format_strings(self) -> List[str]: fmt_values = format_array_from_datetime( values.asi8.ravel(), - format=_get_format_datetime64_from_values(values, self.date_format), + format=get_format_datetime64_from_values(values, self.date_format), na_rep=self.nat_rep, ).reshape(values.shape) return fmt_values.tolist() @@ -1636,7 +1636,7 @@ def _get_format_datetime64( return lambda x, tz=None: _format_datetime64(x, tz=tz, nat_rep=nat_rep) -def _get_format_datetime64_from_values( +def get_format_datetime64_from_values( values: Union[np.ndarray, DatetimeArray, DatetimeIndex], date_format: Optional[str] ) -> Optional[str]: """ given values and a date_format, return a string format """ @@ -1677,13 +1677,13 @@ def __init__( self.box = box def _format_strings(self) -> List[str]: - formatter = self.formatter or _get_format_timedelta64( + formatter = self.formatter or get_format_timedelta64( self.values, nat_rep=self.nat_rep, box=self.box ) return [formatter(x) for x in self.values] -def _get_format_timedelta64( +def get_format_timedelta64( values: Union[np.ndarray, TimedeltaIndex, TimedeltaArray], nat_rep: str = "NaT", box: bool = False, diff --git a/pandas/tests/dtypes/test_common.py b/pandas/tests/dtypes/test_common.py index a6c526fcb008a..2db9a9a403e1c 100644 --- a/pandas/tests/dtypes/test_common.py +++ b/pandas/tests/dtypes/test_common.py @@ -649,8 +649,8 @@ def test_is_complex_dtype(): (IntervalDtype(), IntervalDtype()), ], ) -def test__get_dtype(input_param, result): - assert com._get_dtype(input_param) == result +def test_get_dtype(input_param, result): + assert com.get_dtype(input_param) == result @pytest.mark.parametrize( @@ -664,12 +664,12 @@ def test__get_dtype(input_param, result): (pd.DataFrame([1, 2]), "data type not understood"), ], ) -def test__get_dtype_fails(input_param, expected_error_message): +def test_get_dtype_fails(input_param, expected_error_message): # python objects # 2020-02-02 npdev changed error message expected_error_message += f"|Cannot interpret '{input_param}' as a data type" with pytest.raises(TypeError, match=expected_error_message): - 
com._get_dtype(input_param) + com.get_dtype(input_param) @pytest.mark.parametrize( diff --git a/pandas/tests/tslibs/test_parsing.py b/pandas/tests/tslibs/test_parsing.py index dc7421ea63464..70fa724464226 100644 --- a/pandas/tests/tslibs/test_parsing.py +++ b/pandas/tests/tslibs/test_parsing.py @@ -148,14 +148,14 @@ def test_parsers_month_freq(date_str, expected): ], ) def test_guess_datetime_format_with_parseable_formats(string, fmt): - result = parsing._guess_datetime_format(string) + result = parsing.guess_datetime_format(string) assert result == fmt @pytest.mark.parametrize("dayfirst,expected", [(True, "%d/%m/%Y"), (False, "%m/%d/%Y")]) def test_guess_datetime_format_with_dayfirst(dayfirst, expected): ambiguous_string = "01/01/2011" - result = parsing._guess_datetime_format(ambiguous_string, dayfirst=dayfirst) + result = parsing.guess_datetime_format(ambiguous_string, dayfirst=dayfirst) assert result == expected @@ -169,7 +169,7 @@ def test_guess_datetime_format_with_dayfirst(dayfirst, expected): ], ) def test_guess_datetime_format_with_locale_specific_formats(string, fmt): - result = parsing._guess_datetime_format(string) + result = parsing.guess_datetime_format(string) assert result == fmt @@ -189,7 +189,7 @@ def test_guess_datetime_format_with_locale_specific_formats(string, fmt): def test_guess_datetime_format_invalid_inputs(invalid_dt): # A datetime string must include a year, month and a day for it to be # guessable, in addition to being a string that looks like a datetime. - assert parsing._guess_datetime_format(invalid_dt) is None + assert parsing.guess_datetime_format(invalid_dt) is None @pytest.mark.parametrize( @@ -205,7 +205,7 @@ def test_guess_datetime_format_invalid_inputs(invalid_dt): ) def test_guess_datetime_format_no_padding(string, fmt): # see gh-11142 - result = parsing._guess_datetime_format(string) + result = parsing.guess_datetime_format(string) assert result == fmt From 3ca6d8f31c757be20b0b2bb127a1fe402be236cd Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Fri, 4 Sep 2020 13:52:13 -0700 Subject: [PATCH 425/632] De-privatize functions in io.excel (#36104) --- pandas/io/excel/_base.py | 16 ++++++++-------- pandas/io/excel/_odswriter.py | 4 ++-- pandas/io/excel/_openpyxl.py | 4 ++-- pandas/io/excel/_util.py | 18 +++++------------- pandas/io/excel/_xlsxwriter.py | 4 ++-- pandas/io/excel/_xlwt.py | 4 ++-- 6 files changed, 21 insertions(+), 29 deletions(-) diff --git a/pandas/io/excel/_base.py b/pandas/io/excel/_base.py index 9bc1d7fedcb31..74eb65521f5b2 100644 --- a/pandas/io/excel/_base.py +++ b/pandas/io/excel/_base.py @@ -24,11 +24,11 @@ validate_header_arg, ) from pandas.io.excel._util import ( - _fill_mi_header, - _get_default_writer, - _maybe_convert_usecols, - _pop_header_name, + fill_mi_header, + get_default_writer, get_writer, + maybe_convert_usecols, + pop_header_name, ) from pandas.io.parsers import TextParser @@ -454,7 +454,7 @@ def parse( sheet = self.get_sheet_by_index(asheetname) data = self.get_sheet_data(sheet, convert_float) - usecols = _maybe_convert_usecols(usecols) + usecols = maybe_convert_usecols(usecols) if not data: output[asheetname] = DataFrame() @@ -473,10 +473,10 @@ def parse( if is_integer(skiprows): row += skiprows - data[row], control_row = _fill_mi_header(data[row], control_row) + data[row], control_row = fill_mi_header(data[row], control_row) if index_col is not None: - header_name, _ = _pop_header_name(data[row], index_col) + header_name, _ = pop_header_name(data[row], index_col) header_names.append(header_name) if 
is_list_like(index_col): @@ -645,7 +645,7 @@ def __new__(cls, path, engine=None, **kwargs): try: engine = config.get_option(f"io.excel.{ext}.writer") if engine == "auto": - engine = _get_default_writer(ext) + engine = get_default_writer(ext) except KeyError as err: raise ValueError(f"No engine for filetype: '{ext}'") from err cls = get_writer(engine) diff --git a/pandas/io/excel/_odswriter.py b/pandas/io/excel/_odswriter.py index f39391ae1fe7f..e7684012c1d4c 100644 --- a/pandas/io/excel/_odswriter.py +++ b/pandas/io/excel/_odswriter.py @@ -5,7 +5,7 @@ import pandas._libs.json as json from pandas.io.excel._base import ExcelWriter -from pandas.io.excel._util import _validate_freeze_panes +from pandas.io.excel._util import validate_freeze_panes from pandas.io.formats.excel import ExcelCell @@ -59,7 +59,7 @@ def write_cells( wks = Table(name=sheet_name) self.sheets[sheet_name] = wks - if _validate_freeze_panes(freeze_panes): + if validate_freeze_panes(freeze_panes): assert freeze_panes is not None self._create_freeze_panes(sheet_name, freeze_panes) diff --git a/pandas/io/excel/_openpyxl.py b/pandas/io/excel/_openpyxl.py index 3c67902d41baa..89b581da6ed31 100644 --- a/pandas/io/excel/_openpyxl.py +++ b/pandas/io/excel/_openpyxl.py @@ -6,7 +6,7 @@ from pandas.compat._optional import import_optional_dependency from pandas.io.excel._base import ExcelWriter, _BaseExcelReader -from pandas.io.excel._util import _validate_freeze_panes +from pandas.io.excel._util import validate_freeze_panes if TYPE_CHECKING: from openpyxl.descriptors.serialisable import Serialisable @@ -385,7 +385,7 @@ def write_cells( wks.title = sheet_name self.sheets[sheet_name] = wks - if _validate_freeze_panes(freeze_panes): + if validate_freeze_panes(freeze_panes): wks.freeze_panes = wks.cell( row=freeze_panes[0] + 1, column=freeze_panes[1] + 1 ) diff --git a/pandas/io/excel/_util.py b/pandas/io/excel/_util.py index a4b5b61734ab7..47105916a9c78 100644 --- a/pandas/io/excel/_util.py +++ b/pandas/io/excel/_util.py @@ -23,7 +23,7 @@ def register_writer(klass): _writers[engine_name] = klass -def _get_default_writer(ext): +def get_default_writer(ext): """ Return the default writer for the given extension. @@ -123,7 +123,7 @@ def _range2cols(areas: str) -> List[int]: return cols -def _maybe_convert_usecols(usecols): +def maybe_convert_usecols(usecols): """ Convert `usecols` into a compatible format for parsing in `parsers.py`. @@ -152,7 +152,7 @@ def _maybe_convert_usecols(usecols): return usecols -def _validate_freeze_panes(freeze_panes): +def validate_freeze_panes(freeze_panes): if freeze_panes is not None: if len(freeze_panes) == 2 and all( isinstance(item, int) for item in freeze_panes @@ -169,15 +169,7 @@ def _validate_freeze_panes(freeze_panes): return False -def _trim_excel_header(row): - # trim header row so auto-index inference works - # xlrd uses '' , openpyxl None - while len(row) > 0 and (row[0] == "" or row[0] is None): - row = row[1:] - return row - - -def _fill_mi_header(row, control_row): +def fill_mi_header(row, control_row): """ Forward fill blank entries in row but only inside the same parent index. @@ -210,7 +202,7 @@ def _fill_mi_header(row, control_row): return row, control_row -def _pop_header_name(row, index_col): +def pop_header_name(row, index_col): """ Pop the header name for MultiIndex parsing. 
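A small sketch of calling the now-public excel helpers (illustrative only, not part of the committed diff; assumes a build with this commit applied):

from pandas.io.excel._util import maybe_convert_usecols, validate_freeze_panes

# Excel-style column ranges are normalized to zero-based column indices.
assert maybe_convert_usecols("A:C") == [0, 1, 2]

# A well-formed (row, col) tuple validates as True; None reports False,
# while malformed input raises ValueError.
assert validate_freeze_panes((1, 2)) is True
assert validate_freeze_panes(None) is False
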
diff --git a/pandas/io/excel/_xlsxwriter.py b/pandas/io/excel/_xlsxwriter.py index bdbb006ae93dc..53f0c94d12e4c 100644 --- a/pandas/io/excel/_xlsxwriter.py +++ b/pandas/io/excel/_xlsxwriter.py @@ -3,7 +3,7 @@ import pandas._libs.json as json from pandas.io.excel._base import ExcelWriter -from pandas.io.excel._util import _validate_freeze_panes +from pandas.io.excel._util import validate_freeze_panes class _XlsxStyler: @@ -208,7 +208,7 @@ def write_cells( style_dict = {"null": None} - if _validate_freeze_panes(freeze_panes): + if validate_freeze_panes(freeze_panes): wks.freeze_panes(*(freeze_panes)) for cell in cells: diff --git a/pandas/io/excel/_xlwt.py b/pandas/io/excel/_xlwt.py index e1f72eb533c51..faebe526d17bd 100644 --- a/pandas/io/excel/_xlwt.py +++ b/pandas/io/excel/_xlwt.py @@ -3,7 +3,7 @@ import pandas._libs.json as json from pandas.io.excel._base import ExcelWriter -from pandas.io.excel._util import _validate_freeze_panes +from pandas.io.excel._util import validate_freeze_panes if TYPE_CHECKING: from xlwt import XFStyle @@ -48,7 +48,7 @@ def write_cells( wks = self.book.add_sheet(sheet_name) self.sheets[sheet_name] = wks - if _validate_freeze_panes(freeze_panes): + if validate_freeze_panes(freeze_panes): wks.set_panes_frozen(True) wks.set_horz_split_pos(freeze_panes[0]) wks.set_vert_split_pos(freeze_panes[1]) From 6016b177fae48c6bb46329af0290de4216246e7a Mon Sep 17 00:00:00 2001 From: Simon Hawkins Date: Fri, 4 Sep 2020 21:59:51 +0100 Subject: [PATCH 426/632] TYP: activate Check for missing error codes (#36088) --- ci/code_checks.sh | 7 +++---- pandas/core/arrays/datetimelike.py | 5 ++--- pandas/core/groupby/categorical.py | 6 ++++-- pandas/io/common.py | 6 +++++- pandas/plotting/_matplotlib/core.py | 2 +- 5 files changed, 15 insertions(+), 11 deletions(-) diff --git a/ci/code_checks.sh b/ci/code_checks.sh index 852f66763683b..2e0f27fefca0b 100755 --- a/ci/code_checks.sh +++ b/ci/code_checks.sh @@ -230,10 +230,9 @@ if [[ -z "$CHECK" || "$CHECK" == "patterns" ]]; then invgrep -R --include="*.py" -P '# type: (?!ignore)' pandas RET=$(($RET + $?)) ; echo $MSG "DONE" - # https://github.com/python/mypy/issues/7384 - # MSG='Check for missing error codes with # type: ignore' ; echo $MSG - # invgrep -R --include="*.py" -P '# type: ignore(?!\[)' pandas - # RET=$(($RET + $?)) ; echo $MSG "DONE" + MSG='Check for missing error codes with # type: ignore' ; echo $MSG + invgrep -R --include="*.py" -P '# type:\s?ignore(?!\[)' pandas + RET=$(($RET + $?)) ; echo $MSG "DONE" MSG='Check for use of foo.__class__ instead of type(foo)' ; echo $MSG invgrep -R --include=*.{py,pyx} '\.__class__' pandas diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index 1b5e1d81f00d6..5a44f87400b79 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -468,10 +468,9 @@ def _ndarray(self) -> np.ndarray: def _from_backing_data(self: _T, arr: np.ndarray) -> _T: # Note: we do not retain `freq` + # error: Too many arguments for "NDArrayBackedExtensionArray" # error: Unexpected keyword argument "dtype" for "NDArrayBackedExtensionArray" - # TODO: add my error code - # https://github.com/python/mypy/issues/7384 - return type(self)(arr, dtype=self.dtype) # type: ignore + return type(self)(arr, dtype=self.dtype) # type: ignore[call-arg] # ------------------------------------------------------------------ diff --git a/pandas/core/groupby/categorical.py b/pandas/core/groupby/categorical.py index 4d5acf527a867..3f04339803bf6 100644 --- 
a/pandas/core/groupby/categorical.py +++ b/pandas/core/groupby/categorical.py @@ -98,8 +98,10 @@ def recode_from_groupby( """ # we re-order to the original category orderings if sort: - return ci.set_categories(c.categories) # type: ignore [attr-defined] + # error: "CategoricalIndex" has no attribute "set_categories" + return ci.set_categories(c.categories) # type: ignore[attr-defined] # we are not sorting, so add unobserved to the end new_cats = c.categories[~c.categories.isin(ci.categories)] - return ci.add_categories(new_cats) # type: ignore [attr-defined] + # error: "CategoricalIndex" has no attribute "add_categories" + return ci.add_categories(new_cats) # type: ignore[attr-defined] diff --git a/pandas/io/common.py b/pandas/io/common.py index 9328f90ce67a3..2b13d54ec3aed 100644 --- a/pandas/io/common.py +++ b/pandas/io/common.py @@ -374,7 +374,11 @@ def get_compression_method( if isinstance(compression, Mapping): compression_args = dict(compression) try: - compression_method = compression_args.pop("method") # type: ignore + # error: Incompatible types in assignment (expression has type + # "Union[str, int, None]", variable has type "Optional[str]") + compression_method = compression_args.pop( # type: ignore[assignment] + "method" + ) except KeyError as err: raise ValueError("If mapping, compression must have key 'method'") from err else: diff --git a/pandas/plotting/_matplotlib/core.py b/pandas/plotting/_matplotlib/core.py index 2d64e1b051444..147e4efd74bc3 100644 --- a/pandas/plotting/_matplotlib/core.py +++ b/pandas/plotting/_matplotlib/core.py @@ -656,7 +656,7 @@ def _plot(cls, ax: "Axes", x, y, style=None, is_errorbar: bool = False, **kwds): if style is not None: args = (x, y, style) else: - args = (x, y) # type:ignore[assignment] + args = (x, y) # type: ignore[assignment] return ax.plot(*args, **kwds) def _get_index_name(self) -> Optional[str]: From 9820c426a035495ca14dfb0fefe873e96f7483e0 Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Fri, 4 Sep 2020 14:57:24 -0700 Subject: [PATCH 427/632] CLN: remove xfails/skips for no-longer-supported numpys (#36128) --- pandas/plotting/_matplotlib/style.py | 2 +- pandas/tests/arrays/sparse/test_array.py | 5 +- pandas/tests/frame/test_analytics.py | 58 ++++-------------------- pandas/tests/io/formats/test_to_csv.py | 4 -- pandas/tests/series/test_analytics.py | 5 -- 5 files changed, 10 insertions(+), 64 deletions(-) diff --git a/pandas/plotting/_matplotlib/style.py b/pandas/plotting/_matplotlib/style.py index 904a760a03e58..3e0954ef3d74d 100644 --- a/pandas/plotting/_matplotlib/style.py +++ b/pandas/plotting/_matplotlib/style.py @@ -11,7 +11,7 @@ def get_standard_colors( - num_colors=None, colormap=None, color_type: str = "default", color=None + num_colors: int, colormap=None, color_type: str = "default", color=None ): import matplotlib.pyplot as plt diff --git a/pandas/tests/arrays/sparse/test_array.py b/pandas/tests/arrays/sparse/test_array.py index 04215bfe1bedb..ece9367cea7fe 100644 --- a/pandas/tests/arrays/sparse/test_array.py +++ b/pandas/tests/arrays/sparse/test_array.py @@ -194,8 +194,7 @@ def test_constructor_inferred_fill_value(self, data, fill_value): @pytest.mark.parametrize("format", ["coo", "csc", "csr"]) @pytest.mark.parametrize( - "size", - [pytest.param(0, marks=td.skip_if_np_lt("1.16", reason="NumPy-11383")), 10], + "size", [0, 10], ) @td.skip_if_no_scipy def test_from_spmatrix(self, size, format): @@ -904,7 +903,6 @@ def test_all(self, data, pos, neg): ([1.0, 2.0, 1.0], 1.0, 0.0), ], ) - @td.skip_if_np_lt("1.15") # prior 
didn't dispatch def test_numpy_all(self, data, pos, neg): # GH 17570 out = np.all(SparseArray(data)) @@ -956,7 +954,6 @@ def test_any(self, data, pos, neg): ([0.0, 2.0, 0.0], 2.0, 0.0), ], ) - @td.skip_if_np_lt("1.15") # prior didn't dispatch def test_numpy_any(self, data, pos, neg): # GH 17570 out = np.any(SparseArray(data)) diff --git a/pandas/tests/frame/test_analytics.py b/pandas/tests/frame/test_analytics.py index b0ba0d991c9b0..f21b1d3dfe487 100644 --- a/pandas/tests/frame/test_analytics.py +++ b/pandas/tests/frame/test_analytics.py @@ -1060,54 +1060,14 @@ def test_any_all_bool_only(self): (np.any, {"A": pd.Series([0.0, 1.0], dtype="float")}, True), (np.all, {"A": pd.Series([0, 1], dtype=int)}, False), (np.any, {"A": pd.Series([0, 1], dtype=int)}, True), - pytest.param( - np.all, - {"A": pd.Series([0, 1], dtype="M8[ns]")}, - False, - marks=[td.skip_if_np_lt("1.15")], - ), - pytest.param( - np.any, - {"A": pd.Series([0, 1], dtype="M8[ns]")}, - True, - marks=[td.skip_if_np_lt("1.15")], - ), - pytest.param( - np.all, - {"A": pd.Series([1, 2], dtype="M8[ns]")}, - True, - marks=[td.skip_if_np_lt("1.15")], - ), - pytest.param( - np.any, - {"A": pd.Series([1, 2], dtype="M8[ns]")}, - True, - marks=[td.skip_if_np_lt("1.15")], - ), - pytest.param( - np.all, - {"A": pd.Series([0, 1], dtype="m8[ns]")}, - False, - marks=[td.skip_if_np_lt("1.15")], - ), - pytest.param( - np.any, - {"A": pd.Series([0, 1], dtype="m8[ns]")}, - True, - marks=[td.skip_if_np_lt("1.15")], - ), - pytest.param( - np.all, - {"A": pd.Series([1, 2], dtype="m8[ns]")}, - True, - marks=[td.skip_if_np_lt("1.15")], - ), - pytest.param( - np.any, - {"A": pd.Series([1, 2], dtype="m8[ns]")}, - True, - marks=[td.skip_if_np_lt("1.15")], - ), + pytest.param(np.all, {"A": pd.Series([0, 1], dtype="M8[ns]")}, False,), + pytest.param(np.any, {"A": pd.Series([0, 1], dtype="M8[ns]")}, True,), + pytest.param(np.all, {"A": pd.Series([1, 2], dtype="M8[ns]")}, True,), + pytest.param(np.any, {"A": pd.Series([1, 2], dtype="M8[ns]")}, True,), + pytest.param(np.all, {"A": pd.Series([0, 1], dtype="m8[ns]")}, False,), + pytest.param(np.any, {"A": pd.Series([0, 1], dtype="m8[ns]")}, True,), + pytest.param(np.all, {"A": pd.Series([1, 2], dtype="m8[ns]")}, True,), + pytest.param(np.any, {"A": pd.Series([1, 2], dtype="m8[ns]")}, True,), (np.all, {"A": pd.Series([0, 1], dtype="category")}, False), (np.any, {"A": pd.Series([0, 1], dtype="category")}, True), (np.all, {"A": pd.Series([1, 2], dtype="category")}, True), @@ -1120,8 +1080,6 @@ def test_any_all_bool_only(self): "B": pd.Series([10, 20], dtype="m8[ns]"), }, True, - # In 1.13.3 and 1.14 np.all(df) returns a Timedelta here - marks=[td.skip_if_np_lt("1.15")], ), ], ) diff --git a/pandas/tests/io/formats/test_to_csv.py b/pandas/tests/io/formats/test_to_csv.py index 753b8b6eda9c5..c40935b2cc5dd 100644 --- a/pandas/tests/io/formats/test_to_csv.py +++ b/pandas/tests/io/formats/test_to_csv.py @@ -11,10 +11,6 @@ class TestToCSV: - @pytest.mark.xfail( - (3, 6, 5) > sys.version_info, - reason=("Python csv library bug (see https://bugs.python.org/issue32255)"), - ) def test_to_csv_with_single_column(self): # see gh-18676, https://bugs.python.org/issue32255 # diff --git a/pandas/tests/series/test_analytics.py b/pandas/tests/series/test_analytics.py index ab8618eb0a7d4..e39083b709f38 100644 --- a/pandas/tests/series/test_analytics.py +++ b/pandas/tests/series/test_analytics.py @@ -3,8 +3,6 @@ import numpy as np import pytest -import pandas.util._test_decorators as td - import pandas as pd from pandas import 
DataFrame, Series import pandas._testing as tm @@ -130,7 +128,6 @@ def test_is_monotonic(self): @pytest.mark.parametrize("func", [np.any, np.all]) @pytest.mark.parametrize("kwargs", [dict(keepdims=True), dict(out=object())]) - @td.skip_if_np_lt("1.15") def test_validate_any_all_out_keepdims_raises(self, kwargs, func): s = pd.Series([1, 2]) param = list(kwargs)[0] @@ -144,7 +141,6 @@ def test_validate_any_all_out_keepdims_raises(self, kwargs, func): with pytest.raises(ValueError, match=msg): func(s, **kwargs) - @td.skip_if_np_lt("1.15") def test_validate_sum_initial(self): s = pd.Series([1, 2]) msg = ( @@ -167,7 +163,6 @@ def test_validate_median_initial(self): # method instead of the ufunc. s.median(overwrite_input=True) - @td.skip_if_np_lt("1.15") def test_validate_stat_keepdims(self): s = pd.Series([1, 2]) msg = ( From de9c771e0bcea5119a7a69f331cd3d6c5bc4a5be Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Fri, 4 Sep 2020 19:51:04 -0700 Subject: [PATCH 428/632] De-privatize (#36130) --- pandas/core/dtypes/dtypes.py | 4 +-- pandas/core/indexes/datetimes.py | 4 +-- pandas/core/indexing.py | 4 +-- pandas/core/util/hashing.py | 8 ++--- pandas/io/formats/format.py | 4 +-- pandas/io/formats/style.py | 20 ++++++------- pandas/plotting/_matplotlib/core.py | 29 +++++++++---------- pandas/plotting/_matplotlib/timeseries.py | 10 +++---- .../tests/indexing/multiindex/test_slice.py | 4 +-- pandas/tests/indexing/test_indexing.py | 12 ++++---- 10 files changed, 48 insertions(+), 51 deletions(-) diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py index 8dc500dddeafa..e321fdd9b3a9b 100644 --- a/pandas/core/dtypes/dtypes.py +++ b/pandas/core/dtypes/dtypes.py @@ -395,7 +395,7 @@ def _hash_categories(categories, ordered: Ordered = True) -> int: from pandas.core.dtypes.common import DT64NS_DTYPE, is_datetime64tz_dtype from pandas.core.util.hashing import ( - _combine_hash_arrays, + combine_hash_arrays, hash_array, hash_tuples, ) @@ -427,7 +427,7 @@ def _hash_categories(categories, ordered: Ordered = True) -> int: ) else: cat_array = [cat_array] - hashed = _combine_hash_arrays(iter(cat_array), num_items=len(cat_array)) + hashed = combine_hash_arrays(iter(cat_array), num_items=len(cat_array)) return np.bitwise_xor.reduce(hashed) @classmethod diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index 6dcb9250812d0..3fd93a8159041 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -354,9 +354,9 @@ def _mpl_repr(self): @property def _formatter_func(self): - from pandas.io.formats.format import _get_format_datetime64 + from pandas.io.formats.format import get_format_datetime64 - formatter = _get_format_datetime64(is_dates_only=self._is_dates_only) + formatter = get_format_datetime64(is_dates_only=self._is_dates_only) return lambda x: f"'{formatter(x, tz=self.tz)}'" # -------------------------------------------------------------------- diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index cfb17b9498a36..fe2fec1c52063 100644 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -2291,7 +2291,7 @@ def need_slice(obj) -> bool: ) -def _non_reducing_slice(slice_): +def non_reducing_slice(slice_): """ Ensure that a slice doesn't reduce to a Series or Scalar. 
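# Hedged illustration, not part of this diff: how the renamed slice helpers
# are used, mirroring the test updates at the end of this patch (assumes a
# build with this commit applied).
import pandas as pd

from pandas.core.indexing import maybe_numeric_slice, non_reducing_slice

df = pd.DataFrame({"A": [1, 2], "B": ["c", "d"]})

# non_reducing_slice wraps list-like/scalar column specs so that .loc keeps
# two dimensions instead of reducing to a Series:
assert isinstance(df.loc[non_reducing_slice(["A"])], pd.DataFrame)

# With slice_=None, maybe_numeric_slice falls back to the numeric columns:
assert maybe_numeric_slice(df, slice_=None) == pd.IndexSlice[:, ["A"]]
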
@@ -2330,7 +2330,7 @@ def pred(part) -> bool: return tuple(slice_) -def _maybe_numeric_slice(df, slice_, include_bool=False): +def maybe_numeric_slice(df, slice_, include_bool: bool = False): """ Want nice defaults for background_gradient that don't break with non-numeric data. But if slice_ is passed go with that. diff --git a/pandas/core/util/hashing.py b/pandas/core/util/hashing.py index d79b9f4092325..df082c7285ae8 100644 --- a/pandas/core/util/hashing.py +++ b/pandas/core/util/hashing.py @@ -24,7 +24,7 @@ _default_hash_key = "0123456789123456" -def _combine_hash_arrays(arrays, num_items: int): +def combine_hash_arrays(arrays, num_items: int): """ Parameters ---------- @@ -108,7 +108,7 @@ def hash_pandas_object( for _ in [None] ) arrays = itertools.chain([h], index_iter) - h = _combine_hash_arrays(arrays, 2) + h = combine_hash_arrays(arrays, 2) h = Series(h, index=obj.index, dtype="uint64", copy=False) @@ -131,7 +131,7 @@ def hash_pandas_object( # keep `hashes` specifically a generator to keep mypy happy _hashes = itertools.chain(hashes, index_hash_generator) hashes = (x for x in _hashes) - h = _combine_hash_arrays(hashes, num_items) + h = combine_hash_arrays(hashes, num_items) h = Series(h, index=obj.index, dtype="uint64", copy=False) else: @@ -175,7 +175,7 @@ def hash_tuples(vals, encoding="utf8", hash_key: str = _default_hash_key): hashes = ( _hash_categorical(cat, encoding=encoding, hash_key=hash_key) for cat in vals ) - h = _combine_hash_arrays(hashes, len(vals)) + h = combine_hash_arrays(hashes, len(vals)) if is_tuple: h = h[0] diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py index 3d441f6e737bc..3dc4290953360 100644 --- a/pandas/io/formats/format.py +++ b/pandas/io/formats/format.py @@ -1624,7 +1624,7 @@ def _format_datetime64_dateonly( return x._date_repr -def _get_format_datetime64( +def get_format_datetime64( is_dates_only: bool, nat_rep: str = "NaT", date_format: None = None ) -> Callable: @@ -1656,7 +1656,7 @@ def _format_strings(self) -> List[str]: """ we by definition have a TZ """ values = self.values.astype(object) is_dates_only = _is_dates_only(values) - formatter = self.formatter or _get_format_datetime64( + formatter = self.formatter or get_format_datetime64( is_dates_only, date_format=self.date_format ) fmt_values = [formatter(x) for x in values] diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py index 3bbb5271bce61..023557dd6494d 100644 --- a/pandas/io/formats/style.py +++ b/pandas/io/formats/style.py @@ -36,7 +36,7 @@ import pandas.core.common as com from pandas.core.frame import DataFrame from pandas.core.generic import NDFrame -from pandas.core.indexing import _maybe_numeric_slice, _non_reducing_slice +from pandas.core.indexing import maybe_numeric_slice, non_reducing_slice jinja2 = import_optional_dependency("jinja2", extra="DataFrame.style requires jinja2.") @@ -475,7 +475,7 @@ def format(self, formatter, subset=None, na_rep: Optional[str] = None) -> "Style row_locs = range(len(self.data)) col_locs = range(len(self.data.columns)) else: - subset = _non_reducing_slice(subset) + subset = non_reducing_slice(subset) if len(subset) == 1: subset = subset, self.data.columns @@ -633,7 +633,7 @@ def _apply( **kwargs, ) -> "Styler": subset = slice(None) if subset is None else subset - subset = _non_reducing_slice(subset) + subset = non_reducing_slice(subset) data = self.data.loc[subset] if axis is not None: result = data.apply(func, axis=axis, result_type="expand", **kwargs) @@ -725,7 +725,7 @@ def _applymap(self, func: Callable, 
subset=None, **kwargs) -> "Styler": func = partial(func, **kwargs) # applymap doesn't take kwargs? if subset is None: subset = pd.IndexSlice[:] - subset = _non_reducing_slice(subset) + subset = non_reducing_slice(subset) result = self.data.loc[subset].applymap(func) self._update_ctx(result) return self @@ -985,7 +985,7 @@ def hide_columns(self, subset) -> "Styler": ------- self : Styler """ - subset = _non_reducing_slice(subset) + subset = non_reducing_slice(subset) hidden_df = self.data.loc[subset] self.hidden_columns = self.columns.get_indexer_for(hidden_df.columns) return self @@ -1087,8 +1087,8 @@ def background_gradient( of the data is extended by ``low * (x.max() - x.min())`` and ``high * (x.max() - x.min())`` before normalizing. """ - subset = _maybe_numeric_slice(self.data, subset) - subset = _non_reducing_slice(subset) + subset = maybe_numeric_slice(self.data, subset) + subset = non_reducing_slice(subset) self.apply( self._background_gradient, cmap=cmap, @@ -1322,8 +1322,8 @@ def bar( "(eg: color=['#d65f5f', '#5fba7d'])" ) - subset = _maybe_numeric_slice(self.data, subset) - subset = _non_reducing_slice(subset) + subset = maybe_numeric_slice(self.data, subset) + subset = non_reducing_slice(subset) self.apply( self._bar, subset=subset, @@ -1390,7 +1390,7 @@ def _highlight_handler( axis: Optional[Axis] = None, max_: bool = True, ) -> "Styler": - subset = _non_reducing_slice(_maybe_numeric_slice(self.data, subset)) + subset = non_reducing_slice(maybe_numeric_slice(self.data, subset)) self.apply( self._highlight_extrema, color=color, axis=axis, subset=subset, max_=max_ ) diff --git a/pandas/plotting/_matplotlib/core.py b/pandas/plotting/_matplotlib/core.py index 147e4efd74bc3..c1ba7881165f1 100644 --- a/pandas/plotting/_matplotlib/core.py +++ b/pandas/plotting/_matplotlib/core.py @@ -33,6 +33,13 @@ from pandas.plotting._matplotlib.compat import _mpl_ge_3_0_0 from pandas.plotting._matplotlib.converter import register_pandas_matplotlib_converters from pandas.plotting._matplotlib.style import get_standard_colors +from pandas.plotting._matplotlib.timeseries import ( + decorate_axes, + format_dateaxis, + maybe_convert_index, + maybe_resample, + use_dynamic_x, +) from pandas.plotting._matplotlib.tools import ( create_subplots, flatten_axes, @@ -1074,15 +1081,11 @@ def _is_ts_plot(self) -> bool: return not self.x_compat and self.use_index and self._use_dynamic_x() def _use_dynamic_x(self): - from pandas.plotting._matplotlib.timeseries import _use_dynamic_x - - return _use_dynamic_x(self._get_ax(0), self.data) + return use_dynamic_x(self._get_ax(0), self.data) def _make_plot(self): if self._is_ts_plot(): - from pandas.plotting._matplotlib.timeseries import _maybe_convert_index - - data = _maybe_convert_index(self._get_ax(0), self.data) + data = maybe_convert_index(self._get_ax(0), self.data) x = data.index # dummy, not used plotf = self._ts_plot @@ -1142,24 +1145,18 @@ def _plot( @classmethod def _ts_plot(cls, ax: "Axes", x, data, style=None, **kwds): - from pandas.plotting._matplotlib.timeseries import ( - _decorate_axes, - _maybe_resample, - format_dateaxis, - ) - # accept x to be consistent with normal plot func, # x is not passed to tsplot as it uses data.index as x coordinate # column_num must be in kwds for stacking purpose - freq, data = _maybe_resample(data, ax, kwds) + freq, data = maybe_resample(data, ax, kwds) # Set ax with freq info - _decorate_axes(ax, freq, kwds) + decorate_axes(ax, freq, kwds) # digging deeper if hasattr(ax, "left_ax"): - _decorate_axes(ax.left_ax, freq, kwds) 
+ decorate_axes(ax.left_ax, freq, kwds) if hasattr(ax, "right_ax"): - _decorate_axes(ax.right_ax, freq, kwds) + decorate_axes(ax.right_ax, freq, kwds) ax._plot_data.append((data, cls._kind, kwds)) lines = cls._plot(ax, data.index, data.values, style=style, **kwds) diff --git a/pandas/plotting/_matplotlib/timeseries.py b/pandas/plotting/_matplotlib/timeseries.py index fd89a093d25a4..f8faac6a6a026 100644 --- a/pandas/plotting/_matplotlib/timeseries.py +++ b/pandas/plotting/_matplotlib/timeseries.py @@ -32,7 +32,7 @@ # Plotting functions and monkey patches -def _maybe_resample(series: "Series", ax: "Axes", kwargs): +def maybe_resample(series: "Series", ax: "Axes", kwargs): # resample against axes freq if necessary freq, ax_freq = _get_freq(ax, series) @@ -105,7 +105,7 @@ def _replot_ax(ax: "Axes", freq, kwargs): ax._plot_data = [] ax.clear() - _decorate_axes(ax, freq, kwargs) + decorate_axes(ax, freq, kwargs) lines = [] labels = [] @@ -128,7 +128,7 @@ def _replot_ax(ax: "Axes", freq, kwargs): return lines, labels -def _decorate_axes(ax: "Axes", freq, kwargs): +def decorate_axes(ax: "Axes", freq, kwargs): """Initialize axes for time-series plotting""" if not hasattr(ax, "_plot_data"): ax._plot_data = [] @@ -193,7 +193,7 @@ def _get_freq(ax: "Axes", series: "Series"): return freq, ax_freq -def _use_dynamic_x(ax: "Axes", data: FrameOrSeriesUnion) -> bool: +def use_dynamic_x(ax: "Axes", data: FrameOrSeriesUnion) -> bool: freq = _get_index_freq(data.index) ax_freq = _get_ax_freq(ax) @@ -235,7 +235,7 @@ def _get_index_freq(index: "Index") -> Optional[BaseOffset]: return freq -def _maybe_convert_index(ax: "Axes", data): +def maybe_convert_index(ax: "Axes", data): # tsplot converts automatically, but don't want to convert index # over and over for DataFrames if isinstance(data.index, (ABCDatetimeIndex, ABCPeriodIndex)): diff --git a/pandas/tests/indexing/multiindex/test_slice.py b/pandas/tests/indexing/multiindex/test_slice.py index 532bb4f2e6dac..ec0391a2ccc26 100644 --- a/pandas/tests/indexing/multiindex/test_slice.py +++ b/pandas/tests/indexing/multiindex/test_slice.py @@ -6,7 +6,7 @@ import pandas as pd from pandas import DataFrame, Index, MultiIndex, Series, Timestamp import pandas._testing as tm -from pandas.core.indexing import _non_reducing_slice +from pandas.core.indexing import non_reducing_slice from pandas.tests.indexing.common import _mklbl @@ -739,7 +739,7 @@ def test_non_reducing_slice_on_multiindex(self): df = pd.DataFrame(dic, index=[0, 1]) idx = pd.IndexSlice slice_ = idx[:, idx["b", "d"]] - tslice_ = _non_reducing_slice(slice_) + tslice_ = non_reducing_slice(slice_) result = df.loc[tslice_] expected = pd.DataFrame({("b", "d"): [4, 1]}) diff --git a/pandas/tests/indexing/test_indexing.py b/pandas/tests/indexing/test_indexing.py index 5b7f013d5de31..a080c5d169215 100644 --- a/pandas/tests/indexing/test_indexing.py +++ b/pandas/tests/indexing/test_indexing.py @@ -12,7 +12,7 @@ import pandas as pd from pandas import DataFrame, Index, NaT, Series import pandas._testing as tm -from pandas.core.indexing import _maybe_numeric_slice, _non_reducing_slice +from pandas.core.indexing import maybe_numeric_slice, non_reducing_slice from pandas.tests.indexing.common import _mklbl # ------------------------------------------------------------------------ @@ -822,7 +822,7 @@ def test_range_in_series_indexing(self, size): def test_non_reducing_slice(self, slc): df = DataFrame([[0, 1], [2, 3]]) - tslice_ = _non_reducing_slice(slc) + tslice_ = non_reducing_slice(slc) assert isinstance(df.loc[tslice_], 
DataFrame) def test_list_slice(self): @@ -831,18 +831,18 @@ def test_list_slice(self): df = DataFrame({"A": [1, 2], "B": [3, 4]}, index=["A", "B"]) expected = pd.IndexSlice[:, ["A"]] for subset in slices: - result = _non_reducing_slice(subset) + result = non_reducing_slice(subset) tm.assert_frame_equal(df.loc[result], df.loc[expected]) def test_maybe_numeric_slice(self): df = DataFrame({"A": [1, 2], "B": ["c", "d"], "C": [True, False]}) - result = _maybe_numeric_slice(df, slice_=None) + result = maybe_numeric_slice(df, slice_=None) expected = pd.IndexSlice[:, ["A"]] assert result == expected - result = _maybe_numeric_slice(df, None, include_bool=True) + result = maybe_numeric_slice(df, None, include_bool=True) expected = pd.IndexSlice[:, ["A", "C"]] - result = _maybe_numeric_slice(df, [1]) + result = maybe_numeric_slice(df, [1]) expected = [1] assert result == expected From cab3e0e2f6d67a9e2e6648869e7dc60b19ca0c31 Mon Sep 17 00:00:00 2001 From: Simon Hawkins Date: Sat, 5 Sep 2020 03:56:30 +0100 Subject: [PATCH 429/632] TYP: misc fixes for numpy types (#36098) --- pandas/_typing.py | 2 +- pandas/core/algorithms.py | 7 +++---- pandas/core/arrays/categorical.py | 2 +- pandas/core/construction.py | 6 ++++-- pandas/core/dtypes/cast.py | 4 ++-- 5 files changed, 11 insertions(+), 10 deletions(-) diff --git a/pandas/_typing.py b/pandas/_typing.py index f8af92e07c674..74bfc9134c3af 100644 --- a/pandas/_typing.py +++ b/pandas/_typing.py @@ -62,7 +62,7 @@ # other Dtype = Union[ - "ExtensionDtype", str, np.dtype, Type[Union[str, float, int, complex, bool]] + "ExtensionDtype", str, np.dtype, Type[Union[str, float, int, complex, bool, object]] ] DtypeObj = Union[np.dtype, "ExtensionDtype"] FilePathOrBuffer = Union[str, Path, IO[AnyStr], IOBase] diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py index 9d75d21c5637a..f297c7165208f 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py @@ -6,7 +6,7 @@ import operator from textwrap import dedent -from typing import TYPE_CHECKING, Dict, Optional, Tuple, Union +from typing import TYPE_CHECKING, Dict, Optional, Tuple, Union, cast from warnings import catch_warnings, simplefilter, warn import numpy as np @@ -60,7 +60,7 @@ from pandas.core.indexers import validate_indices if TYPE_CHECKING: - from pandas import DataFrame, Series + from pandas import Categorical, DataFrame, Series _shared_docs: Dict[str, str] = {} @@ -429,8 +429,7 @@ def isin(comps: AnyArrayLike, values: AnyArrayLike) -> np.ndarray: if is_categorical_dtype(comps): # TODO(extension) # handle categoricals - # error: "ExtensionArray" has no attribute "isin" [attr-defined] - return comps.isin(values) # type: ignore[attr-defined] + return cast("Categorical", comps).isin(values) comps, dtype = _ensure_data(comps) values, _ = _ensure_data(values, dtype=dtype) diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index 27b1afdb438cb..ec85ec47d625c 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -2316,7 +2316,7 @@ def _concat_same_type(self, to_concat): return union_categoricals(to_concat) - def isin(self, values): + def isin(self, values) -> np.ndarray: """ Check whether `values` are contained in Categorical. 
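A brief sketch of the runtime behavior that the annotations in this patch pin down (illustrative only, not part of the committed diff; assumes a build with this commit applied):

import numpy as np
import pandas as pd
from pandas.core.construction import extract_array

# extract_array is now typed AnyArrayLike -> ArrayLike; with
# extract_numpy=True a plain ndarray comes back for a numpy-backed Series.
arr = extract_array(pd.Series([1, 2, 3]), extract_numpy=True)
assert isinstance(arr, np.ndarray)

# Categorical.isin now advertises its boolean np.ndarray return type.
mask = pd.Categorical(["a", "b", "a"]).isin(["a"])
assert isinstance(mask, np.ndarray)
assert mask.tolist() == [True, False, True]
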
diff --git a/pandas/core/construction.py b/pandas/core/construction.py index 02b8ed17244cd..9d6c2789af25b 100644 --- a/pandas/core/construction.py +++ b/pandas/core/construction.py @@ -335,7 +335,7 @@ def array( return result -def extract_array(obj, extract_numpy: bool = False): +def extract_array(obj: AnyArrayLike, extract_numpy: bool = False) -> ArrayLike: """ Extract the ndarray or ExtensionArray from a Series or Index. @@ -383,7 +383,9 @@ def extract_array(obj, extract_numpy: bool = False): if extract_numpy and isinstance(obj, ABCPandasArray): obj = obj.to_numpy() - return obj + # error: Incompatible return value type (got "Index", expected "ExtensionArray") + # error: Incompatible return value type (got "Series", expected "ExtensionArray") + return obj # type: ignore[return-value] def sanitize_array( diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index 1489e08d82bf0..7c5aafcbbc7e9 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -1488,7 +1488,7 @@ def find_common_type(types: List[DtypeObj]) -> DtypeObj: if has_bools: for t in types: if is_integer_dtype(t) or is_float_dtype(t) or is_complex_dtype(t): - return object + return np.dtype("object") return np.find_common_type(types, []) @@ -1550,7 +1550,7 @@ def construct_1d_arraylike_from_scalar( elif isinstance(dtype, np.dtype) and dtype.kind in ("U", "S"): # we need to coerce to object dtype to avoid # to allow numpy to take our string as a scalar value - dtype = object + dtype = np.dtype("object") if not isna(value): value = ensure_str(value) From 6b2a8606c45d4383b95b1e13da6f3ee8fce09883 Mon Sep 17 00:00:00 2001 From: Jonathan Shreckengost Date: Fri, 4 Sep 2020 23:10:49 -0400 Subject: [PATCH 430/632] Comma cleanup (#36082) --- .../tests/indexes/datetimes/test_datetime.py | 2 +- .../tests/indexes/datetimes/test_timezones.py | 2 +- .../tests/indexes/multi/test_constructors.py | 6 +++--- pandas/tests/indexes/multi/test_isin.py | 2 +- pandas/tests/indexes/test_base.py | 2 +- .../indexes/timedeltas/test_scalar_compat.py | 8 ++++---- .../indexes/timedeltas/test_searchsorted.py | 2 +- pandas/tests/indexing/common.py | 4 +--- pandas/tests/indexing/test_callable.py | 18 ++++++------------ pandas/tests/indexing/test_check_indexer.py | 8 +++----- pandas/tests/indexing/test_coercion.py | 2 +- pandas/tests/indexing/test_floats.py | 14 ++++---------- 12 files changed, 27 insertions(+), 43 deletions(-) diff --git a/pandas/tests/indexes/datetimes/test_datetime.py b/pandas/tests/indexes/datetimes/test_datetime.py index 7bb1d98086a91..8e2ac4feb7ded 100644 --- a/pandas/tests/indexes/datetimes/test_datetime.py +++ b/pandas/tests/indexes/datetimes/test_datetime.py @@ -51,7 +51,7 @@ def test_reindex_with_same_tz(self): "2010-01-02 00:00:00", ] expected1 = DatetimeIndex( - expected_list1, dtype="datetime64[ns, UTC]", freq=None, + expected_list1, dtype="datetime64[ns, UTC]", freq=None ) expected2 = np.array([0] + [-1] * 21 + [23], dtype=np.dtype("intp")) tm.assert_index_equal(result1, expected1) diff --git a/pandas/tests/indexes/datetimes/test_timezones.py b/pandas/tests/indexes/datetimes/test_timezones.py index ea68e8759c123..233835bb4b5f7 100644 --- a/pandas/tests/indexes/datetimes/test_timezones.py +++ b/pandas/tests/indexes/datetimes/test_timezones.py @@ -799,7 +799,7 @@ def test_dti_from_tzaware_datetime(self, tz): @pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"]) def test_dti_tz_constructors(self, tzstr): - """ Test different DatetimeIndex constructions with timezone + """Test 
different DatetimeIndex constructions with timezone Follow-up of GH#4229 """ arr = ["11/10/2005 08:00:00", "11/10/2005 09:00:00"] diff --git a/pandas/tests/indexes/multi/test_constructors.py b/pandas/tests/indexes/multi/test_constructors.py index 1157c7f8bb962..16af884c89e9e 100644 --- a/pandas/tests/indexes/multi/test_constructors.py +++ b/pandas/tests/indexes/multi/test_constructors.py @@ -741,18 +741,18 @@ def test_raise_invalid_sortorder(): with pytest.raises(ValueError, match=r".* sortorder 2 with lexsort_depth 1.*"): MultiIndex( - levels=levels, codes=[[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 2, 1]], sortorder=2, + levels=levels, codes=[[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 2, 1]], sortorder=2 ) with pytest.raises(ValueError, match=r".* sortorder 1 with lexsort_depth 0.*"): MultiIndex( - levels=levels, codes=[[0, 0, 1, 0, 1, 1], [0, 1, 0, 2, 2, 1]], sortorder=1, + levels=levels, codes=[[0, 0, 1, 0, 1, 1], [0, 1, 0, 2, 2, 1]], sortorder=1 ) def test_datetimeindex(): idx1 = pd.DatetimeIndex( - ["2013-04-01 9:00", "2013-04-02 9:00", "2013-04-03 9:00"] * 2, tz="Asia/Tokyo", + ["2013-04-01 9:00", "2013-04-02 9:00", "2013-04-03 9:00"] * 2, tz="Asia/Tokyo" ) idx2 = pd.date_range("2010/01/01", periods=6, freq="M", tz="US/Eastern") idx = MultiIndex.from_arrays([idx1, idx2]) diff --git a/pandas/tests/indexes/multi/test_isin.py b/pandas/tests/indexes/multi/test_isin.py index 122263e6ec198..b369b9a50954e 100644 --- a/pandas/tests/indexes/multi/test_isin.py +++ b/pandas/tests/indexes/multi/test_isin.py @@ -78,7 +78,7 @@ def test_isin_level_kwarg(): @pytest.mark.parametrize( "labels,expected,level", [ - ([("b", np.nan)], np.array([False, False, True]), None,), + ([("b", np.nan)], np.array([False, False, True]), None), ([np.nan, "a"], np.array([True, True, False]), 0), (["d", np.nan], np.array([False, True, True]), 1), ], diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py index aee4b16621b4d..7720db9d98ebf 100644 --- a/pandas/tests/indexes/test_base.py +++ b/pandas/tests/indexes/test_base.py @@ -2426,7 +2426,7 @@ def test_index_with_tuple_bool(self): # TODO: remove tupleize_cols=False once correct behaviour is restored # TODO: also this op right now produces FutureWarning from numpy idx = Index([("a", "b"), ("b", "c"), ("c", "a")], tupleize_cols=False) - result = idx == ("c", "a",) + result = idx == ("c", "a") expected = np.array([False, False, True]) tm.assert_numpy_array_equal(result, expected) diff --git a/pandas/tests/indexes/timedeltas/test_scalar_compat.py b/pandas/tests/indexes/timedeltas/test_scalar_compat.py index 16c19b8d00380..6a2238d90b590 100644 --- a/pandas/tests/indexes/timedeltas/test_scalar_compat.py +++ b/pandas/tests/indexes/timedeltas/test_scalar_compat.py @@ -104,18 +104,18 @@ def test_round(self): "L", t1a, TimedeltaIndex( - ["-1 days +00:00:00", "-2 days +23:58:58", "-2 days +23:57:56"], + ["-1 days +00:00:00", "-2 days +23:58:58", "-2 days +23:57:56"] ), ), ( "S", t1a, TimedeltaIndex( - ["-1 days +00:00:00", "-2 days +23:58:58", "-2 days +23:57:56"], + ["-1 days +00:00:00", "-2 days +23:58:58", "-2 days +23:57:56"] ), ), - ("12T", t1c, TimedeltaIndex(["-1 days", "-1 days", "-1 days"],),), - ("H", t1c, TimedeltaIndex(["-1 days", "-1 days", "-1 days"],),), + ("12T", t1c, TimedeltaIndex(["-1 days", "-1 days", "-1 days"])), + ("H", t1c, TimedeltaIndex(["-1 days", "-1 days", "-1 days"])), ("d", t1c, TimedeltaIndex([-1, -1, -1], unit="D")), ]: diff --git a/pandas/tests/indexes/timedeltas/test_searchsorted.py b/pandas/tests/indexes/timedeltas/test_searchsorted.py 
index 4806a9acff96f..3cf45931cf6b7 100644 --- a/pandas/tests/indexes/timedeltas/test_searchsorted.py +++ b/pandas/tests/indexes/timedeltas/test_searchsorted.py @@ -17,7 +17,7 @@ def test_searchsorted_different_argument_classes(self, klass): tm.assert_numpy_array_equal(result, expected) @pytest.mark.parametrize( - "arg", [[1, 2], ["a", "b"], [Timestamp("2020-01-01", tz="Europe/London")] * 2], + "arg", [[1, 2], ["a", "b"], [Timestamp("2020-01-01", tz="Europe/London")] * 2] ) def test_searchsorted_invalid_argument_dtype(self, arg): idx = TimedeltaIndex(["1 day", "2 days", "3 days"]) diff --git a/pandas/tests/indexing/common.py b/pandas/tests/indexing/common.py index 9cc031001f81c..656d25bec2a6b 100644 --- a/pandas/tests/indexing/common.py +++ b/pandas/tests/indexing/common.py @@ -144,9 +144,7 @@ def check_values(self, f, func, values=False): tm.assert_almost_equal(result, expected) - def check_result( - self, method, key, typs=None, axes=None, fails=None, - ): + def check_result(self, method, key, typs=None, axes=None, fails=None): def _eq(axis, obj, key): """ compare equal for these 2 keys """ axified = _axify(obj, key, axis) diff --git a/pandas/tests/indexing/test_callable.py b/pandas/tests/indexing/test_callable.py index 621417eb38d94..bf51c3e5d1695 100644 --- a/pandas/tests/indexing/test_callable.py +++ b/pandas/tests/indexing/test_callable.py @@ -17,15 +17,11 @@ def test_frame_loc_callable(self): res = df.loc[lambda x: x.A > 2] tm.assert_frame_equal(res, df.loc[df.A > 2]) - res = df.loc[ - lambda x: x.A > 2, - ] # noqa: E231 - tm.assert_frame_equal(res, df.loc[df.A > 2,]) # noqa: E231 + res = df.loc[lambda x: x.A > 2] # noqa: E231 + tm.assert_frame_equal(res, df.loc[df.A > 2]) # noqa: E231 - res = df.loc[ - lambda x: x.A > 2, - ] # noqa: E231 - tm.assert_frame_equal(res, df.loc[df.A > 2,]) # noqa: E231 + res = df.loc[lambda x: x.A > 2] # noqa: E231 + tm.assert_frame_equal(res, df.loc[df.A > 2]) # noqa: E231 res = df.loc[lambda x: x.B == "b", :] tm.assert_frame_equal(res, df.loc[df.B == "b", :]) @@ -94,10 +90,8 @@ def test_frame_loc_callable_labels(self): res = df.loc[lambda x: ["A", "C"]] tm.assert_frame_equal(res, df.loc[["A", "C"]]) - res = df.loc[ - lambda x: ["A", "C"], - ] # noqa: E231 - tm.assert_frame_equal(res, df.loc[["A", "C"],]) # noqa: E231 + res = df.loc[lambda x: ["A", "C"]] # noqa: E231 + tm.assert_frame_equal(res, df.loc[["A", "C"]]) # noqa: E231 res = df.loc[lambda x: ["A", "C"], :] tm.assert_frame_equal(res, df.loc[["A", "C"], :]) diff --git a/pandas/tests/indexing/test_check_indexer.py b/pandas/tests/indexing/test_check_indexer.py index 69d4065234d93..865ecb129cdfa 100644 --- a/pandas/tests/indexing/test_check_indexer.py +++ b/pandas/tests/indexing/test_check_indexer.py @@ -32,7 +32,7 @@ def test_valid_input(indexer, expected): @pytest.mark.parametrize( - "indexer", [[True, False, None], pd.array([True, False, None], dtype="boolean")], + "indexer", [[True, False, None], pd.array([True, False, None], dtype="boolean")] ) def test_boolean_na_returns_indexer(indexer): # https://github.com/pandas-dev/pandas/issues/31503 @@ -61,7 +61,7 @@ def test_bool_raise_length(indexer): @pytest.mark.parametrize( - "indexer", [[0, 1, None], pd.array([0, 1, pd.NA], dtype="Int64")], + "indexer", [[0, 1, None], pd.array([0, 1, pd.NA], dtype="Int64")] ) def test_int_raise_missing_values(indexer): array = np.array([1, 2, 3]) @@ -89,9 +89,7 @@ def test_raise_invalid_array_dtypes(indexer): check_array_indexer(array, indexer) -@pytest.mark.parametrize( - "indexer", [None, Ellipsis, 
slice(0, 3), (None,)], -) +@pytest.mark.parametrize("indexer", [None, Ellipsis, slice(0, 3), (None,)]) def test_pass_through_non_array_likes(indexer): array = np.array([1, 2, 3]) diff --git a/pandas/tests/indexing/test_coercion.py b/pandas/tests/indexing/test_coercion.py index 1c5f00ff754a4..752ecd47fe089 100644 --- a/pandas/tests/indexing/test_coercion.py +++ b/pandas/tests/indexing/test_coercion.py @@ -87,7 +87,7 @@ def _assert_setitem_series_conversion( # tm.assert_series_equal(temp, expected_series) @pytest.mark.parametrize( - "val,exp_dtype", [(1, object), (1.1, object), (1 + 1j, object), (True, object)], + "val,exp_dtype", [(1, object), (1.1, object), (1 + 1j, object), (True, object)] ) def test_setitem_series_object(self, val, exp_dtype): obj = pd.Series(list("abcd")) diff --git a/pandas/tests/indexing/test_floats.py b/pandas/tests/indexing/test_floats.py index 18b9898e7d800..c48e0a129e161 100644 --- a/pandas/tests/indexing/test_floats.py +++ b/pandas/tests/indexing/test_floats.py @@ -181,9 +181,7 @@ def test_scalar_with_mixed(self): expected = 3 assert result == expected - @pytest.mark.parametrize( - "index_func", [tm.makeIntIndex, tm.makeRangeIndex], - ) + @pytest.mark.parametrize("index_func", [tm.makeIntIndex, tm.makeRangeIndex]) @pytest.mark.parametrize("klass", [Series, DataFrame]) def test_scalar_integer(self, index_func, klass): @@ -405,7 +403,7 @@ def test_slice_integer(self): @pytest.mark.parametrize("l", [slice(2, 4.0), slice(2.0, 4), slice(2.0, 4.0)]) def test_integer_positional_indexing(self, l): - """ make sure that we are raising on positional indexing + """make sure that we are raising on positional indexing w.r.t. an integer index """ s = Series(range(2, 6), index=range(2, 6)) @@ -425,9 +423,7 @@ def test_integer_positional_indexing(self, l): with pytest.raises(TypeError, match=msg): s.iloc[l] - @pytest.mark.parametrize( - "index_func", [tm.makeIntIndex, tm.makeRangeIndex], - ) + @pytest.mark.parametrize("index_func", [tm.makeIntIndex, tm.makeRangeIndex]) def test_slice_integer_frame_getitem(self, index_func): # similar to above, but on the getitem dim (of a DataFrame) @@ -486,9 +482,7 @@ def test_slice_integer_frame_getitem(self, index_func): s[l] @pytest.mark.parametrize("l", [slice(3.0, 4), slice(3, 4.0), slice(3.0, 4.0)]) - @pytest.mark.parametrize( - "index_func", [tm.makeIntIndex, tm.makeRangeIndex], - ) + @pytest.mark.parametrize("index_func", [tm.makeIntIndex, tm.makeRangeIndex]) def test_float_slice_getitem_with_integer_index_raises(self, l, index_func): # similar to above, but on the getitem dim (of a DataFrame) From b73489f695b06da16964d793330db475efbac202 Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Fri, 4 Sep 2020 20:11:39 -0700 Subject: [PATCH 431/632] CLN: remove unused args/kwargs (#36129) --- pandas/core/groupby/generic.py | 1 + pandas/core/groupby/groupby.py | 2 ++ pandas/core/groupby/ops.py | 8 ++++---- 3 files changed, 7 insertions(+), 4 deletions(-) diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index 53edd056a6802..173ff99912f05 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -1084,6 +1084,7 @@ def blk_func(bvalues: ArrayLike) -> ArrayLike: assert how == "ohlc" raise + # We get here with a) EADtypes and b) object dtype obj: Union[Series, DataFrame] # call our grouper again with only this block if isinstance(bvalues, ExtensionArray): diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index 651af2d314251..6ef2e67030881 100644 --- 
a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -1012,6 +1012,8 @@ def _agg_general( # raised in _get_cython_function, in some cases can # be trimmed by implementing cython funcs for more dtypes pass + else: + raise # apply a non-cython aggregation result = self.aggregate(lambda x: npfunc(x, axis=self.axis)) diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py index c076b6e2e181b..e9525f03368fa 100644 --- a/pandas/core/groupby/ops.py +++ b/pandas/core/groupby/ops.py @@ -601,7 +601,7 @@ def _transform( return result - def agg_series(self, obj: Series, func: F, *args, **kwargs): + def agg_series(self, obj: Series, func: F): # Caller is responsible for checking ngroups != 0 assert self.ngroups != 0 @@ -649,7 +649,7 @@ def _aggregate_series_fast(self, obj: Series, func: F): result, counts = grouper.get_result() return result, counts - def _aggregate_series_pure_python(self, obj: Series, func: F, *args, **kwargs): + def _aggregate_series_pure_python(self, obj: Series, func: F): group_index, _, ngroups = self.group_info counts = np.zeros(ngroups, dtype=int) @@ -658,7 +658,7 @@ def _aggregate_series_pure_python(self, obj: Series, func: F, *args, **kwargs): splitter = get_splitter(obj, group_index, ngroups, axis=0) for label, group in splitter: - res = func(group, *args, **kwargs) + res = func(group) if result is None: if isinstance(res, (Series, Index, np.ndarray)): @@ -835,7 +835,7 @@ def groupings(self) -> "List[grouper.Grouping]": for lvl, name in zip(self.levels, self.names) ] - def agg_series(self, obj: Series, func: F, *args, **kwargs): + def agg_series(self, obj: Series, func: F): # Caller is responsible for checking ngroups != 0 assert self.ngroups != 0 assert len(self.bins) > 0 # otherwise we'd get IndexError in get_result From d0de0c6d15192cafd81bcb207b4d95e62b4ab89d Mon Sep 17 00:00:00 2001 From: David Kwong Date: Sat, 5 Sep 2020 13:15:03 +1000 Subject: [PATCH 432/632] BUG: Fix DataFrame.groupby().apply() for NaN groups with dropna=False (#35951) --- doc/source/whatsnew/v1.2.0.rst | 3 +- pandas/core/reshape/concat.py | 6 ++- pandas/tests/groupby/test_groupby_dropna.py | 53 +++++++++++++++++++++ 3 files changed, 59 insertions(+), 3 deletions(-) diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index e65daa439a225..aa3255e673797 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -214,7 +214,8 @@ Performance improvements Bug fixes ~~~~~~~~~ - +- Bug in :meth:`DataFrameGroupBy.apply` raising error with ``np.nan`` group(s) when ``dropna=False`` (:issue:`35889`) +- Categorical ^^^^^^^^^^^ diff --git a/pandas/core/reshape/concat.py b/pandas/core/reshape/concat.py index 299b68c6e71e0..9b94dae8556f6 100644 --- a/pandas/core/reshape/concat.py +++ b/pandas/core/reshape/concat.py @@ -11,6 +11,7 @@ from pandas.core.dtypes.concat import concat_compat from pandas.core.dtypes.generic import ABCDataFrame, ABCSeries +from pandas.core.dtypes.missing import isna from pandas.core.arrays.categorical import ( factorize_from_iterable, @@ -624,10 +625,11 @@ def _make_concat_multiindex(indexes, keys, levels=None, names=None) -> MultiInde for hlevel, level in zip(zipped, levels): to_concat = [] for key, index in zip(hlevel, indexes): - mask = level == key + # Find matching codes, include matching nan values as equal. 
+ mask = (isna(level) & isna(key)) | (level == key) if not mask.any(): raise ValueError(f"Key {key} not in level {level}") - i = np.nonzero(level == key)[0][0] + i = np.nonzero(mask)[0][0] to_concat.append(np.repeat(i, len(index))) codes_list.append(np.concatenate(to_concat)) diff --git a/pandas/tests/groupby/test_groupby_dropna.py b/pandas/tests/groupby/test_groupby_dropna.py index d1501111cb22b..66db06eeebdfb 100644 --- a/pandas/tests/groupby/test_groupby_dropna.py +++ b/pandas/tests/groupby/test_groupby_dropna.py @@ -274,3 +274,56 @@ def test_groupby_dropna_datetime_like_data( expected = pd.DataFrame({"values": values}, index=pd.Index(indexes, name="dt")) tm.assert_frame_equal(grouped, expected) + + +@pytest.mark.parametrize( + "dropna, data, selected_data, levels", + [ + pytest.param( + False, + {"groups": ["a", "a", "b", np.nan], "values": [10, 10, 20, 30]}, + {"values": [0, 1, 0, 0]}, + ["a", "b", np.nan], + id="dropna_false_has_nan", + ), + pytest.param( + True, + {"groups": ["a", "a", "b", np.nan], "values": [10, 10, 20, 30]}, + {"values": [0, 1, 0]}, + None, + id="dropna_true_has_nan", + ), + pytest.param( + # no nan in "groups"; dropna=True|False should be same. + False, + {"groups": ["a", "a", "b", "c"], "values": [10, 10, 20, 30]}, + {"values": [0, 1, 0, 0]}, + None, + id="dropna_false_no_nan", + ), + pytest.param( + # no nan in "groups"; dropna=True|False should be same. + True, + {"groups": ["a", "a", "b", "c"], "values": [10, 10, 20, 30]}, + {"values": [0, 1, 0, 0]}, + None, + id="dropna_true_no_nan", + ), + ], +) +def test_groupby_apply_with_dropna_for_multi_index(dropna, data, selected_data, levels): + # GH 35889 + + df = pd.DataFrame(data) + gb = df.groupby("groups", dropna=dropna) + result = gb.apply(lambda grp: pd.DataFrame({"values": range(len(grp))})) + + mi_tuples = tuple(zip(data["groups"], selected_data["values"])) + mi = pd.MultiIndex.from_tuples(mi_tuples, names=["groups", None]) + # Since right now, by default MI will drop NA from levels when we create MI + # via `from_*`, so we need to add NA for level manually afterwards. + if not dropna and levels: + mi = mi.set_levels(levels, level="groups") + + expected = pd.DataFrame(selected_data, index=mi) + tm.assert_frame_equal(result, expected) From c40bf021dc3b421f6412011ec54510f568c7c1ce Mon Sep 17 00:00:00 2001 From: patrick <61934744+phofl@users.noreply.github.com> Date: Sat, 5 Sep 2020 05:18:12 +0200 Subject: [PATCH 433/632] Bug 29764 groupby loses index name sometimes (#36121) --- doc/source/whatsnew/v1.2.0.rst | 1 + pandas/core/groupby/generic.py | 1 + pandas/tests/groupby/test_groupby.py | 23 +++++++++++++++++++++++ 3 files changed, 25 insertions(+) diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index aa3255e673797..3b252202c14c5 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -312,6 +312,7 @@ Groupby/resample/rolling - Bug in :meth:`DataFrameGroupby.apply` would drop a :class:`CategoricalIndex` when grouped on. (:issue:`35792`) - Bug when subsetting columns on a :class:`~pandas.core.groupby.DataFrameGroupBy` (e.g. ``df.groupby('a')[['b']])``) would reset the attributes ``axis``, ``dropna``, ``group_keys``, ``level``, ``mutated``, ``sort``, and ``squeeze`` to their default values. 
(:issue:`9959`) - Bug in :meth:`DataFrameGroupby.tshift` failing to raise ``ValueError`` when a frequency cannot be inferred for the index of a group (:issue:`35937`) +- Bug in :meth:`DataFrame.groupby` does not always maintain column index name for ``any``, ``all``, ``bfill``, ``ffill``, ``shift`` (:issue:`29764`) - Reshaping diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index 173ff99912f05..b855ce65f41b2 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -1695,6 +1695,7 @@ def _wrap_transformed_output( """ indexed_output = {key.position: val for key, val in output.items()} columns = Index(key.label for key in output) + columns.name = self.obj.columns.name result = self.obj._constructor(indexed_output) result.columns = columns diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py index eec9e8064d584..e0196df7ceac0 100644 --- a/pandas/tests/groupby/test_groupby.py +++ b/pandas/tests/groupby/test_groupby.py @@ -2111,3 +2111,26 @@ def test_subsetting_columns_keeps_attrs(klass, attr, value): expected = df.groupby("a", **{attr: value}) result = expected[["b"]] if klass is DataFrame else expected["b"] assert getattr(result, attr) == getattr(expected, attr) + + +@pytest.mark.parametrize("func", ["sum", "any", "shift"]) +def test_groupby_column_index_name_lost(func): + # GH: 29764 groupby loses index sometimes + expected = pd.Index(["a"], name="idx") + df = pd.DataFrame([[1]], columns=expected) + df_grouped = df.groupby([1]) + result = getattr(df_grouped, func)().columns + tm.assert_index_equal(result, expected) + + +@pytest.mark.parametrize("func", ["ffill", "bfill"]) +def test_groupby_column_index_name_lost_fill_funcs(func): + # GH: 29764 groupby loses index sometimes + df = pd.DataFrame( + [[1, 1.0, -1.0], [1, np.nan, np.nan], [1, 2.0, -2.0]], + columns=pd.Index(["type", "a", "b"], name="idx"), + ) + df_grouped = df.groupby(["type"])[["a", "b"]] + result = getattr(df_grouped, func)().columns + expected = pd.Index(["a", "b"], name="idx") + tm.assert_index_equal(result, expected) From 6f453048108a4f7d4ac31543d473e825849eede2 Mon Sep 17 00:00:00 2001 From: Simon Hawkins Date: Sat, 5 Sep 2020 04:18:59 +0100 Subject: [PATCH 434/632] STY: add code check for use of builtin filter function (#36089) --- ci/code_checks.sh | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/ci/code_checks.sh b/ci/code_checks.sh index 2e0f27fefca0b..6006d09bc3e78 100755 --- a/ci/code_checks.sh +++ b/ci/code_checks.sh @@ -179,6 +179,10 @@ if [[ -z "$CHECK" || "$CHECK" == "patterns" ]]; then invgrep -R --include="*.py" -E "super\(\w*, (self|cls)\)" pandas RET=$(($RET + $?)) ; echo $MSG "DONE" + MSG='Check for use of builtin filter function' ; echo $MSG + invgrep -R --include="*.py" -P '(? 
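On the one-line fix to _make_concat_multiindex in PATCH 432 above: plain equality can never locate a NaN group key, because NaN compares unequal to itself, which is exactly what the extra isna() terms repair. A small sketch of the mask logic in isolation (illustrative; pd.isna stands in for the internal isna import used by the patch):

    import numpy as np
    import pandas as pd

    level = np.array(["a", "b", np.nan], dtype=object)
    key = np.nan

    assert not (np.nan == np.nan)  # why `level == key` alone always misses
    mask = (pd.isna(level) & pd.isna(key)) | (level == key)
    assert mask.tolist() == [False, False, True]  # the NaN key is found
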
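PATCH 433 above propagates self.obj.columns.name when wrapping transformed output. Outside the test suite, the repaired behaviour looks like this (a sketch assuming a pandas build that includes the fix):

    import pandas as pd

    df = pd.DataFrame([[1]], columns=pd.Index(["a"], name="idx"))
    out = df.groupby([1]).shift()
    # the transformed result keeps the column index name instead of
    # coming back with an unnamed column index
    assert out.columns.name == "idx"
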
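The new ci/code_checks.sh rule in PATCH 434 above greps for calls to the builtin filter function. An illustration (not taken from the patch) of the call such a check flags, next to the comprehension spelling the style rule steers contributors toward:

    # would be flagged by the pattern check
    evens = list(filter(lambda n: n % 2 == 0, range(10)))

    # equivalent comprehension, which needs no list() wrapper
    evens = [n for n in range(10) if n % 2 == 0]
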
Date: Fri, 4 Sep 2020 20:21:49 -0700 Subject: [PATCH 435/632] BUG: df.replace with numeric values and str to_replace (#36093) --- doc/source/user_guide/missing_data.rst | 26 ----- doc/source/whatsnew/v1.2.0.rst | 1 + pandas/core/array_algos/replace.py | 95 ++++++++++++++++++ pandas/core/generic.py | 14 --- pandas/core/internals/blocks.py | 27 ++++- pandas/core/internals/managers.py | 104 +------------------- pandas/tests/frame/methods/test_replace.py | 15 ++- pandas/tests/series/methods/test_replace.py | 5 +- 8 files changed, 136 insertions(+), 151 deletions(-) create mode 100644 pandas/core/array_algos/replace.py diff --git a/doc/source/user_guide/missing_data.rst b/doc/source/user_guide/missing_data.rst index 2e68a0598bb71..28206192dd161 100644 --- a/doc/source/user_guide/missing_data.rst +++ b/doc/source/user_guide/missing_data.rst @@ -689,32 +689,6 @@ You can also operate on the DataFrame in place: df.replace(1.5, np.nan, inplace=True) -.. warning:: - - When replacing multiple ``bool`` or ``datetime64`` objects, the first - argument to ``replace`` (``to_replace``) must match the type of the value - being replaced. For example, - - .. code-block:: python - - >>> s = pd.Series([True, False, True]) - >>> s.replace({'a string': 'new value', True: False}) # raises - TypeError: Cannot compare types 'ndarray(dtype=bool)' and 'str' - - will raise a ``TypeError`` because one of the ``dict`` keys is not of the - correct type for replacement. - - However, when replacing a *single* object such as, - - .. ipython:: python - - s = pd.Series([True, False, True]) - s.replace('a string', 'another string') - - the original ``NDFrame`` object will be returned untouched. We're working on - unifying this API, but for backwards compatibility reasons we cannot break - the latter behavior. See :issue:`6354` for more details. - Missing data casting rules and indexing --------------------------------------- diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index 3b252202c14c5..8b28a4439e1da 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -339,6 +339,7 @@ ExtensionArray Other ^^^^^ - Bug in :meth:`DataFrame.replace` and :meth:`Series.replace` incorrectly raising ``AssertionError`` instead of ``ValueError`` when invalid parameter combinations are passed (:issue:`36045`) +- Bug in :meth:`DataFrame.replace` and :meth:`Series.replace` with numeric values and string ``to_replace`` (:issue:`34789`) - .. --------------------------------------------------------------------------- diff --git a/pandas/core/array_algos/replace.py b/pandas/core/array_algos/replace.py new file mode 100644 index 0000000000000..6ac3cc1f9f2fe --- /dev/null +++ b/pandas/core/array_algos/replace.py @@ -0,0 +1,95 @@ +""" +Methods used by Block.replace and related methods. +""" +import operator +import re +from typing import Optional, Pattern, Union + +import numpy as np + +from pandas._typing import ArrayLike, Scalar + +from pandas.core.dtypes.common import ( + is_datetimelike_v_numeric, + is_numeric_v_string_like, + is_scalar, +) +from pandas.core.dtypes.missing import isna + + +def compare_or_regex_search( + a: ArrayLike, + b: Union[Scalar, Pattern], + regex: bool = False, + mask: Optional[ArrayLike] = None, +) -> Union[ArrayLike, bool]: + """ + Compare two array_like inputs of the same shape or two scalar values + + Calls operator.eq or re.search, depending on regex argument. If regex is + True, perform an element-wise regex matching. 
+ + Parameters + ---------- + a : array_like + b : scalar or regex pattern + regex : bool, default False + mask : array_like or None (default) + + Returns + ------- + mask : array_like of bool + """ + + def _check_comparison_types( + result: Union[ArrayLike, bool], a: ArrayLike, b: Union[Scalar, Pattern] + ): + """ + Raises an error if the two arrays (a,b) cannot be compared. + Otherwise, returns the comparison result as expected. + """ + if is_scalar(result) and isinstance(a, np.ndarray): + type_names = [type(a).__name__, type(b).__name__] + + if isinstance(a, np.ndarray): + type_names[0] = f"ndarray(dtype={a.dtype})" + + raise TypeError( + f"Cannot compare types {repr(type_names[0])} and {repr(type_names[1])}" + ) + + if not regex: + op = lambda x: operator.eq(x, b) + else: + op = np.vectorize( + lambda x: bool(re.search(b, x)) + if isinstance(x, str) and isinstance(b, (str, Pattern)) + else False + ) + + # GH#32621 use mask to avoid comparing to NAs + if mask is None and isinstance(a, np.ndarray) and not isinstance(b, np.ndarray): + mask = np.reshape(~(isna(a)), a.shape) + if isinstance(a, np.ndarray): + a = a[mask] + + if is_numeric_v_string_like(a, b): + # GH#29553 avoid deprecation warnings from numpy + return np.zeros(a.shape, dtype=bool) + + elif is_datetimelike_v_numeric(a, b): + # GH#29553 avoid deprecation warnings from numpy + _check_comparison_types(False, a, b) + return False + + result = op(a) + + if isinstance(result, np.ndarray) and mask is not None: + # The shape of the mask can differ to that of the result + # since we may compare only a subset of a's or b's elements + tmp = np.zeros(mask.shape, dtype=np.bool_) + tmp[mask] = result + result = tmp + + _check_comparison_types(result, a, b) + return result diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 2af323ccc1dd3..93c945638a174 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -6561,20 +6561,6 @@ def replace( 1 new new 2 bait xyz - Note that when replacing multiple ``bool`` or ``datetime64`` objects, - the data types in the `to_replace` parameter must match the data - type of the value being replaced: - - >>> df = pd.DataFrame({{'A': [True, False, True], - ... 'B': [False, True, False]}}) - >>> df.replace({{'a string': 'new value', True: False}}) # raises - Traceback (most recent call last): - ... - TypeError: Cannot compare types 'ndarray(dtype=bool)' and 'str' - - This raises a ``TypeError`` because one of the ``dict`` keys is not of - the correct type for replacement. 
- Compare the behavior of ``s.replace({{'a': None}})`` and ``s.replace('a', None)`` to understand the peculiarities of the `to_replace` parameter: diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index b2305736f9d46..3bcd4debbf41a 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -11,7 +11,7 @@ from pandas._libs.internals import BlockPlacement from pandas._libs.tslibs import conversion from pandas._libs.tslibs.timezones import tz_compare -from pandas._typing import ArrayLike +from pandas._typing import ArrayLike, Scalar from pandas.util._validators import validate_bool_kwarg from pandas.core.dtypes.cast import ( @@ -59,6 +59,7 @@ from pandas.core.dtypes.missing import is_valid_nat_for_dtype, isna, isna_compat import pandas.core.algorithms as algos +from pandas.core.array_algos.replace import compare_or_regex_search from pandas.core.array_algos.transforms import shift from pandas.core.arrays import ( Categorical, @@ -792,7 +793,6 @@ def _replace_list( self, src_list: List[Any], dest_list: List[Any], - masks: List[np.ndarray], inplace: bool = False, regex: bool = False, ) -> List["Block"]: @@ -801,11 +801,28 @@ def _replace_list( """ src_len = len(src_list) - 1 + def comp(s: Scalar, mask: np.ndarray, regex: bool = False) -> np.ndarray: + """ + Generate a bool array by perform an equality check, or perform + an element-wise regular expression matching + """ + if isna(s): + return ~mask + + s = com.maybe_box_datetimelike(s) + return compare_or_regex_search(self.values, s, regex, mask) + + # Calculate the mask once, prior to the call of comp + # in order to avoid repeating the same computations + mask = ~isna(self.values) + + masks = [comp(s, mask, regex) for s in src_list] + rb = [self if inplace else self.copy()] for i, (src, dest) in enumerate(zip(src_list, dest_list)): new_rb: List["Block"] = [] for blk in rb: - m = masks[i][blk.mgr_locs.indexer] + m = masks[i] convert = i == src_len # only convert once at the end result = blk._replace_coerce( mask=m, @@ -2908,7 +2925,9 @@ def _extract_bool_array(mask: ArrayLike) -> np.ndarray: """ if isinstance(mask, ExtensionArray): # We could have BooleanArray, Sparse[bool], ... 
- mask = np.asarray(mask, dtype=np.bool_) + # Except for BooleanArray, this is equivalent to just + # np.asarray(mask, dtype=bool) + mask = mask.to_numpy(dtype=bool, na_value=False) assert isinstance(mask, np.ndarray), type(mask) assert mask.dtype == bool, mask.dtype diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py index 753b949f7c802..57a4a8c2ace8a 100644 --- a/pandas/core/internals/managers.py +++ b/pandas/core/internals/managers.py @@ -1,14 +1,11 @@ from collections import defaultdict import itertools -import operator -import re from typing import ( Any, DefaultDict, Dict, List, Optional, - Pattern, Sequence, Tuple, TypeVar, @@ -19,7 +16,7 @@ import numpy as np from pandas._libs import internals as libinternals, lib -from pandas._typing import ArrayLike, DtypeObj, Label, Scalar +from pandas._typing import ArrayLike, DtypeObj, Label from pandas.util._validators import validate_bool_kwarg from pandas.core.dtypes.cast import ( @@ -29,12 +26,9 @@ ) from pandas.core.dtypes.common import ( DT64NS_DTYPE, - is_datetimelike_v_numeric, is_dtype_equal, is_extension_array_dtype, is_list_like, - is_numeric_v_string_like, - is_scalar, ) from pandas.core.dtypes.concat import concat_compat from pandas.core.dtypes.dtypes import ExtensionDtype @@ -44,7 +38,6 @@ import pandas.core.algorithms as algos from pandas.core.arrays.sparse import SparseDtype from pandas.core.base import PandasObject -import pandas.core.common as com from pandas.core.construction import extract_array from pandas.core.indexers import maybe_convert_indices from pandas.core.indexes.api import Index, ensure_index @@ -628,31 +621,10 @@ def replace_list( """ do a list replace """ inplace = validate_bool_kwarg(inplace, "inplace") - # figure out our mask apriori to avoid repeated replacements - values = self.as_array() - - def comp(s: Scalar, mask: np.ndarray, regex: bool = False): - """ - Generate a bool array by perform an equality check, or perform - an element-wise regular expression matching - """ - if isna(s): - return ~mask - - s = com.maybe_box_datetimelike(s) - return _compare_or_regex_search(values, s, regex, mask) - - # Calculate the mask once, prior to the call of comp - # in order to avoid repeating the same computations - mask = ~isna(values) - - masks = [comp(s, mask, regex) for s in src_list] - bm = self.apply( "_replace_list", src_list=src_list, dest_list=dest_list, - masks=masks, inplace=inplace, regex=regex, ) @@ -1900,80 +1872,6 @@ def _merge_blocks( return blocks -def _compare_or_regex_search( - a: ArrayLike, - b: Union[Scalar, Pattern], - regex: bool = False, - mask: Optional[ArrayLike] = None, -) -> Union[ArrayLike, bool]: - """ - Compare two array_like inputs of the same shape or two scalar values - - Calls operator.eq or re.search, depending on regex argument. If regex is - True, perform an element-wise regex matching. - - Parameters - ---------- - a : array_like - b : scalar or regex pattern - regex : bool, default False - mask : array_like or None (default) - - Returns - ------- - mask : array_like of bool - """ - - def _check_comparison_types( - result: Union[ArrayLike, bool], a: ArrayLike, b: Union[Scalar, Pattern] - ): - """ - Raises an error if the two arrays (a,b) cannot be compared. - Otherwise, returns the comparison result as expected. 
- """ - if is_scalar(result) and isinstance(a, np.ndarray): - type_names = [type(a).__name__, type(b).__name__] - - if isinstance(a, np.ndarray): - type_names[0] = f"ndarray(dtype={a.dtype})" - - raise TypeError( - f"Cannot compare types {repr(type_names[0])} and {repr(type_names[1])}" - ) - - if not regex: - op = lambda x: operator.eq(x, b) - else: - op = np.vectorize( - lambda x: bool(re.search(b, x)) - if isinstance(x, str) and isinstance(b, (str, Pattern)) - else False - ) - - # GH#32621 use mask to avoid comparing to NAs - if mask is None and isinstance(a, np.ndarray) and not isinstance(b, np.ndarray): - mask = np.reshape(~(isna(a)), a.shape) - if isinstance(a, np.ndarray): - a = a[mask] - - if is_datetimelike_v_numeric(a, b) or is_numeric_v_string_like(a, b): - # GH#29553 avoid deprecation warnings from numpy - _check_comparison_types(False, a, b) - return False - - result = op(a) - - if isinstance(result, np.ndarray) and mask is not None: - # The shape of the mask can differ to that of the result - # since we may compare only a subset of a's or b's elements - tmp = np.zeros(mask.shape, dtype=np.bool_) - tmp[mask] = result - result = tmp - - _check_comparison_types(result, a, b) - return result - - def _fast_count_smallints(arr: np.ndarray) -> np.ndarray: """Faster version of set(arr) for sequences of small numbers.""" counts = np.bincount(arr.astype(np.int_)) diff --git a/pandas/tests/frame/methods/test_replace.py b/pandas/tests/frame/methods/test_replace.py index 83dfd42ae2a6e..ea2488dfc0877 100644 --- a/pandas/tests/frame/methods/test_replace.py +++ b/pandas/tests/frame/methods/test_replace.py @@ -1131,8 +1131,19 @@ def test_replace_bool_with_bool(self): def test_replace_with_dict_with_bool_keys(self): df = DataFrame({0: [True, False], 1: [False, True]}) - with pytest.raises(TypeError, match="Cannot compare types .+"): - df.replace({"asdf": "asdb", True: "yes"}) + result = df.replace({"asdf": "asdb", True: "yes"}) + expected = DataFrame({0: ["yes", False], 1: [False, "yes"]}) + tm.assert_frame_equal(result, expected) + + def test_replace_dict_strings_vs_ints(self): + # GH#34789 + df = pd.DataFrame({"Y0": [1, 2], "Y1": [3, 4]}) + result = df.replace({"replace_string": "test"}) + + tm.assert_frame_equal(result, df) + + result = df["Y0"].replace({"replace_string": "test"}) + tm.assert_series_equal(result, df["Y0"]) def test_replace_truthy(self): df = DataFrame({"a": [True, True]}) diff --git a/pandas/tests/series/methods/test_replace.py b/pandas/tests/series/methods/test_replace.py index ccaa005369a1c..e255d46e81851 100644 --- a/pandas/tests/series/methods/test_replace.py +++ b/pandas/tests/series/methods/test_replace.py @@ -218,8 +218,9 @@ def test_replace_bool_with_bool(self): def test_replace_with_dict_with_bool_keys(self): s = pd.Series([True, False, True]) - with pytest.raises(TypeError, match="Cannot compare types .+"): - s.replace({"asdf": "asdb", True: "yes"}) + result = s.replace({"asdf": "asdb", True: "yes"}) + expected = pd.Series(["yes", False, "yes"]) + tm.assert_series_equal(result, expected) def test_replace2(self): N = 100 From c43652ef8a2342ba3eb065ba7e3e6733096bd4d3 Mon Sep 17 00:00:00 2001 From: Fangchen Li Date: Fri, 4 Sep 2020 22:33:49 -0500 Subject: [PATCH 436/632] CLN: resolve UserWarning in `pandas/plotting/_matplotlib/core.py` #35945 (#35946) --- pandas/plotting/_matplotlib/core.py | 2 +- pandas/tests/plotting/test_frame.py | 10 ++++++---- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/pandas/plotting/_matplotlib/core.py 
b/pandas/plotting/_matplotlib/core.py index c1ba7881165f1..f0b35e1cd2a74 100644 --- a/pandas/plotting/_matplotlib/core.py +++ b/pandas/plotting/_matplotlib/core.py @@ -1223,8 +1223,8 @@ def get_label(i): if self._need_to_set_index: xticks = ax.get_xticks() xticklabels = [get_label(x) for x in xticks] - ax.set_xticklabels(xticklabels) ax.xaxis.set_major_locator(FixedLocator(xticks)) + ax.set_xticklabels(xticklabels) condition = ( not self._use_dynamic_x() diff --git a/pandas/tests/plotting/test_frame.py b/pandas/tests/plotting/test_frame.py index ee43e5d7072fe..9ab697cb57690 100644 --- a/pandas/tests/plotting/test_frame.py +++ b/pandas/tests/plotting/test_frame.py @@ -2796,10 +2796,12 @@ def test_table(self): _check_plot_works(df.plot, table=True) _check_plot_works(df.plot, table=df) - ax = df.plot() - assert len(ax.tables) == 0 - plotting.table(ax, df.T) - assert len(ax.tables) == 1 + # GH 35945 UserWarning + with tm.assert_produces_warning(None): + ax = df.plot() + assert len(ax.tables) == 0 + plotting.table(ax, df.T) + assert len(ax.tables) == 1 def test_errorbar_scatter(self): df = DataFrame(np.random.randn(5, 2), index=range(5), columns=["x", "y"]) From 9fea06cec987436b02bc67e080fa5d886826f1c3 Mon Sep 17 00:00:00 2001 From: Andrew Wieteska <48889395+arw2019@users.noreply.github.com> Date: Sat, 5 Sep 2020 06:50:57 -0400 Subject: [PATCH 437/632] add note about missing values to Categorical docstring (#36125) --- pandas/core/arrays/categorical.py | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index ec85ec47d625c..c3c9009dda659 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -280,6 +280,19 @@ class Categorical(NDArrayBackedExtensionArray, PandasObject): ['a', 'b', 'c', 'a', 'b', 'c'] Categories (3, object): ['a', 'b', 'c'] + Missing values are not included as a category. + + >>> c = pd.Categorical([1, 2, 3, 1, 2, 3, np.nan]) + >>> c + [1, 2, 3, 1, 2, 3, NaN] + Categories (3, int64): [1, 2, 3] + + However, their presence is indicated in the `codes` attribute + by code `-1`. + + >>> c.codes + array([ 0, 1, 2, 0, 1, 2, -1], dtype=int8) + Ordered `Categoricals` can be sorted according to the custom order of the categories and can have a min and max value. 
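One user-visible effect of the replace() changes in PATCH 435 further above, sketched under the assumption of a pandas build that includes them: a dict key that cannot be compared with a column's dtype now simply matches nothing instead of raising "Cannot compare types ...":

    import pandas as pd

    df = pd.DataFrame({"Y0": [1, 2], "Y1": [3, 4]})
    # previously this raised TypeError; now it is a no-op
    result = df.replace({"replace_string": "test"})
    assert result.equals(df)
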
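For the reordering in PATCH 436 above: as of Matplotlib 3.3, set_xticklabels installs a FixedFormatter and warns ("FixedFormatter should only be used together with FixedLocator") when the axis locator is not fixed, so pinning the tick positions first silences the warning. A minimal standalone sketch of the required order (assumes Matplotlib >= 3.3; the data and labels are made up):

    import matplotlib.pyplot as plt
    from matplotlib.ticker import FixedLocator

    fig, ax = plt.subplots()
    ax.plot([0, 1, 2], [1, 4, 9])

    xticks = ax.get_xticks()
    # pin the positions first, then replace the labels; the reverse
    # order is what triggered the UserWarning this patch resolves
    ax.xaxis.set_major_locator(FixedLocator(xticks))
    ax.set_xticklabels([f"t{i}" for i in range(len(xticks))])
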
From 70c056becc71d1ab43cb6ef6c737b852ae89c15e Mon Sep 17 00:00:00 2001 From: Sarthak Vineet Kumar Date: Sat, 5 Sep 2020 18:09:11 +0530 Subject: [PATCH 438/632] CLN removing trailing commas (#36101) --- pandas/tests/io/test_sql.py | 3 --- pandas/tests/io/test_stata.py | 4 ++-- pandas/tests/plotting/test_frame.py | 4 ++-- pandas/tests/resample/test_datetime_index.py | 10 ++++------ .../tests/reshape/merge/test_merge_index_as_string.py | 4 ++-- pandas/tests/reshape/test_crosstab.py | 4 ++-- pandas/tests/reshape/test_get_dummies.py | 2 +- 7 files changed, 13 insertions(+), 18 deletions(-) diff --git a/pandas/tests/io/test_sql.py b/pandas/tests/io/test_sql.py index a7e3162ed7b73..1edcc937f72c3 100644 --- a/pandas/tests/io/test_sql.py +++ b/pandas/tests/io/test_sql.py @@ -2349,9 +2349,6 @@ def date_format(dt): def format_query(sql, *args): - """ - - """ processed_args = [] for arg in args: if isinstance(arg, float) and isna(arg): diff --git a/pandas/tests/io/test_stata.py b/pandas/tests/io/test_stata.py index 6d7fec803a8e0..88f61390957a6 100644 --- a/pandas/tests/io/test_stata.py +++ b/pandas/tests/io/test_stata.py @@ -1153,7 +1153,7 @@ def test_read_chunks_117( from_frame = parsed.iloc[pos : pos + chunksize, :].copy() from_frame = self._convert_categorical(from_frame) tm.assert_frame_equal( - from_frame, chunk, check_dtype=False, check_datetimelike_compat=True, + from_frame, chunk, check_dtype=False, check_datetimelike_compat=True ) pos += chunksize @@ -1251,7 +1251,7 @@ def test_read_chunks_115( from_frame = parsed.iloc[pos : pos + chunksize, :].copy() from_frame = self._convert_categorical(from_frame) tm.assert_frame_equal( - from_frame, chunk, check_dtype=False, check_datetimelike_compat=True, + from_frame, chunk, check_dtype=False, check_datetimelike_compat=True ) pos += chunksize diff --git a/pandas/tests/plotting/test_frame.py b/pandas/tests/plotting/test_frame.py index 9ab697cb57690..128a7bdb6730a 100644 --- a/pandas/tests/plotting/test_frame.py +++ b/pandas/tests/plotting/test_frame.py @@ -1321,7 +1321,7 @@ def test_scatter_with_c_column_name_with_colors(self, cmap): def test_plot_scatter_with_s(self): # this refers to GH 32904 - df = DataFrame(np.random.random((10, 3)) * 100, columns=["a", "b", "c"],) + df = DataFrame(np.random.random((10, 3)) * 100, columns=["a", "b", "c"]) ax = df.plot.scatter(x="a", y="b", s="c") tm.assert_numpy_array_equal(df["c"].values, right=ax.collections[0].get_sizes()) @@ -1716,7 +1716,7 @@ def test_hist_df(self): def test_hist_weights(self, weights): # GH 33173 np.random.seed(0) - df = pd.DataFrame(dict(zip(["A", "B"], np.random.randn(2, 100,)))) + df = pd.DataFrame(dict(zip(["A", "B"], np.random.randn(2, 100)))) ax1 = _check_plot_works(df.plot, kind="hist", weights=weights) ax2 = _check_plot_works(df.plot, kind="hist") diff --git a/pandas/tests/resample/test_datetime_index.py b/pandas/tests/resample/test_datetime_index.py index e7637a598403f..59a0183304c76 100644 --- a/pandas/tests/resample/test_datetime_index.py +++ b/pandas/tests/resample/test_datetime_index.py @@ -124,7 +124,7 @@ def test_resample_integerarray(): result = ts.resample("3T").mean() expected = Series( - [1, 4, 7], index=pd.date_range("1/1/2000", periods=3, freq="3T"), dtype="Int64", + [1, 4, 7], index=pd.date_range("1/1/2000", periods=3, freq="3T"), dtype="Int64" ) tm.assert_series_equal(result, expected) @@ -764,7 +764,7 @@ def test_resample_origin(): @pytest.mark.parametrize( - "origin", ["invalid_value", "epch", "startday", "startt", "2000-30-30", object()], + "origin", ["invalid_value", 
"epch", "startday", "startt", "2000-30-30", object()] ) def test_resample_bad_origin(origin): rng = date_range("2000-01-01 00:00:00", "2000-01-01 02:00", freq="s") @@ -777,9 +777,7 @@ def test_resample_bad_origin(origin): ts.resample("5min", origin=origin) -@pytest.mark.parametrize( - "offset", ["invalid_value", "12dayys", "2000-30-30", object()], -) +@pytest.mark.parametrize("offset", ["invalid_value", "12dayys", "2000-30-30", object()]) def test_resample_bad_offset(offset): rng = date_range("2000-01-01 00:00:00", "2000-01-01 02:00", freq="s") ts = Series(np.random.randn(len(rng)), index=rng) @@ -1595,7 +1593,7 @@ def test_downsample_dst_at_midnight(): "America/Havana", ambiguous=True ) dti = pd.DatetimeIndex(dti, freq="D") - expected = DataFrame([7.5, 28.0, 44.5], index=dti,) + expected = DataFrame([7.5, 28.0, 44.5], index=dti) tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/reshape/merge/test_merge_index_as_string.py b/pandas/tests/reshape/merge/test_merge_index_as_string.py index 08614d04caf4b..d20d93370ec7e 100644 --- a/pandas/tests/reshape/merge/test_merge_index_as_string.py +++ b/pandas/tests/reshape/merge/test_merge_index_as_string.py @@ -29,7 +29,7 @@ def df2(): @pytest.fixture(params=[[], ["outer"], ["outer", "inner"]]) def left_df(request, df1): - """ Construct left test DataFrame with specified levels + """Construct left test DataFrame with specified levels (any of 'outer', 'inner', and 'v1') """ levels = request.param @@ -41,7 +41,7 @@ def left_df(request, df1): @pytest.fixture(params=[[], ["outer"], ["outer", "inner"]]) def right_df(request, df2): - """ Construct right test DataFrame with specified levels + """Construct right test DataFrame with specified levels (any of 'outer', 'inner', and 'v2') """ levels = request.param diff --git a/pandas/tests/reshape/test_crosstab.py b/pandas/tests/reshape/test_crosstab.py index 6f5550a6f8209..1aadcfdc30f1b 100644 --- a/pandas/tests/reshape/test_crosstab.py +++ b/pandas/tests/reshape/test_crosstab.py @@ -354,7 +354,7 @@ def test_crosstab_normalize(self): crosstab(df.a, df.b, normalize="columns"), ) tm.assert_frame_equal( - crosstab(df.a, df.b, normalize=0), crosstab(df.a, df.b, normalize="index"), + crosstab(df.a, df.b, normalize=0), crosstab(df.a, df.b, normalize="index") ) row_normal_margins = DataFrame( @@ -377,7 +377,7 @@ def test_crosstab_normalize(self): crosstab(df.a, df.b, normalize="index", margins=True), row_normal_margins ) tm.assert_frame_equal( - crosstab(df.a, df.b, normalize="columns", margins=True), col_normal_margins, + crosstab(df.a, df.b, normalize="columns", margins=True), col_normal_margins ) tm.assert_frame_equal( crosstab(df.a, df.b, normalize=True, margins=True), all_normal_margins diff --git a/pandas/tests/reshape/test_get_dummies.py b/pandas/tests/reshape/test_get_dummies.py index c003bfa6a239a..ce13762ea8f86 100644 --- a/pandas/tests/reshape/test_get_dummies.py +++ b/pandas/tests/reshape/test_get_dummies.py @@ -161,7 +161,7 @@ def test_get_dummies_unicode(self, sparse): s = [e, eacute, eacute] res = get_dummies(s, prefix="letter", sparse=sparse) exp = DataFrame( - {"letter_e": [1, 0, 0], f"letter_{eacute}": [0, 1, 1]}, dtype=np.uint8, + {"letter_e": [1, 0, 0], f"letter_{eacute}": [0, 1, 1]}, dtype=np.uint8 ) if sparse: exp = exp.apply(SparseArray, fill_value=0) From c4f0c6f5537c5b0983561acb0c4f8c7daac5c98a Mon Sep 17 00:00:00 2001 From: Thomas Dickson Date: Sat, 5 Sep 2020 15:44:26 +0100 Subject: [PATCH 439/632] Updated series documentation to close #35406 (#36139) --- pandas/core/series.py 
| 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pandas/core/series.py b/pandas/core/series.py index 9d84ce4b9ab2e..d8fdaa2a60252 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -164,9 +164,9 @@ class Series(base.IndexOpsMixin, generic.NDFrame): index : array-like or Index (1d) Values must be hashable and have the same length as `data`. Non-unique index values are allowed. Will default to - RangeIndex (0, 1, 2, ..., n) if not provided. If both a dict and index - sequence are used, the index will override the keys found in the - dict. + RangeIndex (0, 1, 2, ..., n) if not provided. If data is dict-like + and index is None, then the values in the index are used to + reindex the Series after it is created using the keys in the data. dtype : str, numpy.dtype, or ExtensionDtype, optional Data type for the output Series. If not specified, this will be inferred from `data`. From 6fa1a450d411b1bea6190532bc67b39b9dab4179 Mon Sep 17 00:00:00 2001 From: joooeey Date: Sat, 5 Sep 2020 16:49:09 +0200 Subject: [PATCH 440/632] BUG: repair 'style' kwd handling in DataFrame.plot (#21003) (#33821) --- doc/source/whatsnew/v1.2.0.rst | 2 +- pandas/plotting/_matplotlib/core.py | 27 ++++++++++++++++----------- pandas/tests/plotting/test_frame.py | 18 ++++++++++++++++++ pandas/tests/plotting/test_series.py | 2 +- 4 files changed, 36 insertions(+), 13 deletions(-) diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index 8b28a4439e1da..39e53daf516c4 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -299,7 +299,7 @@ I/O Plotting ^^^^^^^^ -- +- Bug in :meth:`DataFrame.plot` where a marker letter in the ``style`` keyword sometimes causes a ``ValueError`` (:issue:`21003`) - Groupby/resample/rolling diff --git a/pandas/plotting/_matplotlib/core.py b/pandas/plotting/_matplotlib/core.py index f0b35e1cd2a74..def4a1dc3f5c4 100644 --- a/pandas/plotting/_matplotlib/core.py +++ b/pandas/plotting/_matplotlib/core.py @@ -1,4 +1,3 @@ -import re from typing import TYPE_CHECKING, List, Optional, Tuple import warnings @@ -55,6 +54,15 @@ from matplotlib.axis import Axis +def _color_in_style(style: str) -> bool: + """ + Check if there is a color letter in the style string. + """ + from matplotlib.colors import BASE_COLORS + + return not set(BASE_COLORS).isdisjoint(style) + + class MPLPlot: """ Base class for assembling a pandas plot using matplotlib @@ -200,8 +208,6 @@ def __init__( self._validate_color_args() def _validate_color_args(self): - import matplotlib.colors - if ( "color" in self.kwds and self.nseries == 1 @@ -233,13 +239,12 @@ def _validate_color_args(self): styles = [self.style] # need only a single match for s in styles: - for char in s: - if char in matplotlib.colors.BASE_COLORS: - raise ValueError( - "Cannot pass 'style' string with a color symbol and " - "'color' keyword argument. Please use one or the other or " - "pass 'style' without a color symbol" - ) + if _color_in_style(s): + raise ValueError( + "Cannot pass 'style' string with a color symbol and " + "'color' keyword argument. 
Please use one or the " + "other or pass 'style' without a color symbol" ) def _iter_data(self, data=None, keep_index=False, fillna=None): if data is None: @@ -739,7 +744,7 @@ def _apply_style_colors(self, colors, kwds, col_num, label): style = self.style has_color = "color" in kwds or self.colormap is not None - nocolor_style = style is None or re.match("[a-z]+", style) is None + nocolor_style = style is None or not _color_in_style(style) if (has_color or self.subplots) and nocolor_style: if isinstance(colors, dict): kwds["color"] = colors[label] diff --git a/pandas/tests/plotting/test_frame.py b/pandas/tests/plotting/test_frame.py index 128a7bdb6730a..3b3902647390d 100644 --- a/pandas/tests/plotting/test_frame.py +++ b/pandas/tests/plotting/test_frame.py @@ -205,6 +205,24 @@ def test_color_and_style_arguments(self): with pytest.raises(ValueError): df.plot(color=["red", "black"], style=["k-", "r--"]) + @pytest.mark.parametrize( + "color, expected", + [ + ("green", ["green"] * 4), + (["yellow", "red", "green", "blue"], ["yellow", "red", "green", "blue"]), + ], + ) + def test_color_and_marker(self, color, expected): + # GH 21003 + df = DataFrame(np.random.random((7, 4))) + ax = df.plot(color=color, style="d--") + # check colors + result = [i.get_color() for i in ax.lines] + assert result == expected + # check markers and linestyles + assert all(i.get_linestyle() == "--" for i in ax.lines) + assert all(i.get_marker() == "d" for i in ax.lines) + def test_nonnumeric_exclude(self): df = DataFrame({"A": ["x", "y", "z"], "B": [1, 2, 3]}) ax = df.plot() diff --git a/pandas/tests/plotting/test_series.py b/pandas/tests/plotting/test_series.py index c296e2a6278c5..85c06b2e7b748 100644 --- a/pandas/tests/plotting/test_series.py +++ b/pandas/tests/plotting/test_series.py @@ -958,7 +958,7 @@ def test_plot_no_numeric_data(self): def test_style_single_ok(self): s = pd.Series([1, 2]) ax = s.plot(style="s", color="C3") - assert ax.lines[0].get_color() == ["C3"] + assert ax.lines[0].get_color() == "C3" @pytest.mark.parametrize( "index_name, old_label, new_label", From 45cec12f9f1b93778ac2cf0632174bb425b7539b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Torsten=20W=C3=B6rtwein?= Date: Sat, 5 Sep 2020 10:50:03 -0400 Subject: [PATCH 441/632] BUG/ENH: to_pickle/read_pickle support compression for file objects (#35736) --- doc/source/whatsnew/v1.2.0.rst | 1 + pandas/_typing.py | 4 ++-- pandas/core/frame.py | 4 ++-- pandas/io/common.py | 24 +++++++++--------------- pandas/io/formats/csvs.py | 15 ++++----------- pandas/io/json/_json.py | 11 ++--------- pandas/io/parsers.py | 13 +++++-------- pandas/io/pickle.py | 10 ++-------- pandas/io/stata.py | 30 +++++------------------------- pandas/tests/io/test_pickle.py | 29 +++++++++++++++++++++++++++++ 10 files changed, 61 insertions(+), 80 deletions(-) diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index 39e53daf516c4..b1229a5d5823d 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -295,6 +295,7 @@ I/O - :meth:`to_csv` passes compression arguments for `'gzip'` always to `gzip.GzipFile` (:issue:`28103`) - :meth:`to_csv` did not support zip compression for binary file object not having a filename (:issue: `35058`) - :meth:`to_csv` and :meth:`read_csv` did not honor `compression` and `encoding` for path-like objects that are internally converted to file-like objects (:issue:`35677`, :issue:`26124`, and :issue:`32392`) +- :meth:`to_pickle` and :meth:`read_pickle` did not support compression for file-objects
(:issue:`26237`, :issue:`29054`, and :issue:`29570`) Plotting ^^^^^^^^ diff --git a/pandas/_typing.py b/pandas/_typing.py index 74bfc9134c3af..b237013ac7805 100644 --- a/pandas/_typing.py +++ b/pandas/_typing.py @@ -116,7 +116,7 @@ # compression keywords and compression -CompressionDict = Mapping[str, Optional[Union[str, int, bool]]] +CompressionDict = Dict[str, Any] CompressionOptions = Optional[Union[str, CompressionDict]] @@ -138,6 +138,6 @@ class IOargs(Generic[ModeVar, EncodingVar]): filepath_or_buffer: FileOrBuffer encoding: EncodingVar - compression: CompressionOptions + compression: CompressionDict should_close: bool mode: Union[ModeVar, str] diff --git a/pandas/core/frame.py b/pandas/core/frame.py index c48bec9b670ad..1713743b98bff 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -27,7 +27,6 @@ Iterable, Iterator, List, - Mapping, Optional, Sequence, Set, @@ -49,6 +48,7 @@ ArrayLike, Axes, Axis, + CompressionOptions, Dtype, FilePathOrBuffer, FrameOrSeriesUnion, @@ -2062,7 +2062,7 @@ def to_stata( variable_labels: Optional[Dict[Label, str]] = None, version: Optional[int] = 114, convert_strl: Optional[Sequence[Label]] = None, - compression: Union[str, Mapping[str, str], None] = "infer", + compression: CompressionOptions = "infer", storage_options: StorageOptions = None, ) -> None: """ diff --git a/pandas/io/common.py b/pandas/io/common.py index 2b13d54ec3aed..a80b89569f429 100644 --- a/pandas/io/common.py +++ b/pandas/io/common.py @@ -205,11 +205,13 @@ def get_filepath_or_buffer( """ filepath_or_buffer = stringify_path(filepath_or_buffer) + # handle compression dict + compression_method, compression = get_compression_method(compression) + compression_method = infer_compression(filepath_or_buffer, compression_method) + compression = dict(compression, method=compression_method) + # bz2 and xz do not write the byte order mark for utf-16 and utf-32 # print a warning when writing such files - compression_method = infer_compression( - filepath_or_buffer, get_compression_method(compression)[0] - ) if ( mode and "w" in mode @@ -238,7 +240,7 @@ def get_filepath_or_buffer( content_encoding = req.headers.get("Content-Encoding", None) if content_encoding == "gzip": # Override compression based on Content-Encoding header - compression = "gzip" + compression = {"method": "gzip"} reader = BytesIO(req.read()) req.close() return IOargs( @@ -374,11 +376,7 @@ def get_compression_method( if isinstance(compression, Mapping): compression_args = dict(compression) try: - # error: Incompatible types in assignment (expression has type - # "Union[str, int, None]", variable has type "Optional[str]") - compression_method = compression_args.pop( # type: ignore[assignment] - "method" - ) + compression_method = compression_args.pop("method") except KeyError as err: raise ValueError("If mapping, compression must have key 'method'") from err else: @@ -652,12 +650,8 @@ def __init__( super().__init__(file, mode, **kwargs_zip) # type: ignore[arg-type] def write(self, data): - archive_name = self.filename - if self.archive_name is not None: - archive_name = self.archive_name - if archive_name is None: - # ZipFile needs a non-empty string - archive_name = "zip" + # ZipFile needs a non-empty string + archive_name = self.archive_name or self.filename or "zip" super().writestr(archive_name, data) @property diff --git a/pandas/io/formats/csvs.py b/pandas/io/formats/csvs.py index 270caec022fef..15cd5c026c6b6 100644 --- a/pandas/io/formats/csvs.py +++ b/pandas/io/formats/csvs.py @@ -21,12 +21,7 @@ ) from 
pandas.core.dtypes.missing import notna -from pandas.io.common import ( - get_compression_method, - get_filepath_or_buffer, - get_handle, - infer_compression, -) +from pandas.io.common import get_filepath_or_buffer, get_handle class CSVFormatter: @@ -60,17 +55,15 @@ def __init__( if path_or_buf is None: path_or_buf = StringIO() - # Extract compression mode as given, if dict - compression, self.compression_args = get_compression_method(compression) - self.compression = infer_compression(path_or_buf, compression) - ioargs = get_filepath_or_buffer( path_or_buf, encoding=encoding, - compression=self.compression, + compression=compression, mode=mode, storage_options=storage_options, ) + self.compression = ioargs.compression.pop("method") + self.compression_args = ioargs.compression self.path_or_buf = ioargs.filepath_or_buffer self.should_close = ioargs.should_close self.mode = ioargs.mode diff --git a/pandas/io/json/_json.py b/pandas/io/json/_json.py index 7a3b76ff7e3d0..a4d923fdbe45a 100644 --- a/pandas/io/json/_json.py +++ b/pandas/io/json/_json.py @@ -19,12 +19,7 @@ from pandas.core.construction import create_series_with_explicit_dtype from pandas.core.reshape.concat import concat -from pandas.io.common import ( - get_compression_method, - get_filepath_or_buffer, - get_handle, - infer_compression, -) +from pandas.io.common import get_compression_method, get_filepath_or_buffer, get_handle from pandas.io.json._normalize import convert_to_line_delimits from pandas.io.json._table_schema import build_table_schema, parse_table_schema from pandas.io.parsers import _validate_integer @@ -66,6 +61,7 @@ def to_json( ) path_or_buf = ioargs.filepath_or_buffer should_close = ioargs.should_close + compression = ioargs.compression if lines and orient != "records": raise ValueError("'lines' keyword only valid when 'orient' is records") @@ -616,9 +612,6 @@ def read_json( if encoding is None: encoding = "utf-8" - compression_method, compression = get_compression_method(compression) - compression_method = infer_compression(path_or_buf, compression_method) - compression = dict(compression, method=compression_method) ioargs = get_filepath_or_buffer( path_or_buf, encoding=encoding, diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py index c6ef5221e7ead..a0466c5ac6b57 100644 --- a/pandas/io/parsers.py +++ b/pandas/io/parsers.py @@ -63,12 +63,7 @@ from pandas.core.series import Series from pandas.core.tools import datetimes as tools -from pandas.io.common import ( - get_filepath_or_buffer, - get_handle, - infer_compression, - validate_header_arg, -) +from pandas.io.common import get_filepath_or_buffer, get_handle, validate_header_arg from pandas.io.date_converters import generic_parser # BOM character (byte order mark) @@ -424,9 +419,7 @@ def _read(filepath_or_buffer: FilePathOrBuffer, kwds): if encoding is not None: encoding = re.sub("_", "-", encoding).lower() kwds["encoding"] = encoding - compression = kwds.get("compression", "infer") - compression = infer_compression(filepath_or_buffer, compression) # TODO: get_filepath_or_buffer could return # Union[FilePathOrBuffer, s3fs.S3File, gcsfs.GCSFile] @@ -1976,6 +1969,10 @@ def __init__(self, src, **kwds): encoding = kwds.get("encoding") + # parsers.TextReader doesn't support compression dicts + if isinstance(kwds.get("compression"), dict): + kwds["compression"] = kwds["compression"]["method"] + if kwds.get("compression") is None and encoding: if isinstance(src, str): src = open(src, "rb") diff --git a/pandas/io/pickle.py b/pandas/io/pickle.py index 
857a2d1b69be4..655deb5ca3779 100644 --- a/pandas/io/pickle.py +++ b/pandas/io/pickle.py @@ -92,11 +92,8 @@ def to_pickle( mode="wb", storage_options=storage_options, ) - compression = ioargs.compression - if not isinstance(ioargs.filepath_or_buffer, str) and compression == "infer": - compression = None f, fh = get_handle( - ioargs.filepath_or_buffer, "wb", compression=compression, is_text=False + ioargs.filepath_or_buffer, "wb", compression=ioargs.compression, is_text=False ) if protocol < 0: protocol = pickle.HIGHEST_PROTOCOL @@ -196,11 +193,8 @@ def read_pickle( ioargs = get_filepath_or_buffer( filepath_or_buffer, compression=compression, storage_options=storage_options ) - compression = ioargs.compression - if not isinstance(ioargs.filepath_or_buffer, str) and compression == "infer": - compression = None f, fh = get_handle( - ioargs.filepath_or_buffer, "rb", compression=compression, is_text=False + ioargs.filepath_or_buffer, "rb", compression=ioargs.compression, is_text=False ) # 1) try standard library Pickle diff --git a/pandas/io/stata.py b/pandas/io/stata.py index 34d520004cc65..b3b16e04a5d9e 100644 --- a/pandas/io/stata.py +++ b/pandas/io/stata.py @@ -16,18 +16,7 @@ from pathlib import Path import struct import sys -from typing import ( - Any, - AnyStr, - BinaryIO, - Dict, - List, - Mapping, - Optional, - Sequence, - Tuple, - Union, -) +from typing import Any, AnyStr, BinaryIO, Dict, List, Optional, Sequence, Tuple, Union import warnings from dateutil.relativedelta import relativedelta @@ -58,13 +47,7 @@ from pandas.core.indexes.base import Index from pandas.core.series import Series -from pandas.io.common import ( - get_compression_method, - get_filepath_or_buffer, - get_handle, - infer_compression, - stringify_path, -) +from pandas.io.common import get_filepath_or_buffer, get_handle, stringify_path _version_error = ( "Version of given Stata file is {version}. 
pandas supports importing " @@ -1976,9 +1959,6 @@ def _open_file_binary_write( return fname, False, None # type: ignore[return-value] elif isinstance(fname, (str, Path)): # Extract compression mode as given, if dict - compression_typ, compression_args = get_compression_method(compression) - compression_typ = infer_compression(fname, compression_typ) - compression = dict(compression_args, method=compression_typ) ioargs = get_filepath_or_buffer( fname, mode="wb", compression=compression, storage_options=storage_options ) @@ -2235,7 +2215,7 @@ def __init__( time_stamp: Optional[datetime.datetime] = None, data_label: Optional[str] = None, variable_labels: Optional[Dict[Label, str]] = None, - compression: Union[str, Mapping[str, str], None] = "infer", + compression: CompressionOptions = "infer", storage_options: StorageOptions = None, ): super().__init__() @@ -3118,7 +3098,7 @@ def __init__( data_label: Optional[str] = None, variable_labels: Optional[Dict[Label, str]] = None, convert_strl: Optional[Sequence[Label]] = None, - compression: Union[str, Mapping[str, str], None] = "infer", + compression: CompressionOptions = "infer", storage_options: StorageOptions = None, ): # Copy to new list since convert_strl might be modified later @@ -3523,7 +3503,7 @@ def __init__( variable_labels: Optional[Dict[Label, str]] = None, convert_strl: Optional[Sequence[Label]] = None, version: Optional[int] = None, - compression: Union[str, Mapping[str, str], None] = "infer", + compression: CompressionOptions = "infer", storage_options: StorageOptions = None, ): if version is None: diff --git a/pandas/tests/io/test_pickle.py b/pandas/tests/io/test_pickle.py index 6331113ab8945..d1c6705dd7a6f 100644 --- a/pandas/tests/io/test_pickle.py +++ b/pandas/tests/io/test_pickle.py @@ -14,7 +14,9 @@ import datetime import glob import gzip +import io import os +from pathlib import Path import pickle import shutil from warnings import catch_warnings, simplefilter @@ -486,3 +488,30 @@ def test_read_pickle_with_subclass(): tm.assert_series_equal(result[0], expected[0]) assert isinstance(result[1], MyTz) + + +def test_pickle_binary_object_compression(compression): + """ + Read/write from binary file-objects w/wo compression. 
+
+    GH 26237, GH 29054, and GH 29570
+    """
+    df = tm.makeDataFrame()
+
+    # reference for compression
+    with tm.ensure_clean() as path:
+        df.to_pickle(path, compression=compression)
+        reference = Path(path).read_bytes()
+
+    # write
+    buffer = io.BytesIO()
+    df.to_pickle(buffer, compression=compression)
+    buffer.seek(0)
+
+    # gzip and zip save the filename: cannot compare the compressed content
+    assert buffer.getvalue() == reference or compression in ("gzip", "zip")
+
+    # read
+    read_df = pd.read_pickle(buffer, compression=compression)
+    buffer.seek(0)
+    tm.assert_frame_equal(df, read_df)

From 16544c902bf89a0352008b3b9e7325fd63103327 Mon Sep 17 00:00:00 2001
From: Simon Hawkins
Date: Sat, 5 Sep 2020 15:55:11 +0100
Subject: [PATCH 442/632] TYP: Check for use of Union[Series, DataFrame]
 instead of FrameOrSeriesUnion alias (#36137)

---
 ci/code_checks.sh              |  8 ++++++++
 pandas/core/apply.py           | 16 +++++++---------
 pandas/core/groupby/generic.py | 10 +++++-----
 pandas/core/groupby/grouper.py |  2 +-
 pandas/core/reshape/merge.py   | 10 +++++-----
 pandas/core/reshape/pivot.py   |  4 ++--
 pandas/io/pytables.py          |  6 +++---
 7 files changed, 31 insertions(+), 25 deletions(-)

diff --git a/ci/code_checks.sh b/ci/code_checks.sh
index 6006d09bc3e78..8ee579cd25203 100755
--- a/ci/code_checks.sh
+++ b/ci/code_checks.sh
@@ -230,6 +230,9 @@ if [[ -z "$CHECK" || "$CHECK" == "patterns" ]]; then
     invgrep -R --include=*.{py,pyx} '!r}' pandas
     RET=$(($RET + $?)) ; echo $MSG "DONE"
 
+    # -------------------------------------------------------------------------
+    # Type annotations
+
     MSG='Check for use of comment-based annotation syntax' ; echo $MSG
     invgrep -R --include="*.py" -P '# type: (?!ignore)' pandas
     RET=$(($RET + $?)) ; echo $MSG "DONE"
@@ -238,6 +241,11 @@ if [[ -z "$CHECK" || "$CHECK" == "patterns" ]]; then
     invgrep -R --include="*.py" -P '# type:\s?ignore(?!\[)' pandas
     RET=$(($RET + $?)) ; echo $MSG "DONE"
 
+    MSG='Check for use of Union[Series, DataFrame] instead of FrameOrSeriesUnion alias' ; echo $MSG
+    invgrep -R --include="*.py" --exclude=_typing.py -E 'Union\[.*(Series.*DataFrame|DataFrame.*Series).*\]' pandas
+    RET=$(($RET + $?)) ; echo $MSG "DONE"
+
+    # -------------------------------------------------------------------------
     MSG='Check for use of foo.__class__ instead of type(foo)' ; echo $MSG
     invgrep -R --include=*.{py,pyx} '\.__class__' pandas
     RET=$(($RET + $?)) ; echo $MSG "DONE"
diff --git a/pandas/core/apply.py b/pandas/core/apply.py
index 99a9e1377563c..bbf832f33065b 100644
--- a/pandas/core/apply.py
+++ b/pandas/core/apply.py
@@ -1,12 +1,12 @@
 import abc
 import inspect
-from typing import TYPE_CHECKING, Any, Dict, Iterator, Optional, Tuple, Type, Union
+from typing import TYPE_CHECKING, Any, Dict, Iterator, Optional, Tuple, Type
 
 import numpy as np
 
 from pandas._config import option_context
 
-from pandas._typing import Axis
+from pandas._typing import Axis, FrameOrSeriesUnion
 from pandas.util._decorators import cache_readonly
 
 from pandas.core.dtypes.common import is_dict_like, is_list_like, is_sequence
@@ -73,7 +73,7 @@ def series_generator(self) -> Iterator["Series"]:
     @abc.abstractmethod
     def wrap_results_for_axis(
        self, results: ResType, res_index: "Index"
-    ) -> Union["Series", "DataFrame"]:
+    ) -> FrameOrSeriesUnion:
         pass
 
     # ---------------------------------------------------------------
@@ -289,9 +289,7 @@ def apply_series_generator(self) -> Tuple[ResType, "Index"]:
 
         return results, res_index
 
-    def wrap_results(
-        self, results: ResType, res_index: "Index"
-    ) -> Union["Series", "DataFrame"]:
+    def 
wrap_results(self, results: ResType, res_index: "Index") -> FrameOrSeriesUnion: from pandas import Series # see if we can infer the results @@ -335,7 +333,7 @@ def result_columns(self) -> "Index": def wrap_results_for_axis( self, results: ResType, res_index: "Index" - ) -> Union["Series", "DataFrame"]: + ) -> FrameOrSeriesUnion: """ return the results for the rows """ if self.result_type == "reduce": @@ -408,9 +406,9 @@ def result_columns(self) -> "Index": def wrap_results_for_axis( self, results: ResType, res_index: "Index" - ) -> Union["Series", "DataFrame"]: + ) -> FrameOrSeriesUnion: """ return the results for the columns """ - result: Union["Series", "DataFrame"] + result: FrameOrSeriesUnion # we have requested to expand if self.result_type == "expand": diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index b855ce65f41b2..260e21b1f2593 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -308,7 +308,7 @@ def _aggregate_multiple_funcs(self, arg): arg = zip(columns, arg) - results: Dict[base.OutputKey, Union[Series, DataFrame]] = {} + results: Dict[base.OutputKey, FrameOrSeriesUnion] = {} for idx, (name, func) in enumerate(arg): obj = self @@ -332,7 +332,7 @@ def _wrap_series_output( self, output: Mapping[base.OutputKey, Union[Series, np.ndarray]], index: Optional[Index], - ) -> Union[Series, DataFrame]: + ) -> FrameOrSeriesUnion: """ Wraps the output of a SeriesGroupBy operation into the expected result. @@ -355,7 +355,7 @@ def _wrap_series_output( indexed_output = {key.position: val for key, val in output.items()} columns = Index(key.label for key in output) - result: Union[Series, DataFrame] + result: FrameOrSeriesUnion if len(output) > 1: result = self.obj._constructor_expanddim(indexed_output, index=index) result.columns = columns @@ -373,7 +373,7 @@ def _wrap_aggregated_output( self, output: Mapping[base.OutputKey, Union[Series, np.ndarray]], index: Optional[Index], - ) -> Union[Series, DataFrame]: + ) -> FrameOrSeriesUnion: """ Wraps the output of a SeriesGroupBy aggregation into the expected result. 
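
For context on the alias these hunks converge on: ``FrameOrSeriesUnion`` is defined in ``pandas._typing`` as ``Union["DataFrame", "Series"]``, and the new CI pattern above simply bans spelling that union out by hand. A minimal sketch of how such an annotation is consumed (illustrative only; ``pandas._typing`` is a private module, so importing it is shown here purely for demonstration, not as supported API):

```python
# Sketch, not part of the patch: one annotation covering both containers.
from pandas import DataFrame, Series
from pandas._typing import FrameOrSeriesUnion  # private module, for illustration


def head_one(obj: FrameOrSeriesUnion) -> FrameOrSeriesUnion:
    # iloc behaves identically on Series and DataFrame, so the union is natural.
    return obj.iloc[:1]


print(head_one(Series([1, 2, 3])))
print(head_one(DataFrame({"a": [1, 2, 3]})))
```
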
@@ -1085,7 +1085,7 @@ def blk_func(bvalues: ArrayLike) -> ArrayLike: raise # We get here with a) EADtypes and b) object dtype - obj: Union[Series, DataFrame] + obj: FrameOrSeriesUnion # call our grouper again with only this block if isinstance(bvalues, ExtensionArray): # TODO(EA2D): special case not needed with 2D EAs diff --git a/pandas/core/groupby/grouper.py b/pandas/core/groupby/grouper.py index 6678edc3821c8..59ea7781025c4 100644 --- a/pandas/core/groupby/grouper.py +++ b/pandas/core/groupby/grouper.py @@ -393,7 +393,7 @@ class Grouping: ---------- index : Index grouper : - obj Union[DataFrame, Series]: + obj : DataFrame or Series name : Label level : observed : bool, default False diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py index 602ff226f8878..f1c5486222ea1 100644 --- a/pandas/core/reshape/merge.py +++ b/pandas/core/reshape/merge.py @@ -6,14 +6,14 @@ import datetime from functools import partial import string -from typing import TYPE_CHECKING, Optional, Tuple, Union +from typing import TYPE_CHECKING, Optional, Tuple import warnings import numpy as np from pandas._libs import Timedelta, hashtable as libhashtable, lib import pandas._libs.join as libjoin -from pandas._typing import ArrayLike, FrameOrSeries +from pandas._typing import ArrayLike, FrameOrSeries, FrameOrSeriesUnion from pandas.errors import MergeError from pandas.util._decorators import Appender, Substitution @@ -51,7 +51,7 @@ from pandas.core.sorting import is_int64_overflow_possible if TYPE_CHECKING: - from pandas import DataFrame, Series # noqa:F401 + from pandas import DataFrame # noqa:F401 @Substitution("\nleft : DataFrame") @@ -575,8 +575,8 @@ class _MergeOperation: def __init__( self, - left: Union["Series", "DataFrame"], - right: Union["Series", "DataFrame"], + left: FrameOrSeriesUnion, + right: FrameOrSeriesUnion, how: str = "inner", on=None, left_on=None, diff --git a/pandas/core/reshape/pivot.py b/pandas/core/reshape/pivot.py index 969ac56e41860..842a42f80e1b7 100644 --- a/pandas/core/reshape/pivot.py +++ b/pandas/core/reshape/pivot.py @@ -12,7 +12,7 @@ import numpy as np -from pandas._typing import Label +from pandas._typing import FrameOrSeriesUnion, Label from pandas.util._decorators import Appender, Substitution from pandas.core.dtypes.cast import maybe_downcast_to_dtype @@ -200,7 +200,7 @@ def pivot_table( def _add_margins( - table: Union["Series", "DataFrame"], + table: FrameOrSeriesUnion, data, values, rows, diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index 0913627324c48..e850a101a0a63 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -16,7 +16,7 @@ from pandas._libs import lib, writers as libwriters from pandas._libs.tslibs import timezones -from pandas._typing import ArrayLike, FrameOrSeries, Label +from pandas._typing import ArrayLike, FrameOrSeries, FrameOrSeriesUnion, Label from pandas.compat._optional import import_optional_dependency from pandas.compat.pickle_compat import patch_pickle from pandas.errors import PerformanceWarning @@ -2566,7 +2566,7 @@ class Fixed: pandas_kind: str format_type: str = "fixed" # GH#30962 needed by dask - obj_type: Type[Union[DataFrame, Series]] + obj_type: Type[FrameOrSeriesUnion] ndim: int encoding: str parent: HDFStore @@ -4442,7 +4442,7 @@ class AppendableFrameTable(AppendableTable): pandas_kind = "frame_table" table_type = "appendable_frame" ndim = 2 - obj_type: Type[Union[DataFrame, Series]] = DataFrame + obj_type: Type[FrameOrSeriesUnion] = DataFrame @property def is_transposed(self) -> bool: From 
bdb6e26feb26931377a92da1653ff4b95af6351d Mon Sep 17 00:00:00 2001 From: Simon Hawkins Date: Sat, 5 Sep 2020 16:03:54 +0100 Subject: [PATCH 443/632] TYP: remove string literals for type annotations in pandas\core\frame.py (#36140) --- pandas/core/frame.py | 104 +++++++++++++++++++++---------------------- 1 file changed, 52 insertions(+), 52 deletions(-) diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 1713743b98bff..29d6fb9aa7d56 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -420,7 +420,7 @@ class DataFrame(NDFrame): _typ = "dataframe" @property - def _constructor(self) -> Type["DataFrame"]: + def _constructor(self) -> Type[DataFrame]: return DataFrame _constructor_sliced: Type[Series] = Series @@ -1233,7 +1233,7 @@ def __rmatmul__(self, other): # IO methods (to / from other formats) @classmethod - def from_dict(cls, data, orient="columns", dtype=None, columns=None) -> "DataFrame": + def from_dict(cls, data, orient="columns", dtype=None, columns=None) -> DataFrame: """ Construct DataFrame from dict of array-like or dicts. @@ -1671,7 +1671,7 @@ def from_records( columns=None, coerce_float=False, nrows=None, - ) -> "DataFrame": + ) -> DataFrame: """ Convert structured or record ndarray to DataFrame. @@ -2012,7 +2012,7 @@ def _from_arrays( index, dtype: Optional[Dtype] = None, verify_integrity: bool = True, - ) -> "DataFrame": + ) -> DataFrame: """ Create DataFrame from a list of arrays corresponding to the columns. @@ -2720,7 +2720,7 @@ def memory_usage(self, index=True, deep=False) -> Series: ).append(result) return result - def transpose(self, *args, copy: bool = False) -> "DataFrame": + def transpose(self, *args, copy: bool = False) -> DataFrame: """ Transpose index and columns. @@ -2843,7 +2843,7 @@ def transpose(self, *args, copy: bool = False) -> "DataFrame": return result.__finalize__(self, method="transpose") @property - def T(self) -> "DataFrame": + def T(self) -> DataFrame: return self.transpose() # ---------------------------------------------------------------------- @@ -3503,7 +3503,7 @@ def eval(self, expr, inplace=False, **kwargs): return _eval(expr, inplace=inplace, **kwargs) - def select_dtypes(self, include=None, exclude=None) -> "DataFrame": + def select_dtypes(self, include=None, exclude=None) -> DataFrame: """ Return a subset of the DataFrame's columns based on the column dtypes. @@ -3667,7 +3667,7 @@ def insert(self, loc, column, value, allow_duplicates=False) -> None: value = self._sanitize_column(column, value, broadcast=False) self._mgr.insert(loc, column, value, allow_duplicates=allow_duplicates) - def assign(self, **kwargs) -> "DataFrame": + def assign(self, **kwargs) -> DataFrame: r""" Assign new columns to a DataFrame. @@ -3965,7 +3965,7 @@ def _reindex_columns( allow_dups=False, ) - def _reindex_multi(self, axes, copy, fill_value) -> "DataFrame": + def _reindex_multi(self, axes, copy, fill_value) -> DataFrame: """ We are guaranteed non-Nones in the axes. 
""" @@ -3998,7 +3998,7 @@ def align( limit=None, fill_axis=0, broadcast_axis=None, - ) -> "DataFrame": + ) -> DataFrame: return super().align( other, join=join, @@ -4067,7 +4067,7 @@ def set_axis(self, labels, axis: Axis = 0, inplace: bool = False): ("tolerance", None), ], ) - def reindex(self, *args, **kwargs) -> "DataFrame": + def reindex(self, *args, **kwargs) -> DataFrame: axes = validate_axis_style_args(self, args, kwargs, "labels", "reindex") kwargs.update(axes) # Pop these, since the values are in `kwargs` under different names @@ -4229,7 +4229,7 @@ def rename( inplace: bool = False, level: Optional[Level] = None, errors: str = "ignore", - ) -> Optional["DataFrame"]: + ) -> Optional[DataFrame]: """ Alter axes labels. @@ -4357,7 +4357,7 @@ def fillna( inplace=False, limit=None, downcast=None, - ) -> Optional["DataFrame"]: + ) -> Optional[DataFrame]: return super().fillna( value=value, method=method, @@ -4465,7 +4465,7 @@ def _replace_columnwise( return res.__finalize__(self) @doc(NDFrame.shift, klass=_shared_doc_kwargs["klass"]) - def shift(self, periods=1, freq=None, axis=0, fill_value=None) -> "DataFrame": + def shift(self, periods=1, freq=None, axis=0, fill_value=None) -> DataFrame: return super().shift( periods=periods, freq=freq, axis=axis, fill_value=fill_value ) @@ -4666,7 +4666,7 @@ def reset_index( inplace: bool = False, col_level: Hashable = 0, col_fill: Label = "", - ) -> Optional["DataFrame"]: + ) -> Optional[DataFrame]: """ Reset the index, or a level of it. @@ -4910,20 +4910,20 @@ def _maybe_casted_values(index, labels=None): # Reindex-based selection methods @doc(NDFrame.isna, klass=_shared_doc_kwargs["klass"]) - def isna(self) -> "DataFrame": + def isna(self) -> DataFrame: result = self._constructor(self._data.isna(func=isna)) return result.__finalize__(self, method="isna") @doc(NDFrame.isna, klass=_shared_doc_kwargs["klass"]) - def isnull(self) -> "DataFrame": + def isnull(self) -> DataFrame: return self.isna() @doc(NDFrame.notna, klass=_shared_doc_kwargs["klass"]) - def notna(self) -> "DataFrame": + def notna(self) -> DataFrame: return ~self.isna() @doc(NDFrame.notna, klass=_shared_doc_kwargs["klass"]) - def notnull(self) -> "DataFrame": + def notnull(self) -> DataFrame: return ~self.isna() def dropna(self, axis=0, how="any", thresh=None, subset=None, inplace=False): @@ -5074,7 +5074,7 @@ def drop_duplicates( keep: Union[str, bool] = "first", inplace: bool = False, ignore_index: bool = False, - ) -> Optional["DataFrame"]: + ) -> Optional[DataFrame]: """ Return DataFrame with duplicate rows removed. @@ -5168,7 +5168,7 @@ def duplicated( self, subset: Optional[Union[Hashable, Sequence[Hashable]]] = None, keep: Union[str, bool] = "first", - ) -> "Series": + ) -> Series: """ Return boolean Series denoting duplicate rows. @@ -5619,7 +5619,7 @@ def value_counts( return counts - def nlargest(self, n, columns, keep="first") -> "DataFrame": + def nlargest(self, n, columns, keep="first") -> DataFrame: """ Return the first `n` rows ordered by `columns` in descending order. @@ -5728,7 +5728,7 @@ def nlargest(self, n, columns, keep="first") -> "DataFrame": """ return algorithms.SelectNFrame(self, n=n, keep=keep, columns=columns).nlargest() - def nsmallest(self, n, columns, keep="first") -> "DataFrame": + def nsmallest(self, n, columns, keep="first") -> DataFrame: """ Return the first `n` rows ordered by `columns` in ascending order. 
@@ -5830,7 +5830,7 @@ def nsmallest(self, n, columns, keep="first") -> "DataFrame": self, n=n, keep=keep, columns=columns ).nsmallest() - def swaplevel(self, i=-2, j=-1, axis=0) -> "DataFrame": + def swaplevel(self, i=-2, j=-1, axis=0) -> DataFrame: """ Swap levels i and j in a MultiIndex on a particular axis. @@ -5861,7 +5861,7 @@ def swaplevel(self, i=-2, j=-1, axis=0) -> "DataFrame": result.columns = result.columns.swaplevel(i, j) return result - def reorder_levels(self, order, axis=0) -> "DataFrame": + def reorder_levels(self, order, axis=0) -> DataFrame: """ Rearrange index levels using input order. May not drop or duplicate levels. @@ -5894,7 +5894,7 @@ def reorder_levels(self, order, axis=0) -> "DataFrame": # ---------------------------------------------------------------------- # Arithmetic / combination related - def _combine_frame(self, other: "DataFrame", func, fill_value=None): + def _combine_frame(self, other: DataFrame, func, fill_value=None): # at this point we have `self._indexed_same(other)` if fill_value is None: @@ -5914,7 +5914,7 @@ def _arith_op(left, right): new_data = ops.dispatch_to_series(self, other, _arith_op) return new_data - def _construct_result(self, result) -> "DataFrame": + def _construct_result(self, result) -> DataFrame: """ Wrap the result of an arithmetic, comparison, or logical operation. @@ -6031,11 +6031,11 @@ def _construct_result(self, result) -> "DataFrame": @Appender(_shared_docs["compare"] % _shared_doc_kwargs) def compare( self, - other: "DataFrame", + other: DataFrame, align_axis: Axis = 1, keep_shape: bool = False, keep_equal: bool = False, - ) -> "DataFrame": + ) -> DataFrame: return super().compare( other=other, align_axis=align_axis, @@ -6044,8 +6044,8 @@ def compare( ) def combine( - self, other: "DataFrame", func, fill_value=None, overwrite=True - ) -> "DataFrame": + self, other: DataFrame, func, fill_value=None, overwrite=True + ) -> DataFrame: """ Perform column-wise combine with another DataFrame. @@ -6212,7 +6212,7 @@ def combine( # convert_objects just in case return self._constructor(result, index=new_index, columns=new_columns) - def combine_first(self, other: "DataFrame") -> "DataFrame": + def combine_first(self, other: DataFrame) -> DataFrame: """ Update null elements with value in the same location in `other`. @@ -6718,7 +6718,7 @@ def groupby( @Substitution("") @Appender(_shared_docs["pivot"]) - def pivot(self, index=None, columns=None, values=None) -> "DataFrame": + def pivot(self, index=None, columns=None, values=None) -> DataFrame: from pandas.core.reshape.pivot import pivot return pivot(self, index=index, columns=columns, values=values) @@ -6870,7 +6870,7 @@ def pivot_table( dropna=True, margins_name="All", observed=False, - ) -> "DataFrame": + ) -> DataFrame: from pandas.core.reshape.pivot import pivot_table return pivot_table( @@ -7056,7 +7056,7 @@ def stack(self, level=-1, dropna=True): def explode( self, column: Union[str, Tuple], ignore_index: bool = False - ) -> "DataFrame": + ) -> DataFrame: """ Transform each element of a list-like to a row, replicating index values. 
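
As for why the quotes around ``DataFrame`` can be dropped inside the class body at all: a minimal illustration, assuming the module enables postponed evaluation of annotations (PEP 563), which is what makes unquoted self-references like those above legal. The class name below is a stand-in, not pandas source:

```python
# Sketch of the mechanism with a hypothetical class name.
from __future__ import annotations  # PEP 563: annotations are stored lazily

from typing import Type


class Frame:
    @property
    def _constructor(self) -> Type[Frame]:
        # Without the __future__ import this annotation would raise NameError,
        # because "Frame" is not bound yet while the class body executes.
        return Frame
```
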
@@ -7211,7 +7211,7 @@ def melt( value_name="value", col_level=None, ignore_index=True, - ) -> "DataFrame": + ) -> DataFrame: return melt( self, @@ -7299,7 +7299,7 @@ def melt( 1 255.0""" ), ) - def diff(self, periods: int = 1, axis: Axis = 0) -> "DataFrame": + def diff(self, periods: int = 1, axis: Axis = 0) -> DataFrame: bm_axis = self._get_block_manager_axis(axis) self._consolidate_inplace() @@ -7462,7 +7462,7 @@ def _aggregate(self, arg, axis=0, *args, **kwargs): klass=_shared_doc_kwargs["klass"], axis=_shared_doc_kwargs["axis"], ) - def transform(self, func, axis=0, *args, **kwargs) -> "DataFrame": + def transform(self, func, axis=0, *args, **kwargs) -> DataFrame: axis = self._get_axis_number(axis) if axis == 1: return self.T.transform(func, *args, **kwargs).T @@ -7616,7 +7616,7 @@ def apply(self, func, axis=0, raw=False, result_type=None, args=(), **kwds): ) return op.get_result() - def applymap(self, func) -> "DataFrame": + def applymap(self, func) -> DataFrame: """ Apply a function to a Dataframe elementwise. @@ -7678,7 +7678,7 @@ def infer(x): def append( self, other, ignore_index=False, verify_integrity=False, sort=False - ) -> "DataFrame": + ) -> DataFrame: """ Append rows of `other` to the end of caller, returning a new object. @@ -7818,7 +7818,7 @@ def append( def join( self, other, on=None, how="left", lsuffix="", rsuffix="", sort=False - ) -> "DataFrame": + ) -> DataFrame: """ Join columns of another DataFrame. @@ -8009,7 +8009,7 @@ def merge( copy=True, indicator=False, validate=None, - ) -> "DataFrame": + ) -> DataFrame: from pandas.core.reshape.merge import merge return merge( @@ -8028,7 +8028,7 @@ def merge( validate=validate, ) - def round(self, decimals=0, *args, **kwargs) -> "DataFrame": + def round(self, decimals=0, *args, **kwargs) -> DataFrame: """ Round a DataFrame to a variable number of decimal places. @@ -8142,7 +8142,7 @@ def _series_round(s, decimals): # ---------------------------------------------------------------------- # Statistical methods, etc. - def corr(self, method="pearson", min_periods=1) -> "DataFrame": + def corr(self, method="pearson", min_periods=1) -> DataFrame: """ Compute pairwise correlation of columns, excluding NA/null values. @@ -8233,7 +8233,7 @@ def corr(self, method="pearson", min_periods=1) -> "DataFrame": def cov( self, min_periods: Optional[int] = None, ddof: Optional[int] = 1 - ) -> "DataFrame": + ) -> DataFrame: """ Compute pairwise covariance of columns, excluding NA/null values. @@ -8636,7 +8636,7 @@ def func(values): else: return op(values, axis=axis, skipna=skipna, **kwds) - def _get_data(axis_matters: bool) -> "DataFrame": + def _get_data(axis_matters: bool) -> DataFrame: if filter_type is None: data = self._get_numeric_data() elif filter_type == "bool": @@ -8937,7 +8937,7 @@ def _get_agg_axis(self, axis_num: int) -> Index: else: raise ValueError(f"Axis must be 0 or 1 (got {repr(axis_num)})") - def mode(self, axis=0, numeric_only=False, dropna=True) -> "DataFrame": + def mode(self, axis=0, numeric_only=False, dropna=True) -> DataFrame: """ Get the mode(s) of each element along the selected axis. @@ -9122,7 +9122,7 @@ def quantile(self, q=0.5, axis=0, numeric_only=True, interpolation="linear"): def to_timestamp( self, freq=None, how: str = "start", axis: Axis = 0, copy: bool = True - ) -> "DataFrame": + ) -> DataFrame: """ Cast to DatetimeIndex of timestamps, at *beginning* of period. 
@@ -9151,7 +9151,7 @@ def to_timestamp( setattr(new_obj, axis_name, new_ax) return new_obj - def to_period(self, freq=None, axis: Axis = 0, copy: bool = True) -> "DataFrame": + def to_period(self, freq=None, axis: Axis = 0, copy: bool = True) -> DataFrame: """ Convert DataFrame from DatetimeIndex to PeriodIndex. @@ -9180,7 +9180,7 @@ def to_period(self, freq=None, axis: Axis = 0, copy: bool = True) -> "DataFrame" setattr(new_obj, axis_name, new_ax) return new_obj - def isin(self, values) -> "DataFrame": + def isin(self, values) -> DataFrame: """ Whether each element in the DataFrame is contained in values. @@ -9287,10 +9287,10 @@ def isin(self, values) -> "DataFrame": _info_axis_number = 1 _info_axis_name = "columns" - index: "Index" = properties.AxisProperty( + index: Index = properties.AxisProperty( axis=1, doc="The index (row labels) of the DataFrame." ) - columns: "Index" = properties.AxisProperty( + columns: Index = properties.AxisProperty( axis=0, doc="The column labels of the DataFrame." ) From 6e280081c8fdf35b6e41d9d450b0e0172aea8f7f Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Sat, 5 Sep 2020 10:53:21 -0700 Subject: [PATCH 444/632] STY+CI: check for private function access across modules (#36144) --- Makefile | 7 +++ ci/code_checks.sh | 8 ++++ pandas/_libs/algos.pyx | 14 +++--- pandas/core/algorithms.py | 8 ++-- pandas/core/arrays/sparse/array.py | 2 +- pandas/core/internals/blocks.py | 2 +- pandas/core/missing.py | 2 +- pandas/plotting/_matplotlib/compat.py | 10 ++-- pandas/plotting/_matplotlib/core.py | 4 +- pandas/plotting/_matplotlib/tools.py | 2 +- pandas/tests/plotting/common.py | 8 ++-- pandas/tests/plotting/test_frame.py | 4 +- pandas/tests/plotting/test_misc.py | 4 +- scripts/validate_unwanted_patterns.py | 69 +++++++++++++++++++++++++-- 14 files changed, 111 insertions(+), 33 deletions(-) diff --git a/Makefile b/Makefile index f26689ab65ba5..4a9a48992f92f 100644 --- a/Makefile +++ b/Makefile @@ -25,3 +25,10 @@ doc: cd doc; \ python make.py clean; \ python make.py html + +check: + python3 scripts/validate_unwanted_patterns.py \ + --validation-type="private_function_across_module" \ + --included-file-extensions="py" \ + --excluded-file-paths=pandas/tests,asv_bench/,pandas/_vendored \ + pandas/ diff --git a/ci/code_checks.sh b/ci/code_checks.sh index 8ee579cd25203..875f1dbb83ce3 100755 --- a/ci/code_checks.sh +++ b/ci/code_checks.sh @@ -116,6 +116,14 @@ if [[ -z "$CHECK" || "$CHECK" == "lint" ]]; then fi RET=$(($RET + $?)) ; echo $MSG "DONE" + MSG='Check for use of private module attribute access' ; echo $MSG + if [[ "$GITHUB_ACTIONS" == "true" ]]; then + $BASE_DIR/scripts/validate_unwanted_patterns.py --validation-type="private_function_across_module" --included-file-extensions="py" --excluded-file-paths=pandas/tests,asv_bench/,pandas/_vendored --format="##[error]{source_path}:{line_number}:{msg}" pandas/ + else + $BASE_DIR/scripts/validate_unwanted_patterns.py --validation-type="private_function_across_module" --included-file-extensions="py" --excluded-file-paths=pandas/tests,asv_bench/,pandas/_vendored pandas/ + fi + RET=$(($RET + $?)) ; echo $MSG "DONE" + echo "isort --version-number" isort --version-number diff --git a/pandas/_libs/algos.pyx b/pandas/_libs/algos.pyx index 0a70afda893cf..c4723a5f064c7 100644 --- a/pandas/_libs/algos.pyx +++ b/pandas/_libs/algos.pyx @@ -412,7 +412,7 @@ ctypedef fused algos_t: uint8_t -def _validate_limit(nobs: int, limit=None) -> int: +def validate_limit(nobs: int, limit=None) -> int: """ Check that the `limit` argument is a 
positive integer. @@ -452,7 +452,7 @@ def pad(ndarray[algos_t] old, ndarray[algos_t] new, limit=None): indexer = np.empty(nright, dtype=np.int64) indexer[:] = -1 - lim = _validate_limit(nright, limit) + lim = validate_limit(nright, limit) if nleft == 0 or nright == 0 or new[nright - 1] < old[0]: return indexer @@ -509,7 +509,7 @@ def pad_inplace(algos_t[:] values, const uint8_t[:] mask, limit=None): if N == 0: return - lim = _validate_limit(N, limit) + lim = validate_limit(N, limit) val = values[0] for i in range(N): @@ -537,7 +537,7 @@ def pad_2d_inplace(algos_t[:, :] values, const uint8_t[:, :] mask, limit=None): if N == 0: return - lim = _validate_limit(N, limit) + lim = validate_limit(N, limit) for j in range(K): fill_count = 0 @@ -593,7 +593,7 @@ def backfill(ndarray[algos_t] old, ndarray[algos_t] new, limit=None) -> ndarray: indexer = np.empty(nright, dtype=np.int64) indexer[:] = -1 - lim = _validate_limit(nright, limit) + lim = validate_limit(nright, limit) if nleft == 0 or nright == 0 or new[0] > old[nleft - 1]: return indexer @@ -651,7 +651,7 @@ def backfill_inplace(algos_t[:] values, const uint8_t[:] mask, limit=None): if N == 0: return - lim = _validate_limit(N, limit) + lim = validate_limit(N, limit) val = values[N - 1] for i in range(N - 1, -1, -1): @@ -681,7 +681,7 @@ def backfill_2d_inplace(algos_t[:, :] values, if N == 0: return - lim = _validate_limit(N, limit) + lim = validate_limit(N, limit) for j in range(K): fill_count = 0 diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py index f297c7165208f..50ec3714f454b 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py @@ -60,7 +60,7 @@ from pandas.core.indexers import validate_indices if TYPE_CHECKING: - from pandas import Categorical, DataFrame, Series + from pandas import Categorical, DataFrame, Series # noqa:F401 _shared_docs: Dict[str, str] = {} @@ -767,7 +767,7 @@ def value_counts( counts = result._values else: - keys, counts = _value_counts_arraylike(values, dropna) + keys, counts = value_counts_arraylike(values, dropna) result = Series(counts, index=keys, name=name) @@ -780,8 +780,8 @@ def value_counts( return result -# Called once from SparseArray -def _value_counts_arraylike(values, dropna: bool): +# Called once from SparseArray, otherwise could be private +def value_counts_arraylike(values, dropna: bool): """ Parameters ---------- diff --git a/pandas/core/arrays/sparse/array.py b/pandas/core/arrays/sparse/array.py index 1531f7b292365..47c960dc969d6 100644 --- a/pandas/core/arrays/sparse/array.py +++ b/pandas/core/arrays/sparse/array.py @@ -735,7 +735,7 @@ def value_counts(self, dropna=True): """ from pandas import Index, Series - keys, counts = algos._value_counts_arraylike(self.sp_values, dropna=dropna) + keys, counts = algos.value_counts_arraylike(self.sp_values, dropna=dropna) fcounts = self.sp_index.ngaps if fcounts > 0: if self._null_fill_value and dropna: diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index 3bcd4debbf41a..9f4e535dc787d 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -390,7 +390,7 @@ def fillna( mask = isna(self.values) if limit is not None: - limit = libalgos._validate_limit(None, limit=limit) + limit = libalgos.validate_limit(None, limit=limit) mask[mask.cumsum(self.ndim - 1) > limit] = False if not self._can_hold_na: diff --git a/pandas/core/missing.py b/pandas/core/missing.py index 7802c5cbdbfb3..be66b19d10064 100644 --- a/pandas/core/missing.py +++ b/pandas/core/missing.py @@ -228,7 +228,7 
@@ def interpolate_1d( ) # default limit is unlimited GH #16282 - limit = algos._validate_limit(nobs=None, limit=limit) + limit = algos.validate_limit(nobs=None, limit=limit) # These are sets of index pointers to invalid values... i.e. {0, 1, etc... all_nans = set(np.flatnonzero(invalid)) diff --git a/pandas/plotting/_matplotlib/compat.py b/pandas/plotting/_matplotlib/compat.py index 7f107f18eca25..964596d9b6319 100644 --- a/pandas/plotting/_matplotlib/compat.py +++ b/pandas/plotting/_matplotlib/compat.py @@ -17,8 +17,8 @@ def inner(): return inner -_mpl_ge_2_2_3 = _mpl_version("2.2.3", operator.ge) -_mpl_ge_3_0_0 = _mpl_version("3.0.0", operator.ge) -_mpl_ge_3_1_0 = _mpl_version("3.1.0", operator.ge) -_mpl_ge_3_2_0 = _mpl_version("3.2.0", operator.ge) -_mpl_ge_3_3_0 = _mpl_version("3.3.0", operator.ge) +mpl_ge_2_2_3 = _mpl_version("2.2.3", operator.ge) +mpl_ge_3_0_0 = _mpl_version("3.0.0", operator.ge) +mpl_ge_3_1_0 = _mpl_version("3.1.0", operator.ge) +mpl_ge_3_2_0 = _mpl_version("3.2.0", operator.ge) +mpl_ge_3_3_0 = _mpl_version("3.3.0", operator.ge) diff --git a/pandas/plotting/_matplotlib/core.py b/pandas/plotting/_matplotlib/core.py index def4a1dc3f5c4..8275c0991e464 100644 --- a/pandas/plotting/_matplotlib/core.py +++ b/pandas/plotting/_matplotlib/core.py @@ -29,7 +29,7 @@ import pandas.core.common as com from pandas.io.formats.printing import pprint_thing -from pandas.plotting._matplotlib.compat import _mpl_ge_3_0_0 +from pandas.plotting._matplotlib.compat import mpl_ge_3_0_0 from pandas.plotting._matplotlib.converter import register_pandas_matplotlib_converters from pandas.plotting._matplotlib.style import get_standard_colors from pandas.plotting._matplotlib.timeseries import ( @@ -944,7 +944,7 @@ def _plot_colorbar(self, ax: "Axes", **kwds): img = ax.collections[-1] cbar = self.fig.colorbar(img, ax=ax, **kwds) - if _mpl_ge_3_0_0(): + if mpl_ge_3_0_0(): # The workaround below is no longer necessary. 
return diff --git a/pandas/plotting/_matplotlib/tools.py b/pandas/plotting/_matplotlib/tools.py index 98aaab6838fba..c5b44f37150bb 100644 --- a/pandas/plotting/_matplotlib/tools.py +++ b/pandas/plotting/_matplotlib/tools.py @@ -307,7 +307,7 @@ def handle_shared_axes( sharey: bool, ): if nplots > 1: - if compat._mpl_ge_3_2_0(): + if compat.mpl_ge_3_2_0(): row_num = lambda x: x.get_subplotspec().rowspan.start col_num = lambda x: x.get_subplotspec().colspan.start else: diff --git a/pandas/tests/plotting/common.py b/pandas/tests/plotting/common.py index b753c96af6290..9301a29933d45 100644 --- a/pandas/tests/plotting/common.py +++ b/pandas/tests/plotting/common.py @@ -28,10 +28,10 @@ def setup_method(self, method): mpl.rcdefaults() - self.mpl_ge_2_2_3 = compat._mpl_ge_2_2_3() - self.mpl_ge_3_0_0 = compat._mpl_ge_3_0_0() - self.mpl_ge_3_1_0 = compat._mpl_ge_3_1_0() - self.mpl_ge_3_2_0 = compat._mpl_ge_3_2_0() + self.mpl_ge_2_2_3 = compat.mpl_ge_2_2_3() + self.mpl_ge_3_0_0 = compat.mpl_ge_3_0_0() + self.mpl_ge_3_1_0 = compat.mpl_ge_3_1_0() + self.mpl_ge_3_2_0 = compat.mpl_ge_3_2_0() self.bp_n_objects = 7 self.polycollection_factor = 2 diff --git a/pandas/tests/plotting/test_frame.py b/pandas/tests/plotting/test_frame.py index 3b3902647390d..d2b22c7a4c2e3 100644 --- a/pandas/tests/plotting/test_frame.py +++ b/pandas/tests/plotting/test_frame.py @@ -51,7 +51,7 @@ def _assert_xtickslabels_visibility(self, axes, expected): @pytest.mark.xfail(reason="Waiting for PR 34334", strict=True) @pytest.mark.slow def test_plot(self): - from pandas.plotting._matplotlib.compat import _mpl_ge_3_1_0 + from pandas.plotting._matplotlib.compat import mpl_ge_3_1_0 df = self.tdf _check_plot_works(df.plot, grid=False) @@ -69,7 +69,7 @@ def test_plot(self): self._check_axes_shape(axes, axes_num=4, layout=(4, 1)) df = DataFrame({"x": [1, 2], "y": [3, 4]}) - if _mpl_ge_3_1_0(): + if mpl_ge_3_1_0(): msg = "'Line2D' object has no property 'blarg'" else: msg = "Unknown property blarg" diff --git a/pandas/tests/plotting/test_misc.py b/pandas/tests/plotting/test_misc.py index 130acaa8bcd58..0208ab3e0225b 100644 --- a/pandas/tests/plotting/test_misc.py +++ b/pandas/tests/plotting/test_misc.py @@ -96,7 +96,7 @@ def test_bootstrap_plot(self): class TestDataFramePlots(TestPlotBase): @td.skip_if_no_scipy def test_scatter_matrix_axis(self): - from pandas.plotting._matplotlib.compat import _mpl_ge_3_0_0 + from pandas.plotting._matplotlib.compat import mpl_ge_3_0_0 scatter_matrix = plotting.scatter_matrix @@ -105,7 +105,7 @@ def test_scatter_matrix_axis(self): # we are plotting multiples on a sub-plot with tm.assert_produces_warning( - UserWarning, raise_on_extra_warnings=_mpl_ge_3_0_0() + UserWarning, raise_on_extra_warnings=mpl_ge_3_0_0() ): axes = _check_plot_works( scatter_matrix, filterwarnings="always", frame=df, range_padding=0.1 diff --git a/scripts/validate_unwanted_patterns.py b/scripts/validate_unwanted_patterns.py index 193fef026a96b..1a6d8cc8b9914 100755 --- a/scripts/validate_unwanted_patterns.py +++ b/scripts/validate_unwanted_patterns.py @@ -16,9 +16,7 @@ import sys import token import tokenize -from typing import IO, Callable, FrozenSet, Iterable, List, Tuple - -PATHS_TO_IGNORE: Tuple[str, ...] 
= ("asv_bench/env",) +from typing import IO, Callable, FrozenSet, Iterable, List, Set, Tuple def _get_literal_string_prefix_len(token_string: str) -> int: @@ -114,6 +112,58 @@ def bare_pytest_raises(file_obj: IO[str]) -> Iterable[Tuple[int, str]]: ) +PRIVATE_FUNCTIONS_ALLOWED = {"sys._getframe"} # no known alternative + + +def private_function_across_module(file_obj: IO[str]) -> Iterable[Tuple[int, str]]: + """ + Checking that a private function is not used across modules. + Parameters + ---------- + file_obj : IO + File-like object containing the Python code to validate. + Yields + ------ + line_number : int + Line number of the private function that is used across modules. + msg : str + Explenation of the error. + """ + contents = file_obj.read() + tree = ast.parse(contents) + + imported_modules: Set[str] = set() + + for node in ast.walk(tree): + if isinstance(node, (ast.Import, ast.ImportFrom)): + for module in node.names: + module_fqdn = module.name if module.asname is None else module.asname + imported_modules.add(module_fqdn) + + if not isinstance(node, ast.Call): + continue + + try: + module_name = node.func.value.id + function_name = node.func.attr + except AttributeError: + continue + + # Exception section # + + # (Debatable) Class case + if module_name[0].isupper(): + continue + # (Debatable) Dunder methods case + elif function_name.startswith("__") and function_name.endswith("__"): + continue + elif module_name + "." + function_name in PRIVATE_FUNCTIONS_ALLOWED: + continue + + if module_name in imported_modules and function_name.startswith("_"): + yield (node.lineno, f"Private function '{module_name}.{function_name}'") + + def strings_to_concatenate(file_obj: IO[str]) -> Iterable[Tuple[int, str]]: """ This test case is necessary after 'Black' (https://github.com/psf/black), @@ -293,6 +343,7 @@ def main( source_path: str, output_format: str, file_extensions_to_check: str, + excluded_file_paths: str, ) -> bool: """ Main entry point of the script. @@ -305,6 +356,10 @@ def main( Source path representing path to a file/directory. output_format : str Output format of the error message. + file_extensions_to_check : str + Coma seperated values of what file extensions to check. + excluded_file_paths : str + Coma seperated values of what file paths to exclude during the check. 
     Returns
     -------
@@ -325,6 +380,7 @@ def main(
     FILE_EXTENSIONS_TO_CHECK: FrozenSet[str] = frozenset(
         file_extensions_to_check.split(",")
     )
+    PATHS_TO_IGNORE = frozenset(excluded_file_paths.split(","))
 
     if os.path.isfile(source_path):
         file_path = source_path
@@ -362,6 +418,7 @@ def main(
 if __name__ == "__main__":
     available_validation_types: List[str] = [
         "bare_pytest_raises",
+        "private_function_across_module",
         "strings_to_concatenate",
         "strings_with_wrong_placed_whitespace",
     ]
@@ -389,6 +446,11 @@ def main(
         default="py,pyx,pxd,pxi",
         help="Comma separated file extensions to check.",
     )
+    parser.add_argument(
+        "--excluded-file-paths",
+        default="asv_bench/env",
+        help="Comma separated file paths to exclude during the check.",
+    )
 
     args = parser.parse_args()
 
@@ -398,5 +460,6 @@ def main(
             source_path=args.path,
            output_format=args.format,
             file_extensions_to_check=args.included_file_extensions,
+            excluded_file_paths=args.excluded_file_paths,
         )
     )

From 42289d0d106fb813b1ef77e23ca0186b6c8cff30 Mon Sep 17 00:00:00 2001
From: jbrockmendel
Date: Sat, 5 Sep 2020 10:54:30 -0700
Subject: [PATCH 445/632] CLN: unused case in compare_or_regex_search (#36143)

---
 pandas/core/array_algos/replace.py | 14 ++++----------
 1 file changed, 4 insertions(+), 10 deletions(-)

diff --git a/pandas/core/array_algos/replace.py b/pandas/core/array_algos/replace.py
index 6ac3cc1f9f2fe..09f9aefd64096 100644
--- a/pandas/core/array_algos/replace.py
+++ b/pandas/core/array_algos/replace.py
@@ -3,7 +3,7 @@
 """
 import operator
 import re
-from typing import Optional, Pattern, Union
+from typing import Pattern, Union
 
 import numpy as np
 
@@ -14,14 +14,10 @@
     is_numeric_v_string_like,
     is_scalar,
 )
-from pandas.core.dtypes.missing import isna
 
 
 def compare_or_regex_search(
-    a: ArrayLike,
-    b: Union[Scalar, Pattern],
-    regex: bool = False,
-    mask: Optional[ArrayLike] = None,
+    a: ArrayLike, b: Union[Scalar, Pattern], regex: bool, mask: ArrayLike,
 ) -> Union[ArrayLike, bool]:
     """
     Compare two array_like inputs of the same shape or two scalar values
@@ -33,8 +29,8 @@ def compare_or_regex_search(
     ----------
     a : array_like
     b : scalar or regex pattern
-    regex : bool, default False
-    mask : array_like or None (default)
+    regex : bool
+    mask : array_like
 
     Returns
     -------
@@ -68,8 +64,6 @@ def _check_comparison_types(
     )
 
     # GH#32621 use mask to avoid comparing to NAs
-    if mask is None and isinstance(a, np.ndarray) and not isinstance(b, np.ndarray):
-        mask = np.reshape(~(isna(a)), a.shape)
     if isinstance(a, np.ndarray):
         a = a[mask]
 
From cc54943c5f64d38232b009d0451a763842682d62 Mon Sep 17 00:00:00 2001
From: Matthew Roeschke
Date: Sat, 5 Sep 2020 12:40:06 -0700
Subject: [PATCH 446/632] REF: window/test_dtypes.py with pytest idioms (#35918)

---
 pandas/tests/window/conftest.py    |  31 +++
 pandas/tests/window/test_dtypes.py | 315 ++++++++---------------------
 2 files changed, 118 insertions(+), 228 deletions(-)

diff --git a/pandas/tests/window/conftest.py b/pandas/tests/window/conftest.py
index eb8252d5731be..7f03fa2a5ea0d 100644
--- a/pandas/tests/window/conftest.py
+++ b/pandas/tests/window/conftest.py
@@ -308,3 +308,34 @@ def which(request):
 def halflife_with_times(request):
     """Halflife argument for EWM when times is specified."""
     return request.param
+
+
+@pytest.fixture(
+    params=[
+        "object",
+        "category",
+        "int8",
+        "int16",
+        "int32",
+        "int64",
+        "uint8",
+        "uint16",
+        "uint32",
+        "uint64",
+        "float16",
+        "float32",
+        "float64",
+        "m8[ns]",
+        "M8[ns]",
+        pytest.param(
+            "datetime64[ns, UTC]",
+            marks=pytest.mark.skip(
+                "direct creation of extension dtype 
datetime64[ns, UTC] " + "is not supported ATM" + ), + ), + ] +) +def dtypes(request): + """Dtypes for window tests""" + return request.param diff --git a/pandas/tests/window/test_dtypes.py b/pandas/tests/window/test_dtypes.py index 0aa5bf019ff5e..245b48b351684 100644 --- a/pandas/tests/window/test_dtypes.py +++ b/pandas/tests/window/test_dtypes.py @@ -1,5 +1,3 @@ -from itertools import product - import numpy as np import pytest @@ -10,234 +8,95 @@ # gh-12373 : rolling functions error on float32 data # make sure rolling functions works for different dtypes # -# NOTE that these are yielded tests and so _create_data -# is explicitly called. -# # further note that we are only checking rolling for fully dtype # compliance (though both expanding and ewm inherit) -class Dtype: - window = 2 - - funcs = { - "count": lambda v: v.count(), - "max": lambda v: v.max(), - "min": lambda v: v.min(), - "sum": lambda v: v.sum(), - "mean": lambda v: v.mean(), - "std": lambda v: v.std(), - "var": lambda v: v.var(), - "median": lambda v: v.median(), - } - - def get_expects(self): - expects = { - "sr1": { - "count": Series([1, 2, 2, 2, 2], dtype="float64"), - "max": Series([np.nan, 1, 2, 3, 4], dtype="float64"), - "min": Series([np.nan, 0, 1, 2, 3], dtype="float64"), - "sum": Series([np.nan, 1, 3, 5, 7], dtype="float64"), - "mean": Series([np.nan, 0.5, 1.5, 2.5, 3.5], dtype="float64"), - "std": Series([np.nan] + [np.sqrt(0.5)] * 4, dtype="float64"), - "var": Series([np.nan, 0.5, 0.5, 0.5, 0.5], dtype="float64"), - "median": Series([np.nan, 0.5, 1.5, 2.5, 3.5], dtype="float64"), +def get_dtype(dtype, coerce_int=None): + if coerce_int is False and "int" in dtype: + return None + if dtype != "category": + return np.dtype(dtype) + return dtype + + +@pytest.mark.parametrize( + "method, data, expected_data, coerce_int", + [ + ("count", np.arange(5), [1, 2, 2, 2, 2], True), + ("count", np.arange(10, 0, -2), [1, 2, 2, 2, 2], True), + ("count", [0, 1, 2, np.nan, 4], [1, 2, 2, 1, 1], False), + ("max", np.arange(5), [np.nan, 1, 2, 3, 4], True), + ("max", np.arange(10, 0, -2), [np.nan, 10, 8, 6, 4], True), + ("max", [0, 1, 2, np.nan, 4], [np.nan, 1, 2, np.nan, np.nan], False), + ("min", np.arange(5), [np.nan, 0, 1, 2, 3], True), + ("min", np.arange(10, 0, -2), [np.nan, 8, 6, 4, 2], True), + ("min", [0, 1, 2, np.nan, 4], [np.nan, 0, 1, np.nan, np.nan], False), + ("sum", np.arange(5), [np.nan, 1, 3, 5, 7], True), + ("sum", np.arange(10, 0, -2), [np.nan, 18, 14, 10, 6], True), + ("sum", [0, 1, 2, np.nan, 4], [np.nan, 1, 3, np.nan, np.nan], False), + ("mean", np.arange(5), [np.nan, 0.5, 1.5, 2.5, 3.5], True), + ("mean", np.arange(10, 0, -2), [np.nan, 9, 7, 5, 3], True), + ("mean", [0, 1, 2, np.nan, 4], [np.nan, 0.5, 1.5, np.nan, np.nan], False), + ("std", np.arange(5), [np.nan] + [np.sqrt(0.5)] * 4, True), + ("std", np.arange(10, 0, -2), [np.nan] + [np.sqrt(2)] * 4, True), + ( + "std", + [0, 1, 2, np.nan, 4], + [np.nan] + [np.sqrt(0.5)] * 2 + [np.nan] * 2, + False, + ), + ("var", np.arange(5), [np.nan, 0.5, 0.5, 0.5, 0.5], True), + ("var", np.arange(10, 0, -2), [np.nan, 2, 2, 2, 2], True), + ("var", [0, 1, 2, np.nan, 4], [np.nan, 0.5, 0.5, np.nan, np.nan], False), + ("median", np.arange(5), [np.nan, 0.5, 1.5, 2.5, 3.5], True), + ("median", np.arange(10, 0, -2), [np.nan, 9, 7, 5, 3], True), + ("median", [0, 1, 2, np.nan, 4], [np.nan, 0.5, 1.5, np.nan, np.nan], False), + ], +) +def test_series_dtypes(method, data, expected_data, coerce_int, dtypes): + s = Series(data, dtype=get_dtype(dtypes, coerce_int=coerce_int)) + if dtypes in 
("m8[ns]", "M8[ns]") and method != "count": + msg = "No numeric types to aggregate" + with pytest.raises(DataError, match=msg): + getattr(s.rolling(2), method)() + else: + result = getattr(s.rolling(2), method)() + expected = Series(expected_data, dtype="float64") + tm.assert_almost_equal(result, expected) + + +@pytest.mark.parametrize( + "method, expected_data", + [ + ("count", {0: Series([1, 2, 2, 2, 2]), 1: Series([1, 2, 2, 2, 2])}), + ("max", {0: Series([np.nan, 2, 4, 6, 8]), 1: Series([np.nan, 3, 5, 7, 9])}), + ("min", {0: Series([np.nan, 0, 2, 4, 6]), 1: Series([np.nan, 1, 3, 5, 7])}), + ( + "sum", + {0: Series([np.nan, 2, 6, 10, 14]), 1: Series([np.nan, 4, 8, 12, 16])}, + ), + ("mean", {0: Series([np.nan, 1, 3, 5, 7]), 1: Series([np.nan, 2, 4, 6, 8])}), + ( + "std", + { + 0: Series([np.nan] + [np.sqrt(2)] * 4), + 1: Series([np.nan] + [np.sqrt(2)] * 4), }, - "sr2": { - "count": Series([1, 2, 2, 2, 2], dtype="float64"), - "max": Series([np.nan, 10, 8, 6, 4], dtype="float64"), - "min": Series([np.nan, 8, 6, 4, 2], dtype="float64"), - "sum": Series([np.nan, 18, 14, 10, 6], dtype="float64"), - "mean": Series([np.nan, 9, 7, 5, 3], dtype="float64"), - "std": Series([np.nan] + [np.sqrt(2)] * 4, dtype="float64"), - "var": Series([np.nan, 2, 2, 2, 2], dtype="float64"), - "median": Series([np.nan, 9, 7, 5, 3], dtype="float64"), - }, - "sr3": { - "count": Series([1, 2, 2, 1, 1], dtype="float64"), - "max": Series([np.nan, 1, 2, np.nan, np.nan], dtype="float64"), - "min": Series([np.nan, 0, 1, np.nan, np.nan], dtype="float64"), - "sum": Series([np.nan, 1, 3, np.nan, np.nan], dtype="float64"), - "mean": Series([np.nan, 0.5, 1.5, np.nan, np.nan], dtype="float64"), - "std": Series( - [np.nan] + [np.sqrt(0.5)] * 2 + [np.nan] * 2, dtype="float64" - ), - "var": Series([np.nan, 0.5, 0.5, np.nan, np.nan], dtype="float64"), - "median": Series([np.nan, 0.5, 1.5, np.nan, np.nan], dtype="float64"), - }, - "df": { - "count": DataFrame( - {0: Series([1, 2, 2, 2, 2]), 1: Series([1, 2, 2, 2, 2])}, - dtype="float64", - ), - "max": DataFrame( - {0: Series([np.nan, 2, 4, 6, 8]), 1: Series([np.nan, 3, 5, 7, 9])}, - dtype="float64", - ), - "min": DataFrame( - {0: Series([np.nan, 0, 2, 4, 6]), 1: Series([np.nan, 1, 3, 5, 7])}, - dtype="float64", - ), - "sum": DataFrame( - { - 0: Series([np.nan, 2, 6, 10, 14]), - 1: Series([np.nan, 4, 8, 12, 16]), - }, - dtype="float64", - ), - "mean": DataFrame( - {0: Series([np.nan, 1, 3, 5, 7]), 1: Series([np.nan, 2, 4, 6, 8])}, - dtype="float64", - ), - "std": DataFrame( - { - 0: Series([np.nan] + [np.sqrt(2)] * 4), - 1: Series([np.nan] + [np.sqrt(2)] * 4), - }, - dtype="float64", - ), - "var": DataFrame( - {0: Series([np.nan, 2, 2, 2, 2]), 1: Series([np.nan, 2, 2, 2, 2])}, - dtype="float64", - ), - "median": DataFrame( - {0: Series([np.nan, 1, 3, 5, 7]), 1: Series([np.nan, 2, 4, 6, 8])}, - dtype="float64", - ), - }, - } - return expects - - def _create_dtype_data(self, dtype): - sr1 = Series(np.arange(5), dtype=dtype) - sr2 = Series(np.arange(10, 0, -2), dtype=dtype) - sr3 = sr1.copy() - sr3[3] = np.NaN - df = DataFrame(np.arange(10).reshape((5, 2)), dtype=dtype) - - data = {"sr1": sr1, "sr2": sr2, "sr3": sr3, "df": df} - - return data - - def _create_data(self): - self.data = self._create_dtype_data(self.dtype) - self.expects = self.get_expects() - - def test_dtypes(self): - self._create_data() - for f_name, d_name in product(self.funcs.keys(), self.data.keys()): - - f = self.funcs[f_name] - d = self.data[d_name] - exp = self.expects[d_name][f_name] - self.check_dtypes(f, f_name, 
d, d_name, exp)
-
-    def check_dtypes(self, f, f_name, d, d_name, exp):
-        roll = d.rolling(window=self.window)
-        result = f(roll)
-        tm.assert_almost_equal(result, exp)
-
-
-class TestDtype_object(Dtype):
-    dtype = object
-
-
-class Dtype_integer(Dtype):
-    pass
-
-
-class TestDtype_int8(Dtype_integer):
-    dtype = np.int8
-
-
-class TestDtype_int16(Dtype_integer):
-    dtype = np.int16
-
-
-class TestDtype_int32(Dtype_integer):
-    dtype = np.int32
-
-
-class TestDtype_int64(Dtype_integer):
-    dtype = np.int64
-
-
-class Dtype_uinteger(Dtype):
-    pass
-
-
-class TestDtype_uint8(Dtype_uinteger):
-    dtype = np.uint8
-
-
-class TestDtype_uint16(Dtype_uinteger):
-    dtype = np.uint16
-
-
-class TestDtype_uint32(Dtype_uinteger):
-    dtype = np.uint32
-
-
-class TestDtype_uint64(Dtype_uinteger):
-    dtype = np.uint64
-
-
-class Dtype_float(Dtype):
-    pass
-
-
-class TestDtype_float16(Dtype_float):
-    dtype = np.float16
-
-
-class TestDtype_float32(Dtype_float):
-    dtype = np.float32
-
-
-class TestDtype_float64(Dtype_float):
-    dtype = np.float64
-
-
-class TestDtype_category(Dtype):
-    dtype = "category"
-    include_df = False
-
-    def _create_dtype_data(self, dtype):
-        sr1 = Series(range(5), dtype=dtype)
-        sr2 = Series(range(10, 0, -2), dtype=dtype)
-
-        data = {"sr1": sr1, "sr2": sr2}
-
-        return data
-
-
-class DatetimeLike(Dtype):
-    def check_dtypes(self, f, f_name, d, d_name, exp):
-
-        roll = d.rolling(window=self.window)
-        if f_name == "count":
-            result = f(roll)
-            tm.assert_almost_equal(result, exp)
-
-        else:
-            msg = "No numeric types to aggregate"
-            with pytest.raises(DataError, match=msg):
-                f(roll)
-
-
-class TestDtype_timedelta(DatetimeLike):
-    dtype = np.dtype("m8[ns]")
-
-
-class TestDtype_datetime(DatetimeLike):
-    dtype = np.dtype("M8[ns]")
-
-
-class TestDtype_datetime64UTC(DatetimeLike):
-    dtype = "datetime64[ns, UTC]"
-
-    def _create_data(self):
-        pytest.skip(
-            "direct creation of extension dtype "
-            "datetime64[ns, UTC] is not supported ATM"
-        )
+        ),
+        ("var", {0: Series([np.nan, 2, 2, 2, 2]), 1: Series([np.nan, 2, 2, 2, 2])}),
+        ("median", {0: Series([np.nan, 1, 3, 5, 7]), 1: Series([np.nan, 2, 4, 6, 8])}),
+    ],
+)
+def test_dataframe_dtypes(method, expected_data, dtypes):
+    if dtypes == "category":
+        pytest.skip("Category dataframe testing not implemented.")
+    df = DataFrame(np.arange(10).reshape((5, 2)), dtype=get_dtype(dtypes))
+    if dtypes in ("m8[ns]", "M8[ns]") and method != "count":
+        msg = "No numeric types to aggregate"
+        with pytest.raises(DataError, match=msg):
+            getattr(df.rolling(2), method)()
+    else:
+        result = getattr(df.rolling(2), method)()
+        expected = DataFrame(expected_data, dtype="float64")
+        tm.assert_frame_equal(result, expected)

From 8c3ad648aa5b8f351eaa0081a8944f3398d36d57 Mon Sep 17 00:00:00 2001
From: Fangchen Li
Date: Sat, 5 Sep 2020 14:50:43 -0500
Subject: [PATCH 447/632] DOC: add userwarning doc about mpl #35684 (#36145)

---
 doc/source/whatsnew/v1.2.0.rst | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index b1229a5d5823d..d7d2e3cf876ca 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -301,7 +301,7 @@ Plotting
 ^^^^^^^^
 
 - Bug in :meth:`DataFrame.plot` where a marker letter in the ``style`` keyword sometimes causes a ``ValueError`` (:issue:`21003`)
--
+- :meth:`DataFrame.plot` and :meth:`Series.plot` raise ``UserWarning`` about usage of FixedFormatter and FixedLocator (:issue:`35684` and :issue:`35945`)
 
 Groupby/resample/rolling
 ^^^^^^^^^^^^^^^^^^^^^^^^
From 
cf1aa9eb03a86bd35c04e5e305f245493b4325d3 Mon Sep 17 00:00:00 2001
From: jbrockmendel
Date: Sat, 5 Sep 2020 12:55:36 -0700
Subject: [PATCH 448/632] BUG: item_cache invalidation in get_numeric_data
 (#35882)

---
 doc/source/whatsnew/v1.1.2.rst              |  1 +
 pandas/core/internals/managers.py           |  1 -
 pandas/tests/frame/methods/test_cov_corr.py | 17 +++++++++++++++++
 3 files changed, 18 insertions(+), 1 deletion(-)

diff --git a/doc/source/whatsnew/v1.1.2.rst b/doc/source/whatsnew/v1.1.2.rst
index d1a66256454ca..6935a64c7572f 100644
--- a/doc/source/whatsnew/v1.1.2.rst
+++ b/doc/source/whatsnew/v1.1.2.rst
@@ -36,6 +36,7 @@ Bug fixes
 - Bug in :meth:`Float64Index.__contains__` incorrectly raising ``TypeError`` instead of returning ``False`` (:issue:`35788`)
 - Bug in :meth:`Series.dt.isocalendar` and :meth:`DatetimeIndex.isocalendar` that returned incorrect year for certain dates (:issue:`36032`)
 - Bug in :class:`DataFrame` indexing returning an incorrect :class:`Series` in some cases when the series has been altered and a cache not invalidated (:issue:`33675`)
+- Bug in :meth:`DataFrame.corr` causing subsequent indexing lookups to be incorrect (:issue:`35882`)
 
 .. ---------------------------------------------------------------------------
 
diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py
index 57a4a8c2ace8a..13bc6a2e82195 100644
--- a/pandas/core/internals/managers.py
+++ b/pandas/core/internals/managers.py
@@ -691,7 +691,6 @@ def get_numeric_data(self, copy: bool = False) -> "BlockManager":
         copy : bool, default False
             Whether to copy the blocks
         """
-        self._consolidate_inplace()
         return self._combine([b for b in self.blocks if b.is_numeric], copy)
 
     def _combine(self: T, blocks: List[Block], copy: bool = True) -> T:
diff --git a/pandas/tests/frame/methods/test_cov_corr.py b/pandas/tests/frame/methods/test_cov_corr.py
index d3548b639572d..f307acd8c2178 100644
--- a/pandas/tests/frame/methods/test_cov_corr.py
+++ b/pandas/tests/frame/methods/test_cov_corr.py
@@ -191,6 +191,23 @@ def test_corr_nullable_integer(self, nullable_column, other_column, method):
         expected = pd.DataFrame(np.ones((2, 2)), columns=["a", "b"], index=["a", "b"])
         tm.assert_frame_equal(result, expected)
 
+    def test_corr_item_cache(self):
+        # Check that corr does not lead to incorrect entries in item_cache
+
+        df = pd.DataFrame({"A": range(10)})
+        df["B"] = range(10)[::-1]
+
+        ser = df["A"]  # populate item_cache
+        assert len(df._mgr.blocks) == 2
+
+        _ = df.corr()
+
+        # Check that the corr didn't break link between ser and df
+        ser.values[0] = 99
+        assert df.loc[0, "A"] == 99
+        assert df["A"] is ser
+        assert df.values[0, 0] == 99
+
 
 class TestDataFrameCorrWith:
     def test_corrwith(self, datetime_frame):
From ce6882fd3e889b8ca05e95245dc1b29d709ca2f6 Mon Sep 17 00:00:00 2001
From: Daniel Saxton <2658661+dsaxton@users.noreply.github.com>
Date: Sat, 5 Sep 2020 17:18:51 -0400
Subject: [PATCH 449/632] Make MultiIndex.get_loc raise for unhashable type
 (#35914)

Co-authored-by: Jeff Reback
---
 doc/source/whatsnew/v1.1.2.rst                      |  1 +
 pandas/core/indexes/multi.py                        |  5 +++--
 pandas/tests/frame/indexing/test_indexing.py        |  2 +-
 pandas/tests/indexing/multiindex/test_multiindex.py |  8 ++++++++
 pandas/tests/series/indexing/test_setitem.py        | 11 ++++++++++-
 5 files changed, 23 insertions(+), 4 deletions(-)

diff --git a/doc/source/whatsnew/v1.1.2.rst b/doc/source/whatsnew/v1.1.2.rst
index 6935a64c7572f..c6cfcc6730112 100644
--- a/doc/source/whatsnew/v1.1.2.rst
+++ b/doc/source/whatsnew/v1.1.2.rst
@@ -17,6 +17,7 @@ Fixed regressions
 - Regression 
in :meth:`DatetimeIndex.intersection` incorrectly raising ``AssertionError`` when intersecting against a list (:issue:`35876`) - Fix regression in updating a column inplace (e.g. using ``df['col'].fillna(.., inplace=True)``) (:issue:`35731`) - Performance regression for :meth:`RangeIndex.format` (:issue:`35712`) +- Regression where :meth:`MultiIndex.get_loc` would return a slice spanning the full index when passed an empty list (:issue:`35878`) - Fix regression in invalid cache after an indexing operation; this can manifest when setting which does not update the data (:issue:`35521`) - Regression in :meth:`DataFrame.replace` where a ``TypeError`` would be raised when attempting to replace elements of type :class:`Interval` (:issue:`35931`) - Fix regression in pickle roundtrip of the ``closed`` attribute of :class:`IntervalIndex` (:issue:`35658`) diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index f66b009e6d505..080ece8547479 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -2725,6 +2725,8 @@ def get_loc(self, key, method=None): "currently supported for MultiIndex" ) + hash(key) + def _maybe_to_slice(loc): """convert integer indexer to boolean mask or slice if possible""" if not isinstance(loc, np.ndarray) or loc.dtype != "int64": @@ -2739,8 +2741,7 @@ def _maybe_to_slice(loc): mask[loc] = True return mask - if not isinstance(key, (tuple, list)): - # not including list here breaks some indexing, xref #30892 + if not isinstance(key, tuple): loc = self._get_level_indexer(key, level=0) return _maybe_to_slice(loc) diff --git a/pandas/tests/frame/indexing/test_indexing.py b/pandas/tests/frame/indexing/test_indexing.py index d27487dfb8aaa..e4549dfb3e68d 100644 --- a/pandas/tests/frame/indexing/test_indexing.py +++ b/pandas/tests/frame/indexing/test_indexing.py @@ -2111,7 +2111,7 @@ def test_type_error_multiindex(self): ) dg = df.pivot_table(index="i", columns="c", values=["x", "y"]) - with pytest.raises(TypeError, match="is an invalid key"): + with pytest.raises(TypeError, match="unhashable type"): dg[:, 0] index = Index(range(2), name="i") diff --git a/pandas/tests/indexing/multiindex/test_multiindex.py b/pandas/tests/indexing/multiindex/test_multiindex.py index 5e5fcd3db88d8..4565d79c632de 100644 --- a/pandas/tests/indexing/multiindex/test_multiindex.py +++ b/pandas/tests/indexing/multiindex/test_multiindex.py @@ -1,4 +1,5 @@ import numpy as np +import pytest import pandas._libs.index as _index from pandas.errors import PerformanceWarning @@ -83,3 +84,10 @@ def test_nested_tuples_duplicates(self): df3 = df.copy(deep=True) df3.loc[[(dti[0], "a")], "c2"] = 1.0 tm.assert_frame_equal(df3, expected) + + def test_multiindex_get_loc_list_raises(self): + # https://github.com/pandas-dev/pandas/issues/35878 + idx = pd.MultiIndex.from_tuples([("a", 1), ("b", 2)]) + msg = "unhashable type" + with pytest.raises(TypeError, match=msg): + idx.get_loc([]) diff --git a/pandas/tests/series/indexing/test_setitem.py b/pandas/tests/series/indexing/test_setitem.py index 3463de25ad91b..593d1c78a19e2 100644 --- a/pandas/tests/series/indexing/test_setitem.py +++ b/pandas/tests/series/indexing/test_setitem.py @@ -1,6 +1,7 @@ import numpy as np -from pandas import NaT, Series, date_range +from pandas import MultiIndex, NaT, Series, date_range +import pandas.testing as tm class TestSetitemDT64Values: @@ -17,3 +18,11 @@ def test_setitem_none_nan(self): series[5:7] = np.nan assert series[6] is NaT + + def test_setitem_multiindex_empty_slice(self): + # 
https://github.com/pandas-dev/pandas/issues/35878 + idx = MultiIndex.from_tuples([("a", 1), ("b", 2)]) + result = Series([1, 2], index=idx) + expected = result.copy() + result.loc[[]] = 0 + tm.assert_series_equal(result, expected) From cda2f5488af0be6ad6463514435faa78cf4563d4 Mon Sep 17 00:00:00 2001 From: Daniel Saxton <2658661+dsaxton@users.noreply.github.com> Date: Sat, 5 Sep 2020 18:36:42 -0400 Subject: [PATCH 450/632] ENH: Make explode work for sets (#35637) --- doc/source/whatsnew/v1.2.0.rst | 2 +- pandas/_libs/reshape.pyx | 6 ++++-- pandas/core/frame.py | 7 ++++--- pandas/core/series.py | 7 ++++--- pandas/tests/frame/methods/test_explode.py | 8 ++++++++ pandas/tests/series/methods/test_explode.py | 8 ++++++++ 6 files changed, 29 insertions(+), 9 deletions(-) diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index d7d2e3cf876ca..ff9e803b4990a 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -103,7 +103,7 @@ Other enhancements - Added :meth:`~DataFrame.set_flags` for setting table-wide flags on a ``Series`` or ``DataFrame`` (:issue:`28394`) - :class:`Index` with object dtype supports division and multiplication (:issue:`34160`) -- +- :meth:`DataFrame.explode` and :meth:`Series.explode` now support exploding of sets (:issue:`35614`) - .. _whatsnew_120.api_breaking.python: diff --git a/pandas/_libs/reshape.pyx b/pandas/_libs/reshape.pyx index 5c6c15fb50fed..75dbb4b74aabd 100644 --- a/pandas/_libs/reshape.pyx +++ b/pandas/_libs/reshape.pyx @@ -124,7 +124,8 @@ def explode(ndarray[object] values): counts = np.zeros(n, dtype='int64') for i in range(n): v = values[i] - if c_is_list_like(v, False): + + if c_is_list_like(v, True): if len(v): counts[i] += len(v) else: @@ -138,8 +139,9 @@ def explode(ndarray[object] values): for i in range(n): v = values[i] - if c_is_list_like(v, False): + if c_is_list_like(v, True): if len(v): + v = list(v) for j in range(len(v)): result[count] = v[j] count += 1 diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 29d6fb9aa7d56..150d6e24dbb86 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -7091,10 +7091,11 @@ def explode( Notes ----- - This routine will explode list-likes including lists, tuples, + This routine will explode list-likes including lists, tuples, sets, Series, and np.ndarray. The result dtype of the subset rows will - be object. Scalars will be returned unchanged. Empty list-likes will - result in a np.nan for that row. + be object. Scalars will be returned unchanged, and empty list-likes will + result in a np.nan for that row. In addition, the ordering of rows in the + output will be non-deterministic when exploding sets. Examples -------- diff --git a/pandas/core/series.py b/pandas/core/series.py index d8fdaa2a60252..6cbd93135a2ca 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -3829,10 +3829,11 @@ def explode(self, ignore_index: bool = False) -> "Series": Notes ----- - This routine will explode list-likes including lists, tuples, + This routine will explode list-likes including lists, tuples, sets, Series, and np.ndarray. The result dtype of the subset rows will - be object. Scalars will be returned unchanged. Empty list-likes will - result in a np.nan for that row. + be object. Scalars will be returned unchanged, and empty list-likes will + result in a np.nan for that row. In addition, the ordering of elements in + the output will be non-deterministic when exploding sets. 
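An illustration of the set behavior documented above, mirroring the new tests added below (element order within an exploded set is not guaranteed, hence the sort):

>>> df = pd.DataFrame({"a": [{"x", "y"}], "b": [1]}, index=[1])
>>> df.explode("a").sort_values(by="a")
   a  b
1  x  1
1  y  1
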
Examples -------- diff --git a/pandas/tests/frame/methods/test_explode.py b/pandas/tests/frame/methods/test_explode.py index 2bbe8ac2d5b81..bd0901387eeed 100644 --- a/pandas/tests/frame/methods/test_explode.py +++ b/pandas/tests/frame/methods/test_explode.py @@ -172,3 +172,11 @@ def test_ignore_index(): {"id": [0, 0, 10, 10], "values": list("abcd")}, index=[0, 1, 2, 3] ) tm.assert_frame_equal(result, expected) + + +def test_explode_sets(): + # https://github.com/pandas-dev/pandas/issues/35614 + df = pd.DataFrame({"a": [{"x", "y"}], "b": [1]}, index=[1]) + result = df.explode(column="a").sort_values(by="a") + expected = pd.DataFrame({"a": ["x", "y"], "b": [1, 1]}, index=[1, 1]) + tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/series/methods/test_explode.py b/pandas/tests/series/methods/test_explode.py index 4b65e042f7b02..1f0fbd1cc5ecb 100644 --- a/pandas/tests/series/methods/test_explode.py +++ b/pandas/tests/series/methods/test_explode.py @@ -126,3 +126,11 @@ def test_ignore_index(): result = s.explode(ignore_index=True) expected = pd.Series([1, 2, 3, 4], index=[0, 1, 2, 3], dtype=object) tm.assert_series_equal(result, expected) + + +def test_explode_sets(): + # https://github.com/pandas-dev/pandas/issues/35614 + s = pd.Series([{"a", "b", "c"}], index=[1]) + result = s.explode().sort_values() + expected = pd.Series(["a", "b", "c"], index=[1, 1, 1]) + tm.assert_series_equal(result, expected) From c688a0f4e39f362d1d88c35ab6093bc7963fc525 Mon Sep 17 00:00:00 2001 From: Daniel Saxton <2658661+dsaxton@users.noreply.github.com> Date: Sat, 5 Sep 2020 19:13:44 -0400 Subject: [PATCH 451/632] BUG: Don't raise when constructing Series from ordered set (#36054) --- doc/source/whatsnew/v1.1.2.rst | 1 + pandas/core/construction.py | 9 ++++++--- pandas/tests/series/test_constructors.py | 10 ++++++++++ 3 files changed, 17 insertions(+), 3 deletions(-) diff --git a/doc/source/whatsnew/v1.1.2.rst b/doc/source/whatsnew/v1.1.2.rst index c6cfcc6730112..b8f6d0e52d058 100644 --- a/doc/source/whatsnew/v1.1.2.rst +++ b/doc/source/whatsnew/v1.1.2.rst @@ -35,6 +35,7 @@ Bug fixes - Bug in :meth:`DataFrame.apply` with ``result_type="reduce"`` returning with incorrect index (:issue:`35683`) - Bug in :meth:`DateTimeIndex.format` and :meth:`PeriodIndex.format` with ``name=True`` setting the first item to ``"None"`` where it should be ``""`` (:issue:`35712`) - Bug in :meth:`Float64Index.__contains__` incorrectly raising ``TypeError`` instead of returning ``False`` (:issue:`35788`) +- Bug in :class:`Series` constructor incorrectly raising a ``TypeError`` when passed an ordered set (:issue:`36044`) - Bug in :meth:`Series.dt.isocalendar` and :meth:`DatetimeIndex.isocalendar` that returned incorrect year for certain dates (:issue:`36032`) - Bug in :class:`DataFrame` indexing returning an incorrect :class:`Series` in some cases when the series has been altered and a cache not invalidated (:issue:`33675`) - Bug in :meth:`DataFrame.corr` causing subsequent indexing lookups to be incorrect (:issue:`35882`) diff --git a/pandas/core/construction.py b/pandas/core/construction.py index 9d6c2789af25b..3812c306b8eb4 100644 --- a/pandas/core/construction.py +++ b/pandas/core/construction.py @@ -438,7 +438,12 @@ def sanitize_array( subarr = subarr.copy() return subarr - elif isinstance(data, (list, tuple)) and len(data) > 0: + elif isinstance(data, (list, tuple, abc.Set, abc.ValuesView)) and len(data) > 0: + if isinstance(data, set): + # Raise only for unordered sets, e.g., not for dict_keys + 
raise TypeError("Set type is unordered") + data = list(data) + if dtype is not None: subarr = _try_cast(data, dtype, copy, raise_cast_failure) else: @@ -450,8 +455,6 @@ def sanitize_array( # GH#16804 arr = np.arange(data.start, data.stop, data.step, dtype="int64") subarr = _try_cast(arr, dtype, copy, raise_cast_failure) - elif isinstance(data, abc.Set): - raise TypeError("Set type is unordered") elif lib.is_scalar(data) and index is not None and dtype is not None: data = maybe_cast_to_datetime(data, dtype) if not lib.is_scalar(data): diff --git a/pandas/tests/series/test_constructors.py b/pandas/tests/series/test_constructors.py index bcf7039ec9039..ce078059479b4 100644 --- a/pandas/tests/series/test_constructors.py +++ b/pandas/tests/series/test_constructors.py @@ -1464,3 +1464,13 @@ def test_constructor_sparse_datetime64(self, values): arr = pd.arrays.SparseArray(values, dtype=dtype) expected = pd.Series(arr) tm.assert_series_equal(result, expected) + + def test_construction_from_ordered_collection(self): + # https://github.com/pandas-dev/pandas/issues/36044 + result = Series({"a": 1, "b": 2}.keys()) + expected = Series(["a", "b"]) + tm.assert_series_equal(result, expected) + + result = Series({"a": 1, "b": 2}.values()) + expected = Series([1, 2]) + tm.assert_series_equal(result, expected) From 337f3ca60e222baedbd35332bcd0dbd8fe4d08ed Mon Sep 17 00:00:00 2001 From: Joris Van den Bossche Date: Sun, 6 Sep 2020 18:58:32 +0200 Subject: [PATCH 452/632] REGR: append tz-aware DataFrame with tz-naive values (#36115) --- doc/source/whatsnew/v1.1.2.rst | 1 + pandas/core/dtypes/concat.py | 6 ++++-- pandas/core/internals/concat.py | 8 ++++++-- pandas/tests/reshape/test_concat.py | 17 +++++++++++++++++ 4 files changed, 28 insertions(+), 4 deletions(-) diff --git a/doc/source/whatsnew/v1.1.2.rst b/doc/source/whatsnew/v1.1.2.rst index b8f6d0e52d058..f0adc951a5f99 100644 --- a/doc/source/whatsnew/v1.1.2.rst +++ b/doc/source/whatsnew/v1.1.2.rst @@ -16,6 +16,7 @@ Fixed regressions ~~~~~~~~~~~~~~~~~ - Regression in :meth:`DatetimeIndex.intersection` incorrectly raising ``AssertionError`` when intersecting against a list (:issue:`35876`) - Fix regression in updating a column inplace (e.g. 
using ``df['col'].fillna(.., inplace=True)``) (:issue:`35731`) +- Fix regression in :meth:`DataFrame.append` mixing tz-aware and tz-naive datetime columns (:issue:`35460`) - Performance regression for :meth:`RangeIndex.format` (:issue:`35712`) - Regression where :meth:`MultiIndex.get_loc` would return a slice spanning the full index when passed an empty list (:issue:`35878`) - Fix regression in invalid cache after an indexing operation; this can manifest when setting which does not update the data (:issue:`35521`) - Regression in :meth:`DataFrame.replace` where a ``TypeError`` would be raised when attempting to replace elements of type :class:`Interval` (:issue:`35931`) - Fix regression in pickle roundtrip of the ``closed`` attribute of :class:`IntervalIndex` (:issue:`35658`)
diff --git a/pandas/core/dtypes/concat.py b/pandas/core/dtypes/concat.py index 9902016475b22..dd005752a4832 100644 --- a/pandas/core/dtypes/concat.py +++ b/pandas/core/dtypes/concat.py @@ -148,15 +148,17 @@ def is_nonempty(x) -> bool: any_ea = any(is_extension_array_dtype(x.dtype) for x in to_concat) if any_ea: + # we ignore axis here, as internally concatenating with EAs is always + # for axis=0 if not single_dtype: target_dtype = find_common_type([x.dtype for x in to_concat]) to_concat = [_cast_to_common_type(arr, target_dtype) for arr in to_concat] - if isinstance(to_concat[0], ExtensionArray) and axis == 0: + if isinstance(to_concat[0], ExtensionArray): cls = type(to_concat[0]) return cls._concat_same_type(to_concat) else: - return np.concatenate(to_concat, axis=axis) + return np.concatenate(to_concat) elif _contains_datetime or "timedelta" in typs: return concat_datetime(to_concat, axis=axis, typs=typs)
diff --git a/pandas/core/internals/concat.py b/pandas/core/internals/concat.py index b45f0890cafa4..513c5fed1ca62 100644 --- a/pandas/core/internals/concat.py +++ b/pandas/core/internals/concat.py @@ -24,7 +24,7 @@ from pandas.core.dtypes.missing import isna import pandas.core.algorithms as algos -from pandas.core.arrays import ExtensionArray +from pandas.core.arrays import DatetimeArray, ExtensionArray from pandas.core.internals.blocks import make_block from pandas.core.internals.managers import BlockManager @@ -335,9 +335,13 @@ def _concatenate_join_units(join_units, concat_axis, copy): # the non-EA values are 2D arrays with shape (1, n) to_concat = [t if isinstance(t, ExtensionArray) else t[0, :] for t in to_concat] concat_values = concat_compat(to_concat, axis=0) - if not isinstance(concat_values, ExtensionArray): + if not isinstance(concat_values, ExtensionArray) or ( + isinstance(concat_values, DatetimeArray) and concat_values.tz is None + ): # if the result of concat is not an EA but an ndarray, reshape to # 2D to put it in a non-EA Block + # special case DatetimeArray, which *is* an EA, but is put in a + # consolidated 2D block concat_values = np.atleast_2d(concat_values) else: concat_values = concat_compat(to_concat, axis=concat_axis)
diff --git a/pandas/tests/reshape/test_concat.py b/pandas/tests/reshape/test_concat.py index 38cf2cc2402a1..90705f827af25 100644 --- a/pandas/tests/reshape/test_concat.py +++ b/pandas/tests/reshape/test_concat.py @@ -1110,6 +1110,23 @@ def test_append_empty_frame_to_series_with_dateutil_tz(self): result = df.append([s, s], ignore_index=True) tm.assert_frame_equal(result, expected)
+ def test_append_empty_tz_frame_with_datetime64ns(self): + # https://github.com/pandas-dev/pandas/issues/35460 + df = pd.DataFrame(columns=["a"]).astype("datetime64[ns, UTC]") + + # pd.NaT gets inferred as tz-naive, so append result is tz-naive + result = df.append({"a": pd.NaT}, ignore_index=True) + expected = pd.DataFrame({"a": [pd.NaT]}).astype("datetime64[ns]") + tm.assert_frame_equal(result, expected) + + # also test with typed value to append + df = 
pd.DataFrame(columns=["a"]).astype("datetime64[ns, UTC]") + result = df.append( + pd.Series({"a": pd.NaT}, dtype="datetime64[ns]"), ignore_index=True + ) + expected = pd.DataFrame({"a": [pd.NaT]}).astype("datetime64[ns]") + tm.assert_frame_equal(result, expected) + class TestConcatenate: def test_concat_copy(self): From 92280664969ae9c6293436059d382b03a90b0826 Mon Sep 17 00:00:00 2001 From: Daniel Saxton <2658661+dsaxton@users.noreply.github.com> Date: Sun, 6 Sep 2020 12:59:43 -0400 Subject: [PATCH 453/632] BUG: Respect errors="ignore" during extension astype (#35979) --- doc/source/whatsnew/v1.1.2.rst | 1 + pandas/core/internals/blocks.py | 9 ++++++-- pandas/tests/frame/methods/test_astype.py | 22 +++++++++++++++++++ pandas/tests/series/methods/test_astype.py | 25 +++++++++++++++++++++- 4 files changed, 54 insertions(+), 3 deletions(-) diff --git a/doc/source/whatsnew/v1.1.2.rst b/doc/source/whatsnew/v1.1.2.rst index f0adc951a5f99..1e946d325ace1 100644 --- a/doc/source/whatsnew/v1.1.2.rst +++ b/doc/source/whatsnew/v1.1.2.rst @@ -34,6 +34,7 @@ Bug fixes - Bug in :meth:`DataFrame.eval` with ``object`` dtype column binary operations (:issue:`35794`) - Bug in :class:`Series` constructor raising a ``TypeError`` when constructing sparse datetime64 dtypes (:issue:`35762`) - Bug in :meth:`DataFrame.apply` with ``result_type="reduce"`` returning with incorrect index (:issue:`35683`) +- Bug in :meth:`Series.astype` and :meth:`DataFrame.astype` not respecting the ``errors`` argument when set to ``"ignore"`` for extension dtypes (:issue:`35471`) - Bug in :meth:`DateTimeIndex.format` and :meth:`PeriodIndex.format` with ``name=True`` setting the first item to ``"None"`` where it should be ``""`` (:issue:`35712`) - Bug in :meth:`Float64Index.__contains__` incorrectly raising ``TypeError`` instead of returning ``False`` (:issue:`35788`) - Bug in :class:`Series` constructor incorrectly raising a ``TypeError`` when passed an ordered set (:issue:`36044`) diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index 9f4e535dc787d..263c7c2b6940a 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -581,8 +581,13 @@ def astype(self, dtype, copy: bool = False, errors: str = "raise"): # force the copy here if self.is_extension: - # TODO: Should we try/except this astype? 
- values = self.values.astype(dtype) + try: + values = self.values.astype(dtype) + except (ValueError, TypeError): + if errors == "ignore": + values = self.values + else: + raise else: if issubclass(dtype.type, str): diff --git a/pandas/tests/frame/methods/test_astype.py b/pandas/tests/frame/methods/test_astype.py index b0fd0496ea81e..d3f256259b15f 100644 --- a/pandas/tests/frame/methods/test_astype.py +++ b/pandas/tests/frame/methods/test_astype.py @@ -8,6 +8,7 @@ CategoricalDtype, DataFrame, DatetimeTZDtype, + Interval, IntervalDtype, NaT, Series, @@ -565,3 +566,24 @@ def test_astype_empty_dtype_dict(self): result = df.astype(dict()) tm.assert_frame_equal(result, df) assert result is not df + + @pytest.mark.parametrize( + "df", + [ + DataFrame(Series(["x", "y", "z"], dtype="string")), + DataFrame(Series(["x", "y", "z"], dtype="category")), + DataFrame(Series(3 * [Timestamp("2020-01-01", tz="UTC")])), + DataFrame(Series(3 * [Interval(0, 1)])), + ], + ) + @pytest.mark.parametrize("errors", ["raise", "ignore"]) + def test_astype_ignores_errors_for_extension_dtypes(self, df, errors): + # https://github.com/pandas-dev/pandas/issues/35471 + if errors == "ignore": + expected = df + result = df.astype(float, errors=errors) + tm.assert_frame_equal(result, expected) + else: + msg = "(Cannot cast)|(could not convert)" + with pytest.raises((ValueError, TypeError), match=msg): + df.astype(float, errors=errors) diff --git a/pandas/tests/series/methods/test_astype.py b/pandas/tests/series/methods/test_astype.py index 9fdc4179de2e1..b9d90a9fc63dd 100644 --- a/pandas/tests/series/methods/test_astype.py +++ b/pandas/tests/series/methods/test_astype.py @@ -1,4 +1,6 @@ -from pandas import Series, date_range +import pytest + +from pandas import Interval, Series, Timestamp, date_range import pandas._testing as tm @@ -23,3 +25,24 @@ def test_astype_dt64tz_to_str(self): dtype=object, ) tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize( + "values", + [ + Series(["x", "y", "z"], dtype="string"), + Series(["x", "y", "z"], dtype="category"), + Series(3 * [Timestamp("2020-01-01", tz="UTC")]), + Series(3 * [Interval(0, 1)]), + ], + ) + @pytest.mark.parametrize("errors", ["raise", "ignore"]) + def test_astype_ignores_errors_for_extension_dtypes(self, values, errors): + # https://github.com/pandas-dev/pandas/issues/35471 + if errors == "ignore": + expected = values + result = values.astype(float, errors="ignore") + tm.assert_series_equal(result, expected) + else: + msg = "(Cannot cast)|(could not convert)" + with pytest.raises((ValueError, TypeError), match=msg): + values.astype(float, errors=errors) From a7e39b83e18c8345b2d0e841fba27149fbf4badd Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Sun, 6 Sep 2020 10:05:35 -0700 Subject: [PATCH 454/632] De-privatize imported names (#36156) --- pandas/_libs/hashtable.pyx | 4 ++-- pandas/_libs/hashtable_class_helper.pxi.in | 6 +++--- pandas/_libs/hashtable_func_helper.pxi.in | 2 +- pandas/_libs/parsers.pyx | 8 ++++---- pandas/_testing.py | 8 ++++---- pandas/compat/__init__.py | 4 ++-- pandas/core/algorithms.py | 4 ++-- pandas/core/arrays/base.py | 4 ++-- pandas/core/arrays/masked.py | 4 ++-- pandas/core/computation/check.py | 10 +++++----- pandas/core/computation/eval.py | 6 +++--- pandas/core/computation/expressions.py | 10 +++++----- pandas/core/computation/ops.py | 6 +++--- pandas/core/frame.py | 4 ++-- pandas/core/indexes/multi.py | 4 ++-- pandas/core/internals/__init__.py | 4 ++-- pandas/core/internals/blocks.py | 4 ++-- 
pandas/core/internals/managers.py | 6 +++--- pandas/core/sorting.py | 2 +- pandas/core/window/common.py | 4 ++-- pandas/core/window/ewm.py | 6 +++--- pandas/core/window/rolling.py | 6 +++--- pandas/io/common.py | 6 +++--- pandas/io/excel/_base.py | 2 +- pandas/io/excel/_odfreader.py | 4 ++-- pandas/io/excel/_openpyxl.py | 4 ++-- pandas/io/excel/_pyxlsb.py | 4 ++-- pandas/io/excel/_xlrd.py | 4 ++-- pandas/io/formats/format.py | 8 ++++---- pandas/io/formats/printing.py | 4 ++-- pandas/tests/computation/test_compat.py | 6 +++--- pandas/tests/computation/test_eval.py | 12 ++++++------ pandas/tests/extension/json/array.py | 2 +- pandas/tests/frame/test_arithmetic.py | 4 ++-- pandas/tests/frame/test_query_eval.py | 6 +++--- pandas/tests/io/formats/test_format.py | 2 +- pandas/tests/io/test_pickle.py | 6 +++--- pandas/tests/test_algos.py | 2 +- .../moments/test_moments_consistency_rolling.py | 4 ++-- pandas/tests/window/test_pairwise.py | 2 +- pandas/util/_test_decorators.py | 4 ++-- 41 files changed, 101 insertions(+), 101 deletions(-) diff --git a/pandas/_libs/hashtable.pyx b/pandas/_libs/hashtable.pyx index ffaf6d6505955..5a0cddb0af197 100644 --- a/pandas/_libs/hashtable.pyx +++ b/pandas/_libs/hashtable.pyx @@ -56,7 +56,7 @@ from pandas._libs.missing cimport checknull cdef int64_t NPY_NAT = util.get_nat() -_SIZE_HINT_LIMIT = (1 << 20) + 7 +SIZE_HINT_LIMIT = (1 << 20) + 7 cdef Py_ssize_t _INIT_VEC_CAP = 128 @@ -176,7 +176,7 @@ def unique_label_indices(const int64_t[:] labels): ndarray[int64_t, ndim=1] arr Int64VectorData *ud = idx.data - kh_resize_int64(table, min(n, _SIZE_HINT_LIMIT)) + kh_resize_int64(table, min(n, SIZE_HINT_LIMIT)) with nogil: for i in range(n): diff --git a/pandas/_libs/hashtable_class_helper.pxi.in b/pandas/_libs/hashtable_class_helper.pxi.in index e0e026fe7cb5e..5e4da96d57e42 100644 --- a/pandas/_libs/hashtable_class_helper.pxi.in +++ b/pandas/_libs/hashtable_class_helper.pxi.in @@ -268,7 +268,7 @@ cdef class {{name}}HashTable(HashTable): def __cinit__(self, int64_t size_hint=1): self.table = kh_init_{{dtype}}() if size_hint is not None: - size_hint = min(size_hint, _SIZE_HINT_LIMIT) + size_hint = min(size_hint, SIZE_HINT_LIMIT) kh_resize_{{dtype}}(self.table, size_hint) def __len__(self) -> int: @@ -603,7 +603,7 @@ cdef class StringHashTable(HashTable): def __init__(self, int64_t size_hint=1): self.table = kh_init_str() if size_hint is not None: - size_hint = min(size_hint, _SIZE_HINT_LIMIT) + size_hint = min(size_hint, SIZE_HINT_LIMIT) kh_resize_str(self.table, size_hint) def __dealloc__(self): @@ -916,7 +916,7 @@ cdef class PyObjectHashTable(HashTable): def __init__(self, int64_t size_hint=1): self.table = kh_init_pymap() if size_hint is not None: - size_hint = min(size_hint, _SIZE_HINT_LIMIT) + size_hint = min(size_hint, SIZE_HINT_LIMIT) kh_resize_pymap(self.table, size_hint) def __dealloc__(self): diff --git a/pandas/_libs/hashtable_func_helper.pxi.in b/pandas/_libs/hashtable_func_helper.pxi.in index 0cc0a6b192df5..fcd081f563f92 100644 --- a/pandas/_libs/hashtable_func_helper.pxi.in +++ b/pandas/_libs/hashtable_func_helper.pxi.in @@ -138,7 +138,7 @@ def duplicated_{{dtype}}(const {{c_type}}[:] values, object keep='first'): kh_{{ttype}}_t *table = kh_init_{{ttype}}() ndarray[uint8_t, ndim=1, cast=True] out = np.empty(n, dtype='bool') - kh_resize_{{ttype}}(table, min(n, _SIZE_HINT_LIMIT)) + kh_resize_{{ttype}}(table, min(n, SIZE_HINT_LIMIT)) if keep not in ('last', 'first', False): raise ValueError('keep must be either "first", "last" or False') diff --git 
a/pandas/_libs/parsers.pyx b/pandas/_libs/parsers.pyx index fa77af6bd5a25..811e28b830921 100644 --- a/pandas/_libs/parsers.pyx +++ b/pandas/_libs/parsers.pyx @@ -67,7 +67,7 @@ from pandas._libs.khash cimport ( khiter_t, ) -from pandas.compat import _get_lzma_file, _import_lzma +from pandas.compat import get_lzma_file, import_lzma from pandas.errors import DtypeWarning, EmptyDataError, ParserError, ParserWarning from pandas.core.dtypes.common import ( @@ -82,7 +82,7 @@ from pandas.core.dtypes.common import ( ) from pandas.core.dtypes.concat import union_categoricals -lzma = _import_lzma() +lzma = import_lzma() cdef: float64_t INF = np.inf @@ -638,9 +638,9 @@ cdef class TextReader: f'zip file {zip_names}') elif self.compression == 'xz': if isinstance(source, str): - source = _get_lzma_file(lzma)(source, 'rb') + source = get_lzma_file(lzma)(source, 'rb') else: - source = _get_lzma_file(lzma)(filename=source) + source = get_lzma_file(lzma)(filename=source) else: raise ValueError(f'Unrecognized compression type: ' f'{self.compression}') diff --git a/pandas/_testing.py b/pandas/_testing.py index 04d36749a3d8c..7dba578951deb 100644 --- a/pandas/_testing.py +++ b/pandas/_testing.py @@ -25,7 +25,7 @@ from pandas._libs.lib import no_default import pandas._libs.testing as _testing from pandas._typing import Dtype, FilePathOrBuffer, FrameOrSeries -from pandas.compat import _get_lzma_file, _import_lzma +from pandas.compat import get_lzma_file, import_lzma from pandas.core.dtypes.common import ( is_bool, @@ -70,7 +70,7 @@ from pandas.io.common import urlopen from pandas.io.formats.printing import pprint_thing -lzma = _import_lzma() +lzma = import_lzma() _N = 30 _K = 4 @@ -243,7 +243,7 @@ def decompress_file(path, compression): elif compression == "bz2": f = bz2.BZ2File(path, "rb") elif compression == "xz": - f = _get_lzma_file(lzma)(path, "rb") + f = get_lzma_file(lzma)(path, "rb") elif compression == "zip": zip_file = zipfile.ZipFile(path) zip_names = zip_file.namelist() @@ -288,7 +288,7 @@ def write_to_compressed(compression, path, data, dest="test"): elif compression == "bz2": compress_method = bz2.BZ2File elif compression == "xz": - compress_method = _get_lzma_file(lzma) + compress_method = get_lzma_file(lzma) else: raise ValueError(f"Unrecognized compression type: {compression}") diff --git a/pandas/compat/__init__.py b/pandas/compat/__init__.py index f2018a5c01711..57e378758cc78 100644 --- a/pandas/compat/__init__.py +++ b/pandas/compat/__init__.py @@ -77,7 +77,7 @@ def is_platform_mac() -> bool: return sys.platform == "darwin" -def _import_lzma(): +def import_lzma(): """ Importing the `lzma` module. @@ -97,7 +97,7 @@ def _import_lzma(): warnings.warn(msg) -def _get_lzma_file(lzma): +def get_lzma_file(lzma): """ Importing the `LZMAFile` class from the `lzma` module. 
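A minimal sketch of the renamed lzma helpers in use, matching the updated call sites above (the file name is hypothetical):

>>> from pandas.compat import get_lzma_file, import_lzma
>>> lzma = import_lzma()  # returns the stdlib module, or warns if unavailable
>>> f = get_lzma_file(lzma)("data.xz", "rb")  # get_lzma_file returns lzma.LZMAFile
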
diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py index 50ec3714f454b..57e63daff29e4 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py @@ -462,7 +462,7 @@ def isin(comps: AnyArrayLike, values: AnyArrayLike) -> np.ndarray: return f(comps, values) -def _factorize_array( +def factorize_array( values, na_sentinel: int = -1, size_hint=None, na_value=None, mask=None ) -> Tuple[np.ndarray, np.ndarray]: """ @@ -671,7 +671,7 @@ def factorize( else: na_value = None - codes, uniques = _factorize_array( + codes, uniques = factorize_array( values, na_sentinel=na_sentinel, size_hint=size_hint, na_value=na_value ) diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py index 8193d65b3b30c..0c8efda5fc588 100644 --- a/pandas/core/arrays/base.py +++ b/pandas/core/arrays/base.py @@ -31,7 +31,7 @@ from pandas.core.dtypes.missing import isna from pandas.core import ops -from pandas.core.algorithms import _factorize_array, unique +from pandas.core.algorithms import factorize_array, unique from pandas.core.missing import backfill_1d, pad_1d from pandas.core.sorting import nargminmax, nargsort @@ -845,7 +845,7 @@ def factorize(self, na_sentinel: int = -1) -> Tuple[np.ndarray, "ExtensionArray" # Complete control over factorization. arr, na_value = self._values_for_factorize() - codes, uniques = _factorize_array( + codes, uniques = factorize_array( arr, na_sentinel=na_sentinel, na_value=na_value ) diff --git a/pandas/core/arrays/masked.py b/pandas/core/arrays/masked.py index 1237dea5c1a64..31274232e2525 100644 --- a/pandas/core/arrays/masked.py +++ b/pandas/core/arrays/masked.py @@ -17,7 +17,7 @@ from pandas.core.dtypes.missing import isna, notna from pandas.core import nanops -from pandas.core.algorithms import _factorize_array, take +from pandas.core.algorithms import factorize_array, take from pandas.core.array_algos import masked_reductions from pandas.core.arrays import ExtensionArray, ExtensionOpsMixin from pandas.core.indexers import check_array_indexer @@ -287,7 +287,7 @@ def factorize(self, na_sentinel: int = -1) -> Tuple[np.ndarray, ExtensionArray]: arr = self._data mask = self._mask - codes, uniques = _factorize_array(arr, na_sentinel=na_sentinel, mask=mask) + codes, uniques = factorize_array(arr, na_sentinel=na_sentinel, mask=mask) # the hashtables don't handle all different types of bits uniques = uniques.astype(self.dtype.numpy_dtype, copy=False) diff --git a/pandas/core/computation/check.py b/pandas/core/computation/check.py index 4d205909b9e2e..6c7261b3b33c9 100644 --- a/pandas/core/computation/check.py +++ b/pandas/core/computation/check.py @@ -1,10 +1,10 @@ from pandas.compat._optional import import_optional_dependency ne = import_optional_dependency("numexpr", raise_on_missing=False, on_version="warn") -_NUMEXPR_INSTALLED = ne is not None -if _NUMEXPR_INSTALLED: - _NUMEXPR_VERSION = ne.__version__ +NUMEXPR_INSTALLED = ne is not None +if NUMEXPR_INSTALLED: + NUMEXPR_VERSION = ne.__version__ else: - _NUMEXPR_VERSION = None + NUMEXPR_VERSION = None -__all__ = ["_NUMEXPR_INSTALLED", "_NUMEXPR_VERSION"] +__all__ = ["NUMEXPR_INSTALLED", "NUMEXPR_VERSION"] diff --git a/pandas/core/computation/eval.py b/pandas/core/computation/eval.py index b74f99fca21c7..f6a7935142a32 100644 --- a/pandas/core/computation/eval.py +++ b/pandas/core/computation/eval.py @@ -38,10 +38,10 @@ def _check_engine(engine: Optional[str]) -> str: str Engine name. 
""" - from pandas.core.computation.check import _NUMEXPR_INSTALLED + from pandas.core.computation.check import NUMEXPR_INSTALLED if engine is None: - engine = "numexpr" if _NUMEXPR_INSTALLED else "python" + engine = "numexpr" if NUMEXPR_INSTALLED else "python" if engine not in _engines: valid_engines = list(_engines.keys()) @@ -53,7 +53,7 @@ def _check_engine(engine: Optional[str]) -> str: # that won't necessarily be import-able) # Could potentially be done on engine instantiation if engine == "numexpr": - if not _NUMEXPR_INSTALLED: + if not NUMEXPR_INSTALLED: raise ImportError( "'numexpr' is not installed or an unsupported version. Cannot use " "engine='numexpr' for query/eval if 'numexpr' is not installed" diff --git a/pandas/core/computation/expressions.py b/pandas/core/computation/expressions.py index a9c0cb0571446..d2c08c343ab4b 100644 --- a/pandas/core/computation/expressions.py +++ b/pandas/core/computation/expressions.py @@ -15,15 +15,15 @@ from pandas.core.dtypes.generic import ABCDataFrame -from pandas.core.computation.check import _NUMEXPR_INSTALLED +from pandas.core.computation.check import NUMEXPR_INSTALLED from pandas.core.ops import roperator -if _NUMEXPR_INSTALLED: +if NUMEXPR_INSTALLED: import numexpr as ne _TEST_MODE = None _TEST_RESULT: List[bool] = list() -_USE_NUMEXPR = _NUMEXPR_INSTALLED +_USE_NUMEXPR = NUMEXPR_INSTALLED _evaluate = None _where = None @@ -40,7 +40,7 @@ def set_use_numexpr(v=True): # set/unset to use numexpr global _USE_NUMEXPR - if _NUMEXPR_INSTALLED: + if NUMEXPR_INSTALLED: _USE_NUMEXPR = v # choose what we are going to do @@ -53,7 +53,7 @@ def set_use_numexpr(v=True): def set_numexpr_threads(n=None): # if we are using numexpr, set the threads to n # otherwise reset - if _NUMEXPR_INSTALLED and _USE_NUMEXPR: + if NUMEXPR_INSTALLED and _USE_NUMEXPR: if n is None: n = ne.detect_number_of_cores() ne.set_num_threads(n) diff --git a/pandas/core/computation/ops.py b/pandas/core/computation/ops.py index b2144c45c6323..1fb3910b8577d 100644 --- a/pandas/core/computation/ops.py +++ b/pandas/core/computation/ops.py @@ -600,11 +600,11 @@ def __repr__(self) -> str: class FuncNode: def __init__(self, name: str): - from pandas.core.computation.check import _NUMEXPR_INSTALLED, _NUMEXPR_VERSION + from pandas.core.computation.check import NUMEXPR_INSTALLED, NUMEXPR_VERSION if name not in _mathops or ( - _NUMEXPR_INSTALLED - and _NUMEXPR_VERSION < LooseVersion("2.6.9") + NUMEXPR_INSTALLED + and NUMEXPR_VERSION < LooseVersion("2.6.9") and name in ("floor", "ceil") ): raise ValueError(f'"{name}" is not a supported function') diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 150d6e24dbb86..e1a889bf79d95 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -5257,7 +5257,7 @@ def duplicated( 4 True dtype: bool """ - from pandas._libs.hashtable import _SIZE_HINT_LIMIT, duplicated_int64 + from pandas._libs.hashtable import SIZE_HINT_LIMIT, duplicated_int64 from pandas.core.sorting import get_group_index @@ -5266,7 +5266,7 @@ def duplicated( def f(vals): labels, shape = algorithms.factorize( - vals, size_hint=min(len(self), _SIZE_HINT_LIMIT) + vals, size_hint=min(len(self), SIZE_HINT_LIMIT) ) return labels.astype("i8", copy=False), len(shape) diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index 080ece8547479..e49a23935efbd 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -1342,9 +1342,9 @@ def format( ) if adjoin: - from pandas.io.formats.format import _get_adjustment + from 
pandas.io.formats.format import get_adjustment - adj = _get_adjustment() + adj = get_adjustment() return adj.adjoin(space, *result_levels).split("\n") else: return result_levels diff --git a/pandas/core/internals/__init__.py b/pandas/core/internals/__init__.py index e12e0d7760ea7..fbccac1c2af67 100644 --- a/pandas/core/internals/__init__.py +++ b/pandas/core/internals/__init__.py @@ -10,8 +10,8 @@ IntBlock, ObjectBlock, TimeDeltaBlock, - _safe_reshape, make_block, + safe_reshape, ) from pandas.core.internals.concat import concatenate_block_managers from pandas.core.internals.managers import ( @@ -33,7 +33,7 @@ "IntBlock", "ObjectBlock", "TimeDeltaBlock", - "_safe_reshape", + "safe_reshape", "make_block", "BlockManager", "SingleBlockManager", diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index 263c7c2b6940a..c8da04fbbf987 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -1678,7 +1678,7 @@ def putmask( if isinstance(new, (np.ndarray, ExtensionArray)) and len(new) == len(mask): new = new[mask] - mask = _safe_reshape(mask, new_values.shape) + mask = safe_reshape(mask, new_values.shape) new_values[mask] = new return [self.make_block(values=new_values)] @@ -2820,7 +2820,7 @@ def _block_shape(values: ArrayLike, ndim: int = 1) -> ArrayLike: return values -def _safe_reshape(arr, new_shape): +def safe_reshape(arr, new_shape): """ If possible, reshape `arr` to have shape `new_shape`, with a couple of exceptions (see gh-13012): diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py index 13bc6a2e82195..3f446874ffd0e 100644 --- a/pandas/core/internals/managers.py +++ b/pandas/core/internals/managers.py @@ -47,10 +47,10 @@ DatetimeTZBlock, ExtensionBlock, ObjectValuesExtensionBlock, - _safe_reshape, extend_blocks, get_block_type, make_block, + safe_reshape, ) from pandas.core.internals.ops import blockwise_all, operate_blockwise @@ -1015,7 +1015,7 @@ def value_getitem(placement): else: if value.ndim == self.ndim - 1: - value = _safe_reshape(value, (1,) + value.shape) + value = safe_reshape(value, (1,) + value.shape) def value_getitem(placement): return value @@ -1138,7 +1138,7 @@ def insert(self, loc: int, item: Label, value, allow_duplicates: bool = False): if value.ndim == self.ndim - 1 and not is_extension_array_dtype(value.dtype): # TODO(EA2D): special case not needed with 2D EAs - value = _safe_reshape(value, (1,) + value.shape) + value = safe_reshape(value, (1,) + value.shape) block = make_block(values=value, ndim=self.ndim, placement=slice(loc, loc + 1)) diff --git a/pandas/core/sorting.py b/pandas/core/sorting.py index 8bdd466ae6f33..d03b2f29521b7 100644 --- a/pandas/core/sorting.py +++ b/pandas/core/sorting.py @@ -520,7 +520,7 @@ def compress_group_index(group_index, sort: bool = True): space can be huge, so this function compresses it, by computing offsets (comp_ids) into the list of unique labels (obs_group_ids). 
""" - size_hint = min(len(group_index), hashtable._SIZE_HINT_LIMIT) + size_hint = min(len(group_index), hashtable.SIZE_HINT_LIMIT) table = hashtable.Int64HashTable(size_hint) group_index = ensure_int64(group_index) diff --git a/pandas/core/window/common.py b/pandas/core/window/common.py index 2f3058db4493b..df60d2dcf5e84 100644 --- a/pandas/core/window/common.py +++ b/pandas/core/window/common.py @@ -92,7 +92,7 @@ def f(x, name=name, *args): return self._groupby.apply(f) -def _flex_binary_moment(arg1, arg2, f, pairwise=False): +def flex_binary_moment(arg1, arg2, f, pairwise=False): if not ( isinstance(arg1, (np.ndarray, ABCSeries, ABCDataFrame)) @@ -222,7 +222,7 @@ def dataframe_from_int_dict(data, frame_template): return dataframe_from_int_dict(results, arg1) else: - return _flex_binary_moment(arg2, arg1, f) + return flex_binary_moment(arg2, arg1, f) def zsqrt(x): diff --git a/pandas/core/window/ewm.py b/pandas/core/window/ewm.py index 1913b51a68c15..2bd36d8bff155 100644 --- a/pandas/core/window/ewm.py +++ b/pandas/core/window/ewm.py @@ -15,7 +15,7 @@ import pandas.core.common as common from pandas.core.window.common import _doc_template, _shared_docs, zsqrt -from pandas.core.window.rolling import _flex_binary_moment, _Rolling +from pandas.core.window.rolling import _Rolling, flex_binary_moment _bias_template = """ Parameters @@ -416,7 +416,7 @@ def _get_cov(X, Y): ) return X._wrap_result(cov) - return _flex_binary_moment( + return flex_binary_moment( self._selected_obj, other._selected_obj, _get_cov, pairwise=bool(pairwise) ) @@ -470,6 +470,6 @@ def _cov(x, y): corr = cov / zsqrt(x_var * y_var) return X._wrap_result(corr) - return _flex_binary_moment( + return flex_binary_moment( self._selected_obj, other._selected_obj, _get_corr, pairwise=bool(pairwise) ) diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py index 558c0eeb0ea65..4c4ec4d700b7f 100644 --- a/pandas/core/window/rolling.py +++ b/pandas/core/window/rolling.py @@ -54,8 +54,8 @@ from pandas.core.window.common import ( WindowGroupByMixin, _doc_template, - _flex_binary_moment, _shared_docs, + flex_binary_moment, zsqrt, ) from pandas.core.window.indexers import ( @@ -1774,7 +1774,7 @@ def _get_cov(X, Y): bias_adj = count / (count - ddof) return (mean(X * Y) - mean(X) * mean(Y)) * bias_adj - return _flex_binary_moment( + return flex_binary_moment( self._selected_obj, other._selected_obj, _get_cov, pairwise=bool(pairwise) ) @@ -1913,7 +1913,7 @@ def _get_corr(a, b): return a.cov(b, **kwargs) / (a.std(**kwargs) * b.std(**kwargs)) - return _flex_binary_moment( + return flex_binary_moment( self._selected_obj, other._selected_obj, _get_corr, pairwise=bool(pairwise) ) diff --git a/pandas/io/common.py b/pandas/io/common.py index a80b89569f429..3f130401558dd 100644 --- a/pandas/io/common.py +++ b/pandas/io/common.py @@ -40,12 +40,12 @@ ModeVar, StorageOptions, ) -from pandas.compat import _get_lzma_file, _import_lzma +from pandas.compat import get_lzma_file, import_lzma from pandas.compat._optional import import_optional_dependency from pandas.core.dtypes.common import is_file_like -lzma = _import_lzma() +lzma = import_lzma() _VALID_URLS = set(uses_relative + uses_netloc + uses_params) @@ -562,7 +562,7 @@ def get_handle( # XZ Compression elif compression == "xz": - f = _get_lzma_file(lzma)(path_or_buf, mode) + f = get_lzma_file(lzma)(path_or_buf, mode) # Unrecognized Compression else: diff --git a/pandas/io/excel/_base.py b/pandas/io/excel/_base.py index 74eb65521f5b2..87343c22ad4e9 100644 --- 
a/pandas/io/excel/_base.py +++ b/pandas/io/excel/_base.py @@ -346,7 +346,7 @@ def read_excel( ) -class _BaseExcelReader(metaclass=abc.ABCMeta): +class BaseExcelReader(metaclass=abc.ABCMeta): def __init__(self, filepath_or_buffer, storage_options: StorageOptions = None): # If filepath_or_buffer is a url, load the data into a BytesIO if is_url(filepath_or_buffer): diff --git a/pandas/io/excel/_odfreader.py b/pandas/io/excel/_odfreader.py index 6cbca59aed97e..02575ab878f6e 100644 --- a/pandas/io/excel/_odfreader.py +++ b/pandas/io/excel/_odfreader.py @@ -7,10 +7,10 @@ import pandas as pd -from pandas.io.excel._base import _BaseExcelReader +from pandas.io.excel._base import BaseExcelReader -class _ODFReader(_BaseExcelReader): +class _ODFReader(BaseExcelReader): """ Read tables out of OpenDocument formatted files. diff --git a/pandas/io/excel/_openpyxl.py b/pandas/io/excel/_openpyxl.py index 89b581da6ed31..f395127902101 100644 --- a/pandas/io/excel/_openpyxl.py +++ b/pandas/io/excel/_openpyxl.py @@ -5,7 +5,7 @@ from pandas._typing import FilePathOrBuffer, Scalar, StorageOptions from pandas.compat._optional import import_optional_dependency -from pandas.io.excel._base import ExcelWriter, _BaseExcelReader +from pandas.io.excel._base import BaseExcelReader, ExcelWriter from pandas.io.excel._util import validate_freeze_panes if TYPE_CHECKING: @@ -438,7 +438,7 @@ def write_cells( setattr(xcell, k, v) -class _OpenpyxlReader(_BaseExcelReader): +class _OpenpyxlReader(BaseExcelReader): def __init__( self, filepath_or_buffer: FilePathOrBuffer, diff --git a/pandas/io/excel/_pyxlsb.py b/pandas/io/excel/_pyxlsb.py index c15a52abe4d53..069c3a2eaa643 100644 --- a/pandas/io/excel/_pyxlsb.py +++ b/pandas/io/excel/_pyxlsb.py @@ -3,10 +3,10 @@ from pandas._typing import FilePathOrBuffer, Scalar, StorageOptions from pandas.compat._optional import import_optional_dependency -from pandas.io.excel._base import _BaseExcelReader +from pandas.io.excel._base import BaseExcelReader -class _PyxlsbReader(_BaseExcelReader): +class _PyxlsbReader(BaseExcelReader): def __init__( self, filepath_or_buffer: FilePathOrBuffer, diff --git a/pandas/io/excel/_xlrd.py b/pandas/io/excel/_xlrd.py index a7fb519af61c6..9057106fb08e5 100644 --- a/pandas/io/excel/_xlrd.py +++ b/pandas/io/excel/_xlrd.py @@ -5,10 +5,10 @@ from pandas._typing import StorageOptions from pandas.compat._optional import import_optional_dependency -from pandas.io.excel._base import _BaseExcelReader +from pandas.io.excel._base import BaseExcelReader -class _XlrdReader(_BaseExcelReader): +class _XlrdReader(BaseExcelReader): def __init__(self, filepath_or_buffer, storage_options: StorageOptions = None): """ Reader using xlrd engine. 
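A small sketch of the subclass relationship established by these renames (``_XlrdReader`` is shown in the diff above; the module paths are pandas-internal):

>>> from pandas.io.excel._base import BaseExcelReader
>>> from pandas.io.excel._xlrd import _XlrdReader
>>> issubclass(_XlrdReader, BaseExcelReader)
True
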
diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py index 3dc4290953360..53b2b533215f0 100644 --- a/pandas/io/formats/format.py +++ b/pandas/io/formats/format.py @@ -256,7 +256,7 @@ def __init__( float_format = get_option("display.float_format") self.float_format = float_format self.dtype = dtype - self.adj = _get_adjustment() + self.adj = get_adjustment() self._chk_truncate() @@ -439,7 +439,7 @@ def _get_pad(t): return [x.rjust(_get_pad(x)) for x in texts] -def _get_adjustment() -> TextAdjustment: +def get_adjustment() -> TextAdjustment: use_east_asian_width = get_option("display.unicode.east_asian_width") if use_east_asian_width: return EastAsianTextAdjustment() @@ -628,7 +628,7 @@ def __init__( self.columns = frame.columns self._chk_truncate() - self.adj = _get_adjustment() + self.adj = get_adjustment() def _chk_truncate(self) -> None: """ @@ -1733,7 +1733,7 @@ def _make_fixed_width( return strings if adj is None: - adj = _get_adjustment() + adj = get_adjustment() max_len = max(adj.len(x) for x in strings) diff --git a/pandas/io/formats/printing.py b/pandas/io/formats/printing.py index 23daab725ec65..edc6fbfff61d7 100644 --- a/pandas/io/formats/printing.py +++ b/pandas/io/formats/printing.py @@ -321,7 +321,7 @@ def format_object_summary( summary string """ from pandas.io.formats.console import get_console_size - from pandas.io.formats.format import _get_adjustment + from pandas.io.formats.format import get_adjustment display_width, _ = get_console_size() if display_width is None: @@ -350,7 +350,7 @@ def format_object_summary( is_truncated = n > max_seq_items # adj can optionally handle unicode eastern asian width - adj = _get_adjustment() + adj = get_adjustment() def _extend_line( s: str, line: str, value: str, display_width: int, next_line_prefix: str diff --git a/pandas/tests/computation/test_compat.py b/pandas/tests/computation/test_compat.py index b3fbd8c17d8bf..ead102f532a20 100644 --- a/pandas/tests/computation/test_compat.py +++ b/pandas/tests/computation/test_compat.py @@ -12,16 +12,16 @@ def test_compat(): # test we have compat with our version of nu - from pandas.core.computation.check import _NUMEXPR_INSTALLED + from pandas.core.computation.check import NUMEXPR_INSTALLED try: import numexpr as ne ver = ne.__version__ if LooseVersion(ver) < LooseVersion(VERSIONS["numexpr"]): - assert not _NUMEXPR_INSTALLED + assert not NUMEXPR_INSTALLED else: - assert _NUMEXPR_INSTALLED + assert NUMEXPR_INSTALLED except ImportError: pytest.skip("not testing numexpr version compat") diff --git a/pandas/tests/computation/test_eval.py b/pandas/tests/computation/test_eval.py index 853ab00853d1b..49066428eb16c 100644 --- a/pandas/tests/computation/test_eval.py +++ b/pandas/tests/computation/test_eval.py @@ -18,7 +18,7 @@ from pandas import DataFrame, Series, compat, date_range import pandas._testing as tm from pandas.core.computation import pytables -from pandas.core.computation.check import _NUMEXPR_VERSION +from pandas.core.computation.check import NUMEXPR_VERSION from pandas.core.computation.engines import NumExprClobberingError, _engines import pandas.core.computation.expr as expr from pandas.core.computation.expr import ( @@ -26,7 +26,7 @@ PandasExprVisitor, PythonExprVisitor, ) -from pandas.core.computation.expressions import _NUMEXPR_INSTALLED, _USE_NUMEXPR +from pandas.core.computation.expressions import _USE_NUMEXPR, NUMEXPR_INSTALLED from pandas.core.computation.ops import ( _arith_ops_syms, _binary_math_ops, @@ -43,7 +43,7 @@ marks=pytest.mark.skipif( engine == 
"numexpr" and not _USE_NUMEXPR, reason=f"numexpr enabled->{_USE_NUMEXPR}, " - f"installed->{_NUMEXPR_INSTALLED}", + f"installed->{NUMEXPR_INSTALLED}", ), ) for engine in _engines @@ -60,15 +60,15 @@ def parser(request): @pytest.fixture def ne_lt_2_6_9(): - if _NUMEXPR_INSTALLED and _NUMEXPR_VERSION >= LooseVersion("2.6.9"): + if NUMEXPR_INSTALLED and NUMEXPR_VERSION >= LooseVersion("2.6.9"): pytest.skip("numexpr is >= 2.6.9") return "numexpr" @pytest.fixture def unary_fns_for_ne(): - if _NUMEXPR_INSTALLED: - if _NUMEXPR_VERSION >= LooseVersion("2.6.9"): + if NUMEXPR_INSTALLED: + if NUMEXPR_VERSION >= LooseVersion("2.6.9"): return _unary_math_ops else: return tuple(x for x in _unary_math_ops if x not in ("floor", "ceil")) diff --git a/pandas/tests/extension/json/array.py b/pandas/tests/extension/json/array.py index 447a6108fc3c7..e3cdeb9c1951f 100644 --- a/pandas/tests/extension/json/array.py +++ b/pandas/tests/extension/json/array.py @@ -189,7 +189,7 @@ def _concat_same_type(cls, to_concat): def _values_for_factorize(self): frozen = self._values_for_argsort() if len(frozen) == 0: - # _factorize_array expects 1-d array, this is a len-0 2-d array. + # factorize_array expects 1-d array, this is a len-0 2-d array. frozen = frozen.ravel() return frozen, () diff --git a/pandas/tests/frame/test_arithmetic.py b/pandas/tests/frame/test_arithmetic.py index e17357e9845b5..70d0b4e9e835c 100644 --- a/pandas/tests/frame/test_arithmetic.py +++ b/pandas/tests/frame/test_arithmetic.py @@ -11,7 +11,7 @@ from pandas import DataFrame, MultiIndex, Series import pandas._testing as tm import pandas.core.common as com -from pandas.core.computation.expressions import _MIN_ELEMENTS, _NUMEXPR_INSTALLED +from pandas.core.computation.expressions import _MIN_ELEMENTS, NUMEXPR_INSTALLED from pandas.tests.frame.common import _check_mixed_float, _check_mixed_int # ------------------------------------------------------------------- @@ -375,7 +375,7 @@ def test_floordiv_axis0(self): result2 = df.floordiv(ser.values, axis=0) tm.assert_frame_equal(result2, expected) - @pytest.mark.skipif(not _NUMEXPR_INSTALLED, reason="numexpr not installed") + @pytest.mark.skipif(not NUMEXPR_INSTALLED, reason="numexpr not installed") @pytest.mark.parametrize("opname", ["floordiv", "pow"]) def test_floordiv_axis0_numexpr_path(self, opname): # case that goes through numexpr and has to fall back to masked_arith_op diff --git a/pandas/tests/frame/test_query_eval.py b/pandas/tests/frame/test_query_eval.py index 56d178daee7fd..2994482fa5139 100644 --- a/pandas/tests/frame/test_query_eval.py +++ b/pandas/tests/frame/test_query_eval.py @@ -9,7 +9,7 @@ import pandas as pd from pandas import DataFrame, Index, MultiIndex, Series, date_range import pandas._testing as tm -from pandas.core.computation.check import _NUMEXPR_INSTALLED +from pandas.core.computation.check import NUMEXPR_INSTALLED PARSERS = "python", "pandas" ENGINES = "python", pytest.param("numexpr", marks=td.skip_if_no_ne) @@ -39,7 +39,7 @@ def setup_method(self, method): def test_query_default(self): # GH 12749 - # this should always work, whether _NUMEXPR_INSTALLED or not + # this should always work, whether NUMEXPR_INSTALLED or not df = self.df result = df.query("A>0") tm.assert_frame_equal(result, self.expected1) @@ -65,7 +65,7 @@ def test_query_python(self): def test_query_numexpr(self): df = self.df - if _NUMEXPR_INSTALLED: + if NUMEXPR_INSTALLED: result = df.query("A>0", engine="numexpr") tm.assert_frame_equal(result, self.expected1) result = df.eval("A+1", engine="numexpr") diff 
--git a/pandas/tests/io/formats/test_format.py b/pandas/tests/io/formats/test_format.py index 22942ed75d0f3..1fb957505987f 100644 --- a/pandas/tests/io/formats/test_format.py +++ b/pandas/tests/io/formats/test_format.py @@ -226,7 +226,7 @@ def test_repr_truncation(self): r = repr(df) r = r[r.find("\n") + 1 :] - adj = fmt._get_adjustment() + adj = fmt.get_adjustment() for line, value in zip(r.split("\n"), df["B"]): if adj.len(value) + 1 > max_len: diff --git a/pandas/tests/io/test_pickle.py b/pandas/tests/io/test_pickle.py index d1c6705dd7a6f..2241fe7013568 100644 --- a/pandas/tests/io/test_pickle.py +++ b/pandas/tests/io/test_pickle.py @@ -24,7 +24,7 @@ import pytest -from pandas.compat import _get_lzma_file, _import_lzma, is_platform_little_endian +from pandas.compat import get_lzma_file, import_lzma, is_platform_little_endian import pandas.util._test_decorators as td import pandas as pd @@ -33,7 +33,7 @@ from pandas.tseries.offsets import Day, MonthEnd -lzma = _import_lzma() +lzma = import_lzma() @pytest.fixture(scope="module") @@ -268,7 +268,7 @@ def compress_file(self, src_path, dest_path, compression): with zipfile.ZipFile(dest_path, "w", compression=zipfile.ZIP_DEFLATED) as f: f.write(src_path, os.path.basename(src_path)) elif compression == "xz": - f = _get_lzma_file(lzma)(dest_path, "w") + f = get_lzma_file(lzma)(dest_path, "w") else: msg = f"Unrecognized compression type: {compression}" raise ValueError(msg) diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py index 72a679d980641..ec7413514d430 100644 --- a/pandas/tests/test_algos.py +++ b/pandas/tests/test_algos.py @@ -303,7 +303,7 @@ def test_parametrized_factorize_na_value_default(self, data): ], ) def test_parametrized_factorize_na_value(self, data, na_value): - codes, uniques = algos._factorize_array(data, na_value=na_value) + codes, uniques = algos.factorize_array(data, na_value=na_value) expected_uniques = data[[1, 3]] expected_codes = np.array([-1, 0, -1, 1], dtype=np.intp) tm.assert_numpy_array_equal(codes, expected_codes) diff --git a/pandas/tests/window/moments/test_moments_consistency_rolling.py b/pandas/tests/window/moments/test_moments_consistency_rolling.py index 158b994cf03ae..dfcbdde466d44 100644 --- a/pandas/tests/window/moments/test_moments_consistency_rolling.py +++ b/pandas/tests/window/moments/test_moments_consistency_rolling.py @@ -10,7 +10,7 @@ import pandas as pd from pandas import DataFrame, DatetimeIndex, Index, Series import pandas._testing as tm -from pandas.core.window.common import _flex_binary_moment +from pandas.core.window.common import flex_binary_moment from pandas.tests.window.common import ( check_pairwise_moment, moments_consistency_cov_data, @@ -150,7 +150,7 @@ def test_flex_binary_moment(): # don't blow the stack msg = "arguments to moment function must be of type np.ndarray/Series/DataFrame" with pytest.raises(TypeError, match=msg): - _flex_binary_moment(5, 6, None) + flex_binary_moment(5, 6, None) def test_corr_sanity(): diff --git a/pandas/tests/window/test_pairwise.py b/pandas/tests/window/test_pairwise.py index 7425cc5df4c2f..7f4e85b385b2d 100644 --- a/pandas/tests/window/test_pairwise.py +++ b/pandas/tests/window/test_pairwise.py @@ -41,7 +41,7 @@ def compare(self, result, expected): @pytest.mark.parametrize("f", [lambda x: x.cov(), lambda x: x.corr()]) def test_no_flex(self, f): - # DataFrame methods (which do not call _flex_binary_moment()) + # DataFrame methods (which do not call flex_binary_moment()) results = [f(df) for df in self.df1s] for (df, result) in 
zip(self.df1s, results):
diff --git a/pandas/util/_test_decorators.py b/pandas/util/_test_decorators.py index 78facd6694635..94c252eca1671 100644 --- a/pandas/util/_test_decorators.py +++ b/pandas/util/_test_decorators.py @@ -35,7 +35,7 @@ def test_foo(): from pandas.compat._optional import import_optional_dependency from pandas.compat.numpy import _np_version -from pandas.core.computation.expressions import _NUMEXPR_INSTALLED, _USE_NUMEXPR +from pandas.core.computation.expressions import _USE_NUMEXPR, NUMEXPR_INSTALLED def safe_import(mod_name: str, min_version: Optional[str] = None): @@ -196,7 +196,7 @@ def skip_if_no(package: str, min_version: Optional[str] = None): ) skip_if_no_ne = pytest.mark.skipif( not _USE_NUMEXPR, - reason=f"numexpr enabled->{_USE_NUMEXPR}, installed->{_NUMEXPR_INSTALLED}", + reason=f"numexpr enabled->{_USE_NUMEXPR}, installed->{NUMEXPR_INSTALLED}", )
From 2b722032c6975e349b30453e0fc2564d885c1a4c Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Sun, 6 Sep 2020 10:08:26 -0700 Subject: [PATCH 455/632] REF: share more EA methods (#36154) --- pandas/core/arrays/_mixins.py | 33 +++++++- pandas/core/arrays/categorical.py | 126 ++--------------------------- pandas/core/arrays/datetimelike.py | 28 ++----- pandas/core/arrays/numpy_.py | 12 +-- 4 files changed, 45 insertions(+), 154 deletions(-)
diff --git a/pandas/core/arrays/_mixins.py b/pandas/core/arrays/_mixins.py index 2976747d66dfa..8b79f8ce66756 100644 --- a/pandas/core/arrays/_mixins.py +++ b/pandas/core/arrays/_mixins.py @@ -4,9 +4,10 @@ from pandas.compat.numpy import function as nv from pandas.errors import AbstractMethodError -from pandas.util._decorators import cache_readonly +from pandas.util._decorators import cache_readonly, doc -from pandas.core.algorithms import take, unique +from pandas.core.algorithms import searchsorted, take, unique +from pandas.core.array_algos.transforms import shift from pandas.core.arrays.base import ExtensionArray _T = TypeVar("_T", bound="NDArrayBackedExtensionArray") @@ -120,3 +121,31 @@ def repeat(self: _T, repeats, axis=None) -> _T: def unique(self: _T) -> _T: new_data = unique(self._ndarray) return self._from_backing_data(new_data)
+ + @classmethod + @doc(ExtensionArray._concat_same_type) + def _concat_same_type(cls, to_concat, axis: int = 0): + dtypes = {str(x.dtype) for x in to_concat} + if len(dtypes) != 1: + raise ValueError("to_concat must have the same dtype (tz)", dtypes) + + new_values = [x._ndarray for x in to_concat] + new_values = np.concatenate(new_values, axis=axis) + return to_concat[0]._from_backing_data(new_values)
+ + @doc(ExtensionArray.searchsorted) + def searchsorted(self, value, side="left", sorter=None): + return searchsorted(self._ndarray, value, side=side, sorter=sorter)
+ + @doc(ExtensionArray.shift) + def shift(self, periods=1, fill_value=None, axis=0): + + fill_value = self._validate_shift_value(fill_value) + new_values = shift(self._ndarray, periods, axis, fill_value) + + return self._from_backing_data(new_values)
+ + def _validate_shift_value(self, fill_value): + # TODO: after deprecation in datetimelikearraymixin is enforced, + # we can remove this and use validate_fill_value directly + return self._validate_fill_value(fill_value)
diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index c3c9009dda659..02305479bef67 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -44,8 +44,7 @@ from pandas.core.accessor import PandasDelegate, delegate_names import pandas.core.algorithms 
as algorithms from pandas.core.algorithms import _get_data_algo, factorize, take_1d, unique1d -from pandas.core.array_algos.transforms import shift -from pandas.core.arrays._mixins import _T, NDArrayBackedExtensionArray +from pandas.core.arrays._mixins import NDArrayBackedExtensionArray from pandas.core.base import ( ExtensionArray, NoNewAttributesMixin, @@ -1193,35 +1192,6 @@ def map(self, mapper): __le__ = _cat_compare_op(operator.le) __ge__ = _cat_compare_op(operator.ge) - def shift(self, periods, fill_value=None): - """ - Shift Categorical by desired number of periods. - - Parameters - ---------- - periods : int - Number of periods to move, can be positive or negative - fill_value : object, optional - The scalar value to use for newly introduced missing values. - - .. versionadded:: 0.24.0 - - Returns - ------- - shifted : Categorical - """ - # since categoricals always have ndim == 1, an axis parameter - # doesn't make any sense here. - codes = self.codes - if codes.ndim > 1: - raise NotImplementedError("Categorical with ndim > 1.") - - fill_value = self._validate_fill_value(fill_value) - - codes = shift(codes, periods, axis=0, fill_value=fill_value) - - return self._constructor(codes, dtype=self.dtype, fastpath=True) - def _validate_fill_value(self, fill_value): """ Convert a user-facing fill_value to a representation to use with our @@ -1383,20 +1353,6 @@ def notna(self): notnull = notna - def dropna(self): - """ - Return the Categorical without null values. - - Missing values (-1 in .codes) are detected. - - Returns - ------- - valid : Categorical - """ - result = self[self.notna()] - - return result - def value_counts(self, dropna=True): """ Return a Series containing counts of each category. @@ -1749,81 +1705,6 @@ def fillna(self, value=None, method=None, limit=None): return self._constructor(codes, dtype=self.dtype, fastpath=True) - def take(self: _T, indexer, allow_fill: bool = False, fill_value=None) -> _T: - """ - Take elements from the Categorical. - - Parameters - ---------- - indexer : sequence of int - The indices in `self` to take. The meaning of negative values in - `indexer` depends on the value of `allow_fill`. - allow_fill : bool, default False - How to handle negative values in `indexer`. - - * False: negative values in `indices` indicate positional indices - from the right. This is similar to - :func:`numpy.take`. - - * True: negative values in `indices` indicate missing values - (the default). These values are set to `fill_value`. Any other - other negative values raise a ``ValueError``. - - .. versionchanged:: 1.0.0 - - Default value changed from ``True`` to ``False``. - - fill_value : object - The value to use for `indices` that are missing (-1), when - ``allow_fill=True``. This should be the category, i.e. a value - in ``self.categories``, not a code. - - Returns - ------- - Categorical - This Categorical will have the same categories and ordered as - `self`. - - See Also - -------- - Series.take : Similar method for Series. - numpy.ndarray.take : Similar method for NumPy arrays. - - Examples - -------- - >>> cat = pd.Categorical(['a', 'a', 'b']) - >>> cat - ['a', 'a', 'b'] - Categories (2, object): ['a', 'b'] - - Specify ``allow_fill==False`` to have negative indices mean indexing - from the right. - - >>> cat.take([0, -1, -2], allow_fill=False) - ['a', 'b', 'a'] - Categories (2, object): ['a', 'b'] - - With ``allow_fill=True``, indices equal to ``-1`` mean "missing" - values that should be filled with the `fill_value`, which is - ``np.nan`` by default. 
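Note (illustrative sketch, not part of the patch): the Categorical methods deleted in this commit are not lost behavior; they are now inherited from the shared base classes, with NDArrayBackedExtensionArray carrying the ndarray-level work. A minimal sketch of that backing-data pattern follows; only _ndarray and _from_backing_data mirror names from the _mixins.py hunk, and the class itself is an invented stand-in:

    import numpy as np

    class BackedSketch:
        """Invented stand-in for an NDArrayBackedExtensionArray subclass."""

        def __init__(self, values: np.ndarray):
            self._ndarray = values

        def _from_backing_data(self, arr: np.ndarray) -> "BackedSketch":
            # Each subclass re-wraps raw ndarray results in its own type, so
            # take/shift/unique need only one ndarray-level implementation.
            return type(self)(arr)

        def take(self, indexer) -> "BackedSketch":
            return self._from_backing_data(self._ndarray.take(indexer))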
- - >>> cat.take([0, -1, -1], allow_fill=True) - ['a', NaN, NaN] - Categories (2, object): ['a', 'b'] - - The fill value can be specified. - - >>> cat.take([0, -1, -1], allow_fill=True, fill_value='a') - ['a', 'a', 'a'] - Categories (2, object): ['a', 'b'] - - Specifying a fill value that's not in ``self.categories`` - will raise a ``ValueError``. - """ - return NDArrayBackedExtensionArray.take( - self, indexer, allow_fill=allow_fill, fill_value=fill_value - ) - # ------------------------------------------------------------------ # NDArrayBackedExtensionArray compat @@ -1861,6 +1742,9 @@ def __contains__(self, key) -> bool: return contains(self, key, container=self._codes) + # ------------------------------------------------------------------ + # Rendering Methods + def _tidy_repr(self, max_vals=10, footer=True) -> str: """ a short repr displaying only max_vals and an optional (but default @@ -1959,6 +1843,8 @@ def __repr__(self) -> str: return result + # ------------------------------------------------------------------ + def _maybe_coerce_indexer(self, indexer): """ return an indexer coerced to the codes dtype diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index 5a44f87400b79..a5b8032974fa4 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -54,9 +54,8 @@ from pandas.core import missing, nanops, ops from pandas.core.algorithms import checked_add_with_arr, unique1d, value_counts -from pandas.core.array_algos.transforms import shift from pandas.core.arrays._mixins import _T, NDArrayBackedExtensionArray -from pandas.core.arrays.base import ExtensionArray, ExtensionOpsMixin +from pandas.core.arrays.base import ExtensionOpsMixin import pandas.core.common as com from pandas.core.construction import array, extract_array from pandas.core.indexers import check_array_indexer @@ -672,18 +671,11 @@ def view(self, dtype=None): @classmethod def _concat_same_type(cls, to_concat, axis: int = 0): - - # do not pass tz to set because tzlocal cannot be hashed - dtypes = {str(x.dtype) for x in to_concat} - if len(dtypes) != 1: - raise ValueError("to_concat must have the same dtype (tz)", dtypes) + new_obj = super()._concat_same_type(to_concat, axis) obj = to_concat[0] dtype = obj.dtype - i8values = [x.asi8 for x in to_concat] - values = np.concatenate(i8values, axis=axis) - new_freq = None if is_period_dtype(dtype): new_freq = obj.freq @@ -697,11 +689,13 @@ def _concat_same_type(cls, to_concat, axis: int = 0): if all(pair[0][-1] + obj.freq == pair[1][0] for pair in pairs): new_freq = obj.freq - return cls._simple_new(values, dtype=dtype, freq=new_freq) + new_obj._freq = new_freq + return new_obj def copy(self: DatetimeLikeArrayT) -> DatetimeLikeArrayT: - values = self.asi8.copy() - return type(self)._simple_new(values, dtype=self.dtype, freq=self.freq) + new_obj = super().copy() + new_obj._freq = self.freq + return new_obj def _values_for_factorize(self): return self.asi8, iNaT @@ -713,14 +707,6 @@ def _from_factorized(cls, values, original): def _values_for_argsort(self): return self._data - @Appender(ExtensionArray.shift.__doc__) - def shift(self, periods=1, fill_value=None, axis=0): - - fill_value = self._validate_shift_value(fill_value) - new_values = shift(self._data, periods, axis, fill_value) - - return type(self)._simple_new(new_values, dtype=self.dtype) - # ------------------------------------------------------------------ # Validation Methods # TODO: try to de-duplicate these, ensure identical behavior diff --git 
a/pandas/core/arrays/numpy_.py b/pandas/core/arrays/numpy_.py index 23a4a70734c81..588d68514649a 100644 --- a/pandas/core/arrays/numpy_.py +++ b/pandas/core/arrays/numpy_.py @@ -7,7 +7,6 @@ from pandas._libs import lib from pandas._typing import Scalar from pandas.compat.numpy import function as nv -from pandas.util._decorators import doc from pandas.util._validators import validate_fillna_kwargs from pandas.core.dtypes.dtypes import ExtensionDtype @@ -16,10 +15,9 @@ from pandas import compat from pandas.core import nanops, ops -from pandas.core.algorithms import searchsorted from pandas.core.array_algos import masked_reductions from pandas.core.arrays._mixins import NDArrayBackedExtensionArray -from pandas.core.arrays.base import ExtensionArray, ExtensionOpsMixin +from pandas.core.arrays.base import ExtensionOpsMixin from pandas.core.construction import extract_array from pandas.core.indexers import check_array_indexer from pandas.core.missing import backfill_1d, pad_1d @@ -189,10 +187,6 @@ def _from_sequence(cls, scalars, dtype=None, copy: bool = False) -> "PandasArray def _from_factorized(cls, values, original) -> "PandasArray": return cls(values) - @classmethod - def _concat_same_type(cls, to_concat) -> "PandasArray": - return cls(np.concatenate(to_concat)) - def _from_backing_data(self, arr: np.ndarray) -> "PandasArray": return type(self)(arr) @@ -423,10 +417,6 @@ def to_numpy( return result - @doc(ExtensionArray.searchsorted) - def searchsorted(self, value, side="left", sorter=None): - return searchsorted(self.to_numpy(), value, side=side, sorter=sorter) - # ------------------------------------------------------------------------ # Ops From 9083281a1d37f8033ecae0a4f451b6aed3131a2b Mon Sep 17 00:00:00 2001 From: Richard Shadrach <45562402+rhshadrach@users.noreply.github.com> Date: Sun, 6 Sep 2020 13:11:04 -0400 Subject: [PATCH 456/632] CLN: Separate transform tests (#36146) --- pandas/tests/frame/apply/test_frame_apply.py | 49 +------------ .../tests/frame/apply/test_frame_transform.py | 72 +++++++++++++++++++ pandas/tests/frame/common.py | 24 +++++++ .../tests/series/apply/test_series_apply.py | 31 +------- .../series/apply/test_series_transform.py | 59 +++++++++++++++ 5 files changed, 157 insertions(+), 78 deletions(-) create mode 100644 pandas/tests/frame/apply/test_frame_transform.py create mode 100644 pandas/tests/series/apply/test_series_transform.py diff --git a/pandas/tests/frame/apply/test_frame_apply.py b/pandas/tests/frame/apply/test_frame_apply.py index 5a1e448beb40f..bc09501583e2c 100644 --- a/pandas/tests/frame/apply/test_frame_apply.py +++ b/pandas/tests/frame/apply/test_frame_apply.py @@ -1,7 +1,6 @@ from collections import OrderedDict from datetime import datetime from itertools import chain -import operator import warnings import numpy as np @@ -14,6 +13,7 @@ import pandas._testing as tm from pandas.core.apply import frame_apply from pandas.core.base import SpecificationError +from pandas.tests.frame.common import zip_frames @pytest.fixture @@ -1058,25 +1058,6 @@ def test_consistency_for_boxed(self, box, int_frame_const_col): tm.assert_frame_equal(result, expected) -def zip_frames(frames, axis=1): - """ - take a list of frames, zip them together under the - assumption that these all have the first frames' index/columns. 
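Note (illustrative sketch, not part of the patch): the tests being moved out of this file pin down the transform/aggregate contract. A hedged reminder of that contract, using a small invented frame:

    import numpy as np
    import pandas as pd

    df = pd.DataFrame({"A": [1.0, 4.0, 9.0]})
    df.transform(np.sqrt)  # elementwise; result keeps the input's shape
    # df.transform(["max", "min"]) raises ValueError, because transforms
    # cannot produce aggregated results (the message the tests match on).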
- - Returns - ------- - new_frame : DataFrame - """ - if axis == 1: - columns = frames[0].columns - zipped = [f.loc[:, c] for c in columns for f in frames] - return pd.concat(zipped, axis=1) - else: - index = frames[0].index - zipped = [f.loc[i, :] for i in index for f in frames] - return pd.DataFrame(zipped) - - class TestDataFrameAggregate: def test_agg_transform(self, axis, float_frame): other_axis = 1 if axis in {0, "index"} else 0 @@ -1087,16 +1068,10 @@ def test_agg_transform(self, axis, float_frame): f_sqrt = np.sqrt(float_frame) # ufunc - result = float_frame.transform(np.sqrt, axis=axis) expected = f_sqrt.copy() - tm.assert_frame_equal(result, expected) - result = float_frame.apply(np.sqrt, axis=axis) tm.assert_frame_equal(result, expected) - result = float_frame.transform(np.sqrt, axis=axis) - tm.assert_frame_equal(result, expected) - # list-like result = float_frame.apply([np.sqrt], axis=axis) expected = f_sqrt.copy() @@ -1110,9 +1085,6 @@ def test_agg_transform(self, axis, float_frame): ) tm.assert_frame_equal(result, expected) - result = float_frame.transform([np.sqrt], axis=axis) - tm.assert_frame_equal(result, expected) - # multiple items in list # these are in the order as if we are applying both # functions per series and then concatting @@ -1128,38 +1100,19 @@ def test_agg_transform(self, axis, float_frame): ) tm.assert_frame_equal(result, expected) - result = float_frame.transform([np.abs, "sqrt"], axis=axis) - tm.assert_frame_equal(result, expected) - def test_transform_and_agg_err(self, axis, float_frame): # cannot both transform and agg - msg = "transforms cannot produce aggregated results" - with pytest.raises(ValueError, match=msg): - float_frame.transform(["max", "min"], axis=axis) - msg = "cannot combine transform and aggregation operations" with pytest.raises(ValueError, match=msg): with np.errstate(all="ignore"): float_frame.agg(["max", "sqrt"], axis=axis) - with pytest.raises(ValueError, match=msg): - with np.errstate(all="ignore"): - float_frame.transform(["max", "sqrt"], axis=axis) - df = pd.DataFrame({"A": range(5), "B": 5}) def f(): with np.errstate(all="ignore"): df.agg({"A": ["abs", "sum"], "B": ["mean", "max"]}, axis=axis) - @pytest.mark.parametrize("method", ["abs", "shift", "pct_change", "cumsum", "rank"]) - def test_transform_method_name(self, method): - # GH 19760 - df = pd.DataFrame({"A": [-1, 2]}) - result = df.transform(method) - expected = operator.methodcaller(method)(df) - tm.assert_frame_equal(result, expected) - def test_demo(self): # demonstration tests df = pd.DataFrame({"A": range(5), "B": 5}) diff --git a/pandas/tests/frame/apply/test_frame_transform.py b/pandas/tests/frame/apply/test_frame_transform.py new file mode 100644 index 0000000000000..3a345215482ed --- /dev/null +++ b/pandas/tests/frame/apply/test_frame_transform.py @@ -0,0 +1,72 @@ +import operator + +import numpy as np +import pytest + +import pandas as pd +import pandas._testing as tm +from pandas.tests.frame.common import zip_frames + + +def test_agg_transform(axis, float_frame): + other_axis = 1 if axis in {0, "index"} else 0 + + with np.errstate(all="ignore"): + + f_abs = np.abs(float_frame) + f_sqrt = np.sqrt(float_frame) + + # ufunc + result = float_frame.transform(np.sqrt, axis=axis) + expected = f_sqrt.copy() + tm.assert_frame_equal(result, expected) + + result = float_frame.transform(np.sqrt, axis=axis) + tm.assert_frame_equal(result, expected) + + # list-like + expected = f_sqrt.copy() + if axis in {0, "index"}: + expected.columns = pd.MultiIndex.from_product( + 
[float_frame.columns, ["sqrt"]] + ) + else: + expected.index = pd.MultiIndex.from_product([float_frame.index, ["sqrt"]]) + result = float_frame.transform([np.sqrt], axis=axis) + tm.assert_frame_equal(result, expected) + + # multiple items in list + # these are in the order as if we are applying both + # functions per series and then concatting + expected = zip_frames([f_abs, f_sqrt], axis=other_axis) + if axis in {0, "index"}: + expected.columns = pd.MultiIndex.from_product( + [float_frame.columns, ["absolute", "sqrt"]] + ) + else: + expected.index = pd.MultiIndex.from_product( + [float_frame.index, ["absolute", "sqrt"]] + ) + result = float_frame.transform([np.abs, "sqrt"], axis=axis) + tm.assert_frame_equal(result, expected) + + +def test_transform_and_agg_err(axis, float_frame): + # cannot both transform and agg + msg = "transforms cannot produce aggregated results" + with pytest.raises(ValueError, match=msg): + float_frame.transform(["max", "min"], axis=axis) + + msg = "cannot combine transform and aggregation operations" + with pytest.raises(ValueError, match=msg): + with np.errstate(all="ignore"): + float_frame.transform(["max", "sqrt"], axis=axis) + + +@pytest.mark.parametrize("method", ["abs", "shift", "pct_change", "cumsum", "rank"]) +def test_transform_method_name(method): + # GH 19760 + df = pd.DataFrame({"A": [-1, 2]}) + result = df.transform(method) + expected = operator.methodcaller(method)(df) + tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/frame/common.py b/pandas/tests/frame/common.py index 463a140972ab5..73e60ff389038 100644 --- a/pandas/tests/frame/common.py +++ b/pandas/tests/frame/common.py @@ -1,3 +1,8 @@ +from typing import List + +from pandas import DataFrame, concat + + def _check_mixed_float(df, dtype=None): # float16 are most likely to be upcasted to float32 dtypes = dict(A="float32", B="float32", C="float16", D="float64") @@ -29,3 +34,22 @@ def _check_mixed_int(df, dtype=None): assert df.dtypes["C"] == dtypes["C"] if dtypes.get("D"): assert df.dtypes["D"] == dtypes["D"] + + +def zip_frames(frames: List[DataFrame], axis: int = 1) -> DataFrame: + """ + take a list of frames, zip them together under the + assumption that these all have the first frames' index/columns. 
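Note (illustrative usage, not part of the patch): how the relocated transform tests call this helper; the input frames here are invented:

    import numpy as np
    import pandas as pd
    from pandas.tests.frame.common import zip_frames

    float_frame = pd.DataFrame({"A": [1.0, 4.0], "B": [9.0, 16.0]})
    # Interleaves the frames' columns in the first frame's column order:
    # A (abs), A (sqrt), B (abs), B (sqrt)
    expected = zip_frames([np.abs(float_frame), np.sqrt(float_frame)], axis=1)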
+ + Returns + ------- + new_frame : DataFrame + """ + if axis == 1: + columns = frames[0].columns + zipped = [f.loc[:, c] for c in columns for f in frames] + return concat(zipped, axis=1) + else: + index = frames[0].index + zipped = [f.loc[i, :] for i in index for f in frames] + return DataFrame(zipped) diff --git a/pandas/tests/series/apply/test_series_apply.py b/pandas/tests/series/apply/test_series_apply.py index 308398642895c..b948317f32062 100644 --- a/pandas/tests/series/apply/test_series_apply.py +++ b/pandas/tests/series/apply/test_series_apply.py @@ -209,25 +209,16 @@ def test_transform(self, string_series): f_abs = np.abs(string_series) # ufunc - result = string_series.transform(np.sqrt) expected = f_sqrt.copy() - tm.assert_series_equal(result, expected) - result = string_series.apply(np.sqrt) tm.assert_series_equal(result, expected) # list-like - result = string_series.transform([np.sqrt]) + result = string_series.apply([np.sqrt]) expected = f_sqrt.to_frame().copy() expected.columns = ["sqrt"] tm.assert_frame_equal(result, expected) - result = string_series.transform([np.sqrt]) - tm.assert_frame_equal(result, expected) - - result = string_series.transform(["sqrt"]) - tm.assert_frame_equal(result, expected) - # multiple items in list # these are in the order as if we are applying both functions per # series and then concatting @@ -236,10 +227,6 @@ def test_transform(self, string_series): result = string_series.apply([np.sqrt, np.abs]) tm.assert_frame_equal(result, expected) - result = string_series.transform(["sqrt", "abs"]) - expected.columns = ["sqrt", "abs"] - tm.assert_frame_equal(result, expected) - # dict, provide renaming expected = pd.concat([f_sqrt, f_abs], axis=1) expected.columns = ["foo", "bar"] @@ -250,19 +237,11 @@ def test_transform(self, string_series): def test_transform_and_agg_error(self, string_series): # we are trying to transform with an aggregator - msg = "transforms cannot produce aggregated results" - with pytest.raises(ValueError, match=msg): - string_series.transform(["min", "max"]) - msg = "cannot combine transform and aggregation" with pytest.raises(ValueError, match=msg): with np.errstate(all="ignore"): string_series.agg(["sqrt", "max"]) - with pytest.raises(ValueError, match=msg): - with np.errstate(all="ignore"): - string_series.transform(["sqrt", "max"]) - msg = "cannot perform both aggregation and transformation" with pytest.raises(ValueError, match=msg): with np.errstate(all="ignore"): @@ -463,14 +442,6 @@ def test_agg_cython_table_raises(self, series, func, expected): # e.g. 
Series('a b'.split()).cumprod() will raise series.agg(func) - def test_transform_none_to_type(self): - # GH34377 - df = pd.DataFrame({"a": [None]}) - - msg = "DataFrame constructor called with incompatible data and dtype" - with pytest.raises(TypeError, match=msg): - df.transform({"a": int}) - class TestSeriesMap: def test_map(self, datetime_series): diff --git a/pandas/tests/series/apply/test_series_transform.py b/pandas/tests/series/apply/test_series_transform.py new file mode 100644 index 0000000000000..8bc3d2dc4d0db --- /dev/null +++ b/pandas/tests/series/apply/test_series_transform.py @@ -0,0 +1,59 @@ +import numpy as np +import pytest + +import pandas as pd +import pandas._testing as tm + + +def test_transform(string_series): + # transforming functions + + with np.errstate(all="ignore"): + f_sqrt = np.sqrt(string_series) + f_abs = np.abs(string_series) + + # ufunc + result = string_series.transform(np.sqrt) + expected = f_sqrt.copy() + tm.assert_series_equal(result, expected) + + # list-like + result = string_series.transform([np.sqrt]) + expected = f_sqrt.to_frame().copy() + expected.columns = ["sqrt"] + tm.assert_frame_equal(result, expected) + + result = string_series.transform([np.sqrt]) + tm.assert_frame_equal(result, expected) + + result = string_series.transform(["sqrt"]) + tm.assert_frame_equal(result, expected) + + # multiple items in list + # these are in the order as if we are applying both functions per + # series and then concatting + expected = pd.concat([f_sqrt, f_abs], axis=1) + result = string_series.transform(["sqrt", "abs"]) + expected.columns = ["sqrt", "abs"] + tm.assert_frame_equal(result, expected) + + +def test_transform_and_agg_error(string_series): + # we are trying to transform with an aggregator + msg = "transforms cannot produce aggregated results" + with pytest.raises(ValueError, match=msg): + string_series.transform(["min", "max"]) + + msg = "cannot combine transform and aggregation operations" + with pytest.raises(ValueError, match=msg): + with np.errstate(all="ignore"): + string_series.transform(["sqrt", "max"]) + + +def test_transform_none_to_type(): + # GH34377 + df = pd.DataFrame({"a": [None]}) + + msg = "DataFrame constructor called with incompatible data and dtype" + with pytest.raises(TypeError, match=msg): + df.transform({"a": int}) From b431e9a1fa68b17b18806bb6c44d21f0e4e0c86d Mon Sep 17 00:00:00 2001 From: Richard Shadrach <45562402+rhshadrach@users.noreply.github.com> Date: Sun, 6 Sep 2020 13:24:03 -0400 Subject: [PATCH 457/632] CLN: _wrap_applied_output (#36160) --- pandas/core/groupby/generic.py | 191 ++++++++++++++++----------------- 1 file changed, 91 insertions(+), 100 deletions(-) diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index 260e21b1f2593..72003eab24b29 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -1192,113 +1192,104 @@ def _wrap_applied_output(self, keys, values, not_indexed_same=False): return self.obj._constructor() elif isinstance(first_not_none, DataFrame): return self._concat_objects(keys, values, not_indexed_same=not_indexed_same) - else: - key_index = self.grouper.result_index if self.as_index else None - - if isinstance(first_not_none, Series): - # this is to silence a DeprecationWarning - # TODO: Remove when default dtype of empty Series is object - kwargs = first_not_none._construct_axes_dict() - backup = create_series_with_explicit_dtype( - dtype_if_empty=object, **kwargs - ) - - values = [x if (x is not None) else backup for x in values] - v = 
values[0] - - if not isinstance(v, (np.ndarray, Index, Series)) and self.as_index: - # values are not series or array-like but scalars - # self._selection_name not passed through to Series as the - # result should not take the name of original selection - # of columns - return self.obj._constructor_sliced(values, index=key_index) + key_index = self.grouper.result_index if self.as_index else None + + if isinstance(first_not_none, Series): + # this is to silence a DeprecationWarning + # TODO: Remove when default dtype of empty Series is object + kwargs = first_not_none._construct_axes_dict() + backup = create_series_with_explicit_dtype(dtype_if_empty=object, **kwargs) + + values = [x if (x is not None) else backup for x in values] + + v = values[0] + + if not isinstance(v, (np.ndarray, Index, Series)) and self.as_index: + # values are not series or array-like but scalars + # self._selection_name not passed through to Series as the + # result should not take the name of original selection + # of columns + return self.obj._constructor_sliced(values, index=key_index) + + if isinstance(v, Series): + all_indexed_same = all_indexes_same((x.index for x in values)) + + # GH3596 + # provide a reduction (Frame -> Series) if groups are + # unique + if self.squeeze: + applied_index = self._selected_obj._get_axis(self.axis) + singular_series = len(values) == 1 and applied_index.nlevels == 1 + + # assign the name to this series + if singular_series: + values[0].name = keys[0] + + # GH2893 + # we have series in the values array, we want to + # produce a series: + # if any of the sub-series are not indexed the same + # OR we don't have a multi-index and we have only a + # single values + return self._concat_objects( + keys, values, not_indexed_same=not_indexed_same + ) + # still a series + # path added as of GH 5545 + elif all_indexed_same: + from pandas.core.reshape.concat import concat + + return concat(values) + + if not all_indexed_same: + # GH 8467 + return self._concat_objects(keys, values, not_indexed_same=True) + + # Combine values + # vstack+constructor is faster than concat and handles MI-columns + stacked_values = np.vstack([np.asarray(v) for v in values]) + + if self.axis == 0: + index = key_index + columns = v.index.copy() + if columns.name is None: + # GH6124 - propagate name of Series when it's consistent + names = {v.name for v in values} + if len(names) == 1: + columns.name = list(names)[0] else: - if isinstance(v, Series): - all_indexed_same = all_indexes_same((x.index for x in values)) - - # GH3596 - # provide a reduction (Frame -> Series) if groups are - # unique - if self.squeeze: - applied_index = self._selected_obj._get_axis(self.axis) - singular_series = ( - len(values) == 1 and applied_index.nlevels == 1 - ) - - # assign the name to this series - if singular_series: - values[0].name = keys[0] - - # GH2893 - # we have series in the values array, we want to - # produce a series: - # if any of the sub-series are not indexed the same - # OR we don't have a multi-index and we have only a - # single values - return self._concat_objects( - keys, values, not_indexed_same=not_indexed_same - ) - - # still a series - # path added as of GH 5545 - elif all_indexed_same: - from pandas.core.reshape.concat import concat - - return concat(values) - - if not all_indexed_same: - # GH 8467 - return self._concat_objects(keys, values, not_indexed_same=True) - - # Combine values - # vstack+constructor is faster than concat and handles MI-columns - stacked_values = np.vstack([np.asarray(v) for v in 
values]) - - if self.axis == 0: - index = key_index - columns = v.index.copy() - if columns.name is None: - # GH6124 - propagate name of Series when it's consistent - names = {v.name for v in values} - if len(names) == 1: - columns.name = list(names)[0] - else: - index = v.index - columns = key_index - stacked_values = stacked_values.T - - result = self.obj._constructor( - stacked_values, index=index, columns=columns - ) + index = v.index + columns = key_index + stacked_values = stacked_values.T - elif not self.as_index: - # We add grouping column below, so create a frame here - result = DataFrame( - values, index=key_index, columns=[self._selection] - ) - else: - # GH#1738: values is list of arrays of unequal lengths - # fall through to the outer else clause - # TODO: sure this is right? we used to do this - # after raising AttributeError above - return self.obj._constructor_sliced( - values, index=key_index, name=self._selection_name - ) + result = self.obj._constructor(stacked_values, index=index, columns=columns) - # if we have date/time like in the original, then coerce dates - # as we are stacking can easily have object dtypes here - so = self._selected_obj - if so.ndim == 2 and so.dtypes.apply(needs_i8_conversion).any(): - result = _recast_datetimelike_result(result) - else: - result = result._convert(datetime=True) + elif not self.as_index: + # We add grouping column below, so create a frame here + result = DataFrame(values, index=key_index, columns=[self._selection]) + else: + # GH#1738: values is list of arrays of unequal lengths + # fall through to the outer else clause + # TODO: sure this is right? we used to do this + # after raising AttributeError above + return self.obj._constructor_sliced( + values, index=key_index, name=self._selection_name + ) + + # if we have date/time like in the original, then coerce dates + # as we are stacking can easily have object dtypes here + so = self._selected_obj + if so.ndim == 2 and so.dtypes.apply(needs_i8_conversion).any(): + result = _recast_datetimelike_result(result) + else: + result = result._convert(datetime=True) - if not self.as_index: - self._insert_inaxis_grouper_inplace(result) + if not self.as_index: + self._insert_inaxis_grouper_inplace(result) - return self._reindex_output(result) + return self._reindex_output(result) def _transform_general( self, func, *args, engine="cython", engine_kwargs=None, **kwargs From ba552ec0e3fd077eabbfbc98cae6d45980eb0d09 Mon Sep 17 00:00:00 2001 From: Alex Kirko Date: Sun, 6 Sep 2020 20:47:40 +0300 Subject: [PATCH 458/632] BUG: allow missing values in Index when calling Index.sort_values (#35604) --- doc/source/whatsnew/v1.2.0.rst | 3 +- pandas/conftest.py | 23 ++++++++ pandas/core/indexes/base.py | 27 ++++++++-- .../tests/indexes/interval/test_interval.py | 2 +- pandas/tests/indexes/period/test_ops.py | 16 ++++-- pandas/tests/indexes/test_common.py | 52 ++++++++++++++++++- 6 files changed, 112 insertions(+), 11 deletions(-) diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index ff9e803b4990a..b4fdbf9588ffe 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -270,8 +270,9 @@ Interval Indexing ^^^^^^^^ + - Bug in :meth:`PeriodIndex.get_loc` incorrectly raising ``ValueError`` on non-datelike strings instead of ``KeyError``, causing similar errors in :meth:`Series.__geitem__`, :meth:`Series.__contains__`, and :meth:`Series.loc.__getitem__` (:issue:`34240`) -- +- Bug in :meth:`Index.sort_values` where, when empty values were passed, the 
method would break by trying to compare missing values instead of pushing them to the end of the sort order. (:issue:`35584`) - Missing diff --git a/pandas/conftest.py b/pandas/conftest.py index 0878380d00837..5474005a63b8e 100644 --- a/pandas/conftest.py +++ b/pandas/conftest.py @@ -437,6 +437,29 @@ def index(request): index_fixture2 = index +@pytest.fixture(params=indices_dict.keys()) +def index_with_missing(request): + """ + Fixture for indices with missing values + """ + if request.param in ["int", "uint", "range", "empty", "repeats"]: + pytest.xfail("missing values not supported") + # GH 35538. Use deep copy to avoid illusive bug on np-dev + # Azure pipeline that writes into indices_dict despite copy + ind = indices_dict[request.param].copy(deep=True) + vals = ind.values + if request.param in ["tuples", "mi-with-dt64tz-level", "multi"]: + # For setting missing values in the top level of MultiIndex + vals = ind.tolist() + vals[0] = tuple([None]) + vals[0][1:] + vals[-1] = tuple([None]) + vals[-1][1:] + return MultiIndex.from_tuples(vals) + else: + vals[0] = None + vals[-1] = None + return type(ind)(vals) + + # ---------------------------------------------------------------- # Series' # ---------------------------------------------------------------- diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 65b5dfb6df911..a1bc8a4659b24 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -88,7 +88,7 @@ import pandas.core.missing as missing from pandas.core.ops import get_op_result_name from pandas.core.ops.invalid import make_invalid_op -from pandas.core.sorting import ensure_key_mapped +from pandas.core.sorting import ensure_key_mapped, nargsort from pandas.core.strings import StringMethods from pandas.io.formats.printing import ( @@ -4443,7 +4443,11 @@ def asof_locs(self, where, mask): return result def sort_values( - self, return_indexer=False, ascending=True, key: Optional[Callable] = None + self, + return_indexer=False, + ascending=True, + na_position: str_t = "last", + key: Optional[Callable] = None, ): """ Return a sorted copy of the index. @@ -4457,6 +4461,12 @@ def sort_values( Should the indices that would sort the index be returned. ascending : bool, default True Should the index values be sorted in an ascending order. + na_position : {'first' or 'last'}, default 'last' + Argument 'first' puts NaNs at the beginning, 'last' puts NaNs at + the end. + + .. versionadded:: 1.2.0 + key : callable, optional If not None, apply the key function to the index values before sorting. This is similar to the `key` argument in the @@ -4497,9 +4507,16 @@ def sort_values( """ idx = ensure_key_mapped(self, key) - _as = idx.argsort() - if not ascending: - _as = _as[::-1] + # GH 35584. 
Sort missing values according to na_position kwarg
+        # ignore na_position for MultiIndex
+        if not isinstance(self, ABCMultiIndex):
+            _as = nargsort(
+                items=idx, ascending=ascending, na_position=na_position, key=key
+            )
+        else:
+            _as = idx.argsort()
+            if not ascending:
+                _as = _as[::-1]

         sorted_index = self.take(_as)

diff --git a/pandas/tests/indexes/interval/test_interval.py b/pandas/tests/indexes/interval/test_interval.py
index a20e542b1edd7..42849e0bbb5c7 100644
--- a/pandas/tests/indexes/interval/test_interval.py
+++ b/pandas/tests/indexes/interval/test_interval.py
@@ -618,7 +618,7 @@ def test_sort_values(self, closed):
         expected = IntervalIndex([Interval(0, 1), Interval(1, 2), np.nan])
         tm.assert_index_equal(result, expected)

-        result = index.sort_values(ascending=False)
+        result = index.sort_values(ascending=False, na_position="first")
         expected = IntervalIndex([np.nan, Interval(1, 2), Interval(0, 1)])
         tm.assert_index_equal(result, expected)

diff --git a/pandas/tests/indexes/period/test_ops.py b/pandas/tests/indexes/period/test_ops.py
index e7dd76584d780..d1b34c315b682 100644
--- a/pandas/tests/indexes/period/test_ops.py
+++ b/pandas/tests/indexes/period/test_ops.py
@@ -174,9 +174,6 @@ def _check_freq(index, expected_index):

         ordered, indexer = idx.sort_values(return_indexer=True, ascending=False)
         tm.assert_index_equal(ordered, expected[::-1])
-
-        exp = np.array([2, 1, 3, 4, 0])
-        tm.assert_numpy_array_equal(indexer, exp, check_dtype=False)
         _check_freq(ordered, idx)

     pidx = PeriodIndex(["2011", "2013", "NaT", "2011"], name="pidx", freq="D")
@@ -333,3 +330,16 @@ def test_freq_setter_deprecated(self):

         # warning for setter
         with pytest.raises(AttributeError, match="can't set attribute"):
             idx.freq = pd.offsets.Day()
+
+
+@pytest.mark.xfail(reason="Datetime-like sort_values currently unstable (GH 35922)")
+def test_order_stability_compat():
+    # GH 35584. The new implementation of sort_values for Index.sort_values
+    # is stable when sorting in descending order. Datetime-like sort_values
+    # currently aren't stable.
xfail should be removed after + # the implementations' behavior is synchronized (xref GH 35922) + pidx = PeriodIndex(["2011", "2013", "2015", "2012", "2011"], name="pidx", freq="A") + iidx = Index([2011, 2013, 2015, 2012, 2011], name="idx") + ordered1, indexer1 = pidx.sort_values(return_indexer=True, ascending=False) + ordered2, indexer2 = iidx.sort_values(return_indexer=True, ascending=False) + tm.assert_numpy_array_equal(indexer1, indexer2) diff --git a/pandas/tests/indexes/test_common.py b/pandas/tests/indexes/test_common.py index db260b71e7186..aa6b395176b06 100644 --- a/pandas/tests/indexes/test_common.py +++ b/pandas/tests/indexes/test_common.py @@ -13,7 +13,14 @@ from pandas.core.dtypes.common import is_period_dtype, needs_i8_conversion import pandas as pd -from pandas import CategoricalIndex, MultiIndex, RangeIndex +from pandas import ( + CategoricalIndex, + DatetimeIndex, + MultiIndex, + PeriodIndex, + RangeIndex, + TimedeltaIndex, +) import pandas._testing as tm @@ -391,3 +398,46 @@ def test_astype_preserves_name(self, index, dtype): assert result.names == index.names else: assert result.name == index.name + + +@pytest.mark.parametrize("na_position", [None, "middle"]) +def test_sort_values_invalid_na_position(index_with_missing, na_position): + if isinstance(index_with_missing, (DatetimeIndex, PeriodIndex, TimedeltaIndex)): + # datetime-like indices will get na_position kwarg as part of + # synchronizing duplicate-sorting behavior, because we currently expect + # them, other indices, and Series to sort differently (xref 35922) + pytest.xfail("sort_values does not support na_position kwarg") + elif isinstance(index_with_missing, (CategoricalIndex, MultiIndex)): + pytest.xfail("missing value sorting order not defined for index type") + + if na_position not in ["first", "last"]: + with pytest.raises( + ValueError, match=f"invalid na_position: {na_position}", + ): + index_with_missing.sort_values(na_position=na_position) + + +@pytest.mark.parametrize("na_position", ["first", "last"]) +def test_sort_values_with_missing(index_with_missing, na_position): + # GH 35584. 
Test that sort_values works with missing values, + # sort non-missing and place missing according to na_position + + if isinstance(index_with_missing, (DatetimeIndex, PeriodIndex, TimedeltaIndex)): + # datetime-like indices will get na_position kwarg as part of + # synchronizing duplicate-sorting behavior, because we currently expect + # them, other indices, and Series to sort differently (xref 35922) + pytest.xfail("sort_values does not support na_position kwarg") + elif isinstance(index_with_missing, (CategoricalIndex, MultiIndex)): + pytest.xfail("missing value sorting order not defined for index type") + + missing_count = np.sum(index_with_missing.isna()) + not_na_vals = index_with_missing[index_with_missing.notna()].values + sorted_values = np.sort(not_na_vals) + if na_position == "first": + sorted_values = np.concatenate([[None] * missing_count, sorted_values]) + else: + sorted_values = np.concatenate([sorted_values, [None] * missing_count]) + expected = type(index_with_missing)(sorted_values) + + result = index_with_missing.sort_values(na_position=na_position) + tm.assert_index_equal(result, expected) From aca77f7b1cc7987a5b757a3f80a278e1f5fc7998 Mon Sep 17 00:00:00 2001 From: Honfung Wong Date: Mon, 7 Sep 2020 01:49:26 +0800 Subject: [PATCH 459/632] BUG: extra leading space in to_string when index=False (#36094) --- doc/source/whatsnew/v1.2.0.rst | 5 ++- pandas/io/formats/format.py | 28 +++++++++++----- pandas/tests/io/formats/test_format.py | 42 +++++++++++++++++++++--- pandas/tests/io/formats/test_to_latex.py | 22 ++++++------- 4 files changed, 71 insertions(+), 26 deletions(-) diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index b4fdbf9588ffe..9a778acba4764 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -214,8 +214,6 @@ Performance improvements Bug fixes ~~~~~~~~~ -- Bug in :meth:`DataFrameGroupBy.apply` raising error with ``np.nan`` group(s) when ``dropna=False`` (:issue:`35889`) -- Categorical ^^^^^^^^^^^ @@ -257,7 +255,7 @@ Conversion Strings ^^^^^^^ - +- Bug in :meth:`Series.to_string`, :meth:`DataFrame.to_string`, and :meth:`DataFrame.to_latex` adding a leading space when ``index=False`` (:issue:`24980`) - - @@ -315,6 +313,7 @@ Groupby/resample/rolling - Bug when subsetting columns on a :class:`~pandas.core.groupby.DataFrameGroupBy` (e.g. ``df.groupby('a')[['b']])``) would reset the attributes ``axis``, ``dropna``, ``group_keys``, ``level``, ``mutated``, ``sort``, and ``squeeze`` to their default values. 
(:issue:`9959`) - Bug in :meth:`DataFrameGroupby.tshift` failing to raise ``ValueError`` when a frequency cannot be inferred for the index of a group (:issue:`35937`) - Bug in :meth:`DataFrame.groupby` does not always maintain column index name for ``any``, ``all``, ``bfill``, ``ffill``, ``shift`` (:issue:`29764`) +- Bug in :meth:`DataFrameGroupBy.apply` raising error with ``np.nan`` group(s) when ``dropna=False`` (:issue:`35889`) - Reshaping diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py index 53b2b533215f0..70e38c3106bdb 100644 --- a/pandas/io/formats/format.py +++ b/pandas/io/formats/format.py @@ -345,6 +345,7 @@ def _get_formatted_values(self) -> List[str]: None, float_format=self.float_format, na_rep=self.na_rep, + leading_space=self.index, ) def to_string(self) -> str: @@ -960,6 +961,7 @@ def _format_col(self, i: int) -> List[str]: na_rep=self.na_rep, space=self.col_space.get(frame.columns[i]), decimal=self.decimal, + leading_space=self.index, ) def to_html( @@ -1111,7 +1113,7 @@ def format_array( space: Optional[Union[str, int]] = None, justify: str = "right", decimal: str = ".", - leading_space: Optional[bool] = None, + leading_space: Optional[bool] = True, quoting: Optional[int] = None, ) -> List[str]: """ @@ -1127,7 +1129,7 @@ def format_array( space justify decimal - leading_space : bool, optional + leading_space : bool, optional, default True Whether the array should be formatted with a leading space. When an array as a column of a Series or DataFrame, we do want the leading space to pad between columns. @@ -1194,7 +1196,7 @@ def __init__( decimal: str = ".", quoting: Optional[int] = None, fixed_width: bool = True, - leading_space: Optional[bool] = None, + leading_space: Optional[bool] = True, ): self.values = values self.digits = digits @@ -1395,9 +1397,11 @@ def format_values_with(float_format): float_format: Optional[FloatFormatType] if self.float_format is None: if self.fixed_width: - float_format = partial( - "{value: .{digits:d}f}".format, digits=self.digits - ) + if self.leading_space is True: + fmt_str = "{value: .{digits:d}f}" + else: + fmt_str = "{value:.{digits:d}f}" + float_format = partial(fmt_str.format, digits=self.digits) else: float_format = self.float_format else: @@ -1429,7 +1433,11 @@ def format_values_with(float_format): ).any() if has_small_values or (too_long and has_large_values): - float_format = partial("{value: .{digits:d}e}".format, digits=self.digits) + if self.leading_space is True: + fmt_str = "{value: .{digits:d}e}" + else: + fmt_str = "{value:.{digits:d}e}" + float_format = partial(fmt_str.format, digits=self.digits) formatted_values = format_values_with(float_format) return formatted_values @@ -1444,7 +1452,11 @@ def _format_strings(self) -> List[str]: class IntArrayFormatter(GenericArrayFormatter): def _format_strings(self) -> List[str]: - formatter = self.formatter or (lambda x: f"{x: d}") + if self.leading_space is False: + formatter_str = lambda x: f"{x:d}".format(x=x) + else: + formatter_str = lambda x: f"{x: d}".format(x=x) + formatter = self.formatter or formatter_str fmt_values = [formatter(x) for x in self.values] return fmt_values diff --git a/pandas/tests/io/formats/test_format.py b/pandas/tests/io/formats/test_format.py index 1fb957505987f..f00fa6274fca2 100644 --- a/pandas/tests/io/formats/test_format.py +++ b/pandas/tests/io/formats/test_format.py @@ -1546,11 +1546,11 @@ def test_to_string_no_index(self): df_s = df.to_string(index=False) # Leading space is expected for positive numbers. 
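Note (illustration, not part of the patch): the expectation changes in this file all encode the GH 24980 fix; a condensed before/after using the same series as the test further down:

    import pandas as pd

    s = pd.Series([1, 2, 3, 4])
    s.to_string(index=False)
    # before the fix: " 1\n 2\n 3\n 4"  (sign-padding on every value)
    # after the fix:  "1\n2\n3\n4"      (padding only when a negative needs it)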
- expected = " x y z\n 11 33 AAA\n 22 -44 " + expected = " x y z\n11 33 AAA\n22 -44 " assert df_s == expected df_s = df[["y", "x", "z"]].to_string(index=False) - expected = " y x z\n 33 11 AAA\n-44 22 " + expected = " y x z\n 33 11 AAA\n-44 22 " assert df_s == expected def test_to_string_line_width_no_index(self): @@ -1565,7 +1565,7 @@ def test_to_string_line_width_no_index(self): df = DataFrame({"x": [11, 22, 33], "y": [4, 5, 6]}) df_s = df.to_string(line_width=1, index=False) - expected = " x \\\n 11 \n 22 \n 33 \n\n y \n 4 \n 5 \n 6 " + expected = " x \\\n11 \n22 \n33 \n\n y \n 4 \n 5 \n 6 " assert df_s == expected @@ -2269,7 +2269,7 @@ def test_to_string_without_index(self): # GH 11729 Test index=False option s = Series([1, 2, 3, 4]) result = s.to_string(index=False) - expected = " 1\n" + " 2\n" + " 3\n" + " 4" + expected = "1\n" + "2\n" + "3\n" + "4" assert result == expected def test_unicode_name_in_footer(self): @@ -3391,3 +3391,37 @@ def test_filepath_or_buffer_bad_arg_raises(float_frame, method): msg = "buf is not a file name and it has no write method" with pytest.raises(TypeError, match=msg): getattr(float_frame, method)(buf=object()) + + +@pytest.mark.parametrize( + "input_array, expected", + [ + ("a", "a"), + (["a", "b"], "a\nb"), + ([1, "a"], "1\na"), + (1, "1"), + ([0, -1], " 0\n-1"), + (1.0, "1.0"), + ([" a", " b"], " a\n b"), + ([".1", "1"], ".1\n 1"), + (["10", "-10"], " 10\n-10"), + ], +) +def test_format_remove_leading_space_series(input_array, expected): + # GH: 24980 + s = pd.Series(input_array).to_string(index=False) + assert s == expected + + +@pytest.mark.parametrize( + "input_array, expected", + [ + ({"A": ["a"]}, "A\na"), + ({"A": ["a", "b"], "B": ["c", "dd"]}, "A B\na c\nb dd"), + ({"A": ["a", 1], "B": ["aa", 1]}, "A B\na aa\n1 1"), + ], +) +def test_format_remove_leading_space_dataframe(input_array, expected): + # GH: 24980 + df = pd.DataFrame(input_array).to_string(index=False) + assert df == expected diff --git a/pandas/tests/io/formats/test_to_latex.py b/pandas/tests/io/formats/test_to_latex.py index 96a9ed2b86cf4..9dfd851e91c65 100644 --- a/pandas/tests/io/formats/test_to_latex.py +++ b/pandas/tests/io/formats/test_to_latex.py @@ -50,10 +50,10 @@ def test_to_latex(self, float_frame): withoutindex_result = df.to_latex(index=False) withoutindex_expected = r"""\begin{tabular}{rl} \toprule - a & b \\ + a & b \\ \midrule - 1 & b1 \\ - 2 & b2 \\ + 1 & b1 \\ + 2 & b2 \\ \bottomrule \end{tabular} """ @@ -413,7 +413,7 @@ def test_to_latex_longtable(self): withoutindex_result = df.to_latex(index=False, longtable=True) withoutindex_expected = r"""\begin{longtable}{rl} \toprule - a & b \\ + a & b \\ \midrule \endhead \midrule @@ -423,8 +423,8 @@ def test_to_latex_longtable(self): \bottomrule \endlastfoot - 1 & b1 \\ - 2 & b2 \\ + 1 & b1 \\ + 2 & b2 \\ \end{longtable} """ @@ -663,8 +663,8 @@ def test_to_latex_no_header(self): withoutindex_result = df.to_latex(index=False, header=False) withoutindex_expected = r"""\begin{tabular}{rl} \toprule - 1 & b1 \\ - 2 & b2 \\ +1 & b1 \\ +2 & b2 \\ \bottomrule \end{tabular} """ @@ -690,10 +690,10 @@ def test_to_latex_specified_header(self): withoutindex_result = df.to_latex(header=["AA", "BB"], index=False) withoutindex_expected = r"""\begin{tabular}{rl} \toprule -AA & BB \\ +AA & BB \\ \midrule - 1 & b1 \\ - 2 & b2 \\ + 1 & b1 \\ + 2 & b2 \\ \bottomrule \end{tabular} """ From b9a97691de3e85771d84e786ff40351260a5cfcc Mon Sep 17 00:00:00 2001 From: Harsh Sharma <51477130+hs2361@users.noreply.github.com> Date: Mon, 7 Sep 2020 
16:46:11 +0530 Subject: [PATCH 460/632] =?UTF-8?q?BUG:=20shows=20correct=20package=20name?= =?UTF-8?q?=20when=20import=5Foptional=5Fdependency=20is=20ca=E2=80=A6=20(?= =?UTF-8?q?#36134)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- doc/source/whatsnew/v1.1.2.rst | 1 + pandas/compat/_optional.py | 21 +++++++++++++++++++-- 2 files changed, 20 insertions(+), 2 deletions(-) diff --git a/doc/source/whatsnew/v1.1.2.rst b/doc/source/whatsnew/v1.1.2.rst index 1e946d325ace1..da261907565a1 100644 --- a/doc/source/whatsnew/v1.1.2.rst +++ b/doc/source/whatsnew/v1.1.2.rst @@ -41,6 +41,7 @@ Bug fixes - Bug in :meth:`Series.dt.isocalendar` and :meth:`DatetimeIndex.isocalendar` that returned incorrect year for certain dates (:issue:`36032`) - Bug in :class:`DataFrame` indexing returning an incorrect :class:`Series` in some cases when the series has been altered and a cache not invalidated (:issue:`33675`) - Bug in :meth:`DataFrame.corr` causing subsequent indexing lookups to be incorrect (:issue:`35882`) +- Bug in :meth:`import_optional_dependency` returning incorrect package names in cases where package name is different from import name (:issue:`35948`) .. --------------------------------------------------------------------------- diff --git a/pandas/compat/_optional.py b/pandas/compat/_optional.py index 689c7c889ef66..40688a3978cfc 100644 --- a/pandas/compat/_optional.py +++ b/pandas/compat/_optional.py @@ -33,6 +33,19 @@ "numba": "0.46.0", } +# A mapping from import name to package name (on PyPI) for packages where +# these two names are different. + +INSTALL_MAPPING = { + "bs4": "beautifulsoup4", + "bottleneck": "Bottleneck", + "lxml.etree": "lxml", + "odf": "odfpy", + "pandas_gbq": "pandas-gbq", + "sqlalchemy": "SQLAlchemy", + "jinja2": "Jinja2", +} + def _get_version(module: types.ModuleType) -> str: version = getattr(module, "__version__", None) @@ -82,9 +95,13 @@ def import_optional_dependency( is False, or when the package's version is too old and `on_version` is ``'warn'``. """ + + package_name = INSTALL_MAPPING.get(name) + install_name = package_name if package_name is not None else name + msg = ( - f"Missing optional dependency '{name}'. {extra} " - f"Use pip or conda to install {name}." + f"Missing optional dependency '{install_name}'. {extra} " + f"Use pip or conda to install {install_name}." 
) try: module = importlib.import_module(name) From 773f64d22782d0a304999c650d19123e183f496c Mon Sep 17 00:00:00 2001 From: ivanovmg <41443370+ivanovmg@users.noreply.github.com> Date: Tue, 8 Sep 2020 01:58:50 +0700 Subject: [PATCH 461/632] REF: simplify latex formatting (#35872) --- pandas/io/formats/format.py | 7 +- pandas/io/formats/latex.py | 778 +++++++++++++++++------ pandas/tests/io/formats/test_to_latex.py | 102 +++ 3 files changed, 694 insertions(+), 193 deletions(-) diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py index 70e38c3106bdb..623dc6e6bad91 100644 --- a/pandas/io/formats/format.py +++ b/pandas/io/formats/format.py @@ -939,17 +939,18 @@ def to_latex( """ from pandas.io.formats.latex import LatexFormatter - return LatexFormatter( + latex_formatter = LatexFormatter( self, - column_format=column_format, longtable=longtable, + column_format=column_format, multicolumn=multicolumn, multicolumn_format=multicolumn_format, multirow=multirow, caption=caption, label=label, position=position, - ).get_result(buf=buf, encoding=encoding) + ) + return latex_formatter.get_result(buf=buf, encoding=encoding) def _format_col(self, i: int) -> List[str]: frame = self.tr_frame diff --git a/pandas/io/formats/latex.py b/pandas/io/formats/latex.py index 715b8bbdf5672..8080d953da308 100644 --- a/pandas/io/formats/latex.py +++ b/pandas/io/formats/latex.py @@ -1,7 +1,8 @@ """ Module for formatting output data in Latex. """ -from typing import IO, List, Optional, Tuple +from abc import ABC, abstractmethod +from typing import IO, Iterator, List, Optional, Type import numpy as np @@ -10,56 +11,95 @@ from pandas.io.formats.format import DataFrameFormatter, TableFormatter -class LatexFormatter(TableFormatter): - """ - Used to render a DataFrame to a LaTeX tabular/longtable environment output. +class RowStringConverter(ABC): + r"""Converter for dataframe rows into LaTeX strings. Parameters ---------- formatter : `DataFrameFormatter` - column_format : str, default None - The columns format as specified in `LaTeX table format - `__ e.g 'rcl' for 3 columns - longtable : boolean, default False - Use a longtable environment instead of tabular. + Instance of `DataFrameFormatter`. + multicolumn: bool, optional + Whether to use \multicolumn macro. + multicolumn_format: str, optional + Multicolumn format. + multirow: bool, optional + Whether to use \multirow macro. 
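Note (hedged sketch, not part of the patch): how these converter classes are meant to be driven. RowBodyIterator is added further down in this patch; the exact cell spacing of the output may differ from the comment below:

    import pandas as pd
    from pandas.io.formats.format import DataFrameFormatter
    from pandas.io.formats.latex import RowBodyIterator

    df = pd.DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
    formatter = DataFrameFormatter(df)
    for row in RowBodyIterator(formatter):
        print(row)  # e.g. "0 & 1 & b1 \\"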
- See Also - -------- - HTMLFormatter """ def __init__( self, formatter: DataFrameFormatter, - column_format: Optional[str] = None, - longtable: bool = False, multicolumn: bool = False, multicolumn_format: Optional[str] = None, multirow: bool = False, - caption: Optional[str] = None, - label: Optional[str] = None, - position: Optional[str] = None, ): self.fmt = formatter self.frame = self.fmt.frame - self.bold_rows = self.fmt.bold_rows - self.column_format = column_format - self.longtable = longtable self.multicolumn = multicolumn self.multicolumn_format = multicolumn_format self.multirow = multirow - self.caption = caption - self.label = label - self.escape = self.fmt.escape - self.position = position - self._table_float = any(p is not None for p in (caption, label, position)) + self.clinebuf: List[List[int]] = [] + self.strcols = self._get_strcols() + self.strrows: List[List[str]] = ( + list(zip(*self.strcols)) # type: ignore[arg-type] + ) + + def get_strrow(self, row_num: int) -> str: + """Get string representation of the row.""" + row = self.strrows[row_num] + + is_multicol = ( + row_num < self.column_levels and self.fmt.header and self.multicolumn + ) + + is_multirow = ( + row_num >= self.header_levels + and self.fmt.index + and self.multirow + and self.index_levels > 1 + ) + + is_cline_maybe_required = is_multirow and row_num < len(self.strrows) - 1 + + crow = self._preprocess_row(row) + + if is_multicol: + crow = self._format_multicolumn(crow) + if is_multirow: + crow = self._format_multirow(crow, row_num) + + lst = [] + lst.append(" & ".join(crow)) + lst.append(" \\\\") + if is_cline_maybe_required: + cline = self._compose_cline(row_num, len(self.strcols)) + lst.append(cline) + return "".join(lst) + + @property + def _header_row_num(self) -> int: + """Number of rows in header.""" + return self.header_levels if self.fmt.header else 0 + + @property + def index_levels(self) -> int: + """Integer number of levels in index.""" + return self.frame.index.nlevels + + @property + def column_levels(self) -> int: + return self.frame.columns.nlevels + + @property + def header_levels(self) -> int: + nlevels = self.column_levels + if self.fmt.has_index_names and self.fmt.show_index_names: + nlevels += 1 + return nlevels - def write_result(self, buf: IO[str]) -> None: - """ - Render a DataFrame to a LaTeX tabular, longtable, or table/tabular - environment output. 
- """ - # string representation of the columns + def _get_strcols(self) -> List[List[str]]: + """String representation of the columns.""" if len(self.frame.columns) == 0 or len(self.frame.index) == 0: info_line = ( f"Empty {type(self.frame).__name__}\n" @@ -70,12 +110,6 @@ def write_result(self, buf: IO[str]) -> None: else: strcols = self.fmt._to_str_columns() - def get_col_type(dtype): - if issubclass(dtype.type, np.number): - return "r" - else: - return "l" - # reestablish the MultiIndex that has been joined by _to_str_column if self.fmt.index and isinstance(self.frame.index, ABCMultiIndex): out = self.frame.index.format( @@ -107,89 +141,19 @@ def pad_empties(x): # Get rid of old multiindex column and add new ones strcols = out + strcols[1:] + return strcols - if self.column_format is None: - dtypes = self.frame.dtypes._values - column_format = "".join(map(get_col_type, dtypes)) - if self.fmt.index: - index_format = "l" * self.frame.index.nlevels - column_format = index_format + column_format - elif not isinstance(self.column_format, str): # pragma: no cover - raise AssertionError( - f"column_format must be str or unicode, not {type(column_format)}" - ) + def _preprocess_row(self, row: List[str]) -> List[str]: + """Preprocess elements of the row.""" + if self.fmt.escape: + crow = _escape_symbols(row) else: - column_format = self.column_format - - self._write_tabular_begin(buf, column_format) - - buf.write("\\toprule\n") + crow = [x if x else "{}" for x in row] + if self.fmt.bold_rows and self.fmt.index: + crow = _convert_to_bold(crow, self.index_levels) + return crow - ilevels = self.frame.index.nlevels - clevels = self.frame.columns.nlevels - nlevels = clevels - if self.fmt.has_index_names and self.fmt.show_index_names: - nlevels += 1 - strrows = list(zip(*strcols)) - self.clinebuf: List[List[int]] = [] - - for i, row in enumerate(strrows): - if i == nlevels and self.fmt.header: - buf.write("\\midrule\n") # End of header - if self.longtable: - buf.write("\\endhead\n") - buf.write("\\midrule\n") - buf.write( - f"\\multicolumn{{{len(row)}}}{{r}}" - "{{Continued on next page}} \\\\\n" - ) - buf.write("\\midrule\n") - buf.write("\\endfoot\n\n") - buf.write("\\bottomrule\n") - buf.write("\\endlastfoot\n") - if self.escape: - # escape backslashes first - crow = [ - ( - x.replace("\\", "\\textbackslash ") - .replace("_", "\\_") - .replace("%", "\\%") - .replace("$", "\\$") - .replace("#", "\\#") - .replace("{", "\\{") - .replace("}", "\\}") - .replace("~", "\\textasciitilde ") - .replace("^", "\\textasciicircum ") - .replace("&", "\\&") - if (x and x != "{}") - else "{}" - ) - for x in row - ] - else: - crow = [x if x else "{}" for x in row] - if self.bold_rows and self.fmt.index: - # bold row labels - crow = [ - f"\\textbf{{{x}}}" - if j < ilevels and x.strip() not in ["", "{}"] - else x - for j, x in enumerate(crow) - ] - if i < clevels and self.fmt.header and self.multicolumn: - # sum up columns to multicolumns - crow = self._format_multicolumn(crow, ilevels) - if i >= nlevels and self.fmt.index and self.multirow and ilevels > 1: - # sum up rows to multirows - crow = self._format_multirow(crow, ilevels, i, strrows) - buf.write(" & ".join(crow)) - buf.write(" \\\\\n") - if self.multirow and i < len(strrows) - 1: - self._print_cline(buf, i, len(strcols)) - - self._write_tabular_end(buf) - - def _format_multicolumn(self, row: List[str], ilevels: int) -> List[str]: + def _format_multicolumn(self, row: List[str]) -> List[str]: r""" Combine columns belonging to a group to a single multicolumn 
entry according to self.multicolumn_format @@ -199,7 +163,7 @@ def _format_multicolumn(self, row: List[str], ilevels: int) -> List[str]: will become \multicolumn{3}{l}{a} & b & \multicolumn{2}{l}{c} """ - row2 = list(row[:ilevels]) + row2 = row[: self.index_levels] ncol = 1 coltext = "" @@ -214,7 +178,7 @@ def append_col(): else: row2.append(coltext) - for c in row[ilevels:]: + for c in row[self.index_levels :]: # if next col has text, write the previous if c.strip(): if coltext: @@ -229,9 +193,7 @@ def append_col(): append_col() return row2 - def _format_multirow( - self, row: List[str], ilevels: int, i: int, rows: List[Tuple[str, ...]] - ) -> List[str]: + def _format_multirow(self, row: List[str], i: int) -> List[str]: r""" Check following rows, whether row should be a multirow @@ -241,10 +203,10 @@ def _format_multirow( b & 0 & \cline{1-2} b & 0 & """ - for j in range(ilevels): + for j in range(self.index_levels): if row[j].strip(): nrow = 1 - for r in rows[i + 1 :]: + for r in self.strrows[i + 1 :]: if not r[j].strip(): nrow += 1 else: @@ -256,88 +218,524 @@ def _format_multirow( self.clinebuf.append([i + nrow - 1, j + 1]) return row - def _print_cline(self, buf: IO[str], i: int, icol: int) -> None: + def _compose_cline(self, i: int, icol: int) -> str: """ - Print clines after multirow-blocks are finished. + Create clines after multirow-blocks are finished. """ + lst = [] for cl in self.clinebuf: if cl[0] == i: - buf.write(f"\\cline{{{cl[1]:d}-{icol:d}}}\n") - # remove entries that have been written to buffer - self.clinebuf = [x for x in self.clinebuf if x[0] != i] + lst.append(f"\n\\cline{{{cl[1]:d}-{icol:d}}}") + # remove entries that have been written to buffer + self.clinebuf = [x for x in self.clinebuf if x[0] != i] + return "".join(lst) + + +class RowStringIterator(RowStringConverter): + """Iterator over rows of the header or the body of the table.""" + + @abstractmethod + def __iter__(self) -> Iterator[str]: + """Iterate over LaTeX string representations of rows.""" + + +class RowHeaderIterator(RowStringIterator): + """Iterator for the table header rows.""" + + def __iter__(self) -> Iterator[str]: + for row_num in range(len(self.strrows)): + if row_num < self._header_row_num: + yield self.get_strrow(row_num) + + +class RowBodyIterator(RowStringIterator): + """Iterator for the table body rows.""" + + def __iter__(self) -> Iterator[str]: + for row_num in range(len(self.strrows)): + if row_num >= self._header_row_num: + yield self.get_strrow(row_num) - def _write_tabular_begin(self, buf, column_format: str): - """ - Write the beginning of a tabular environment or - nested table/tabular environments including caption and label. + +class TableBuilderAbstract(ABC): + """ + Abstract table builder producing string representation of LaTeX table. + + Parameters + ---------- + formatter : `DataFrameFormatter` + Instance of `DataFrameFormatter`. + column_format: str, optional + Column format, for example, 'rcl' for three columns. + multicolumn: bool, optional + Use multicolumn to enhance MultiIndex columns. + multicolumn_format: str, optional + The alignment for multicolumns, similar to column_format. + multirow: bool, optional + Use multirow to enhance MultiIndex rows. + caption: str, optional + Table caption. + label: str, optional + LaTeX label. + position: str, optional + Float placement specifier, for example, 'htb'. 
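Note (assumption, not shown in this diff): LatexFormatter presumably dispatches to one of the concrete builders defined below; a sketch of that selection, where build_table is an invented helper rather than the committed code:

    from pandas.io.formats.latex import LongTableBuilder, RegularTableBuilder

    def build_table(formatter, longtable: bool = False, **builder_kwargs) -> str:
        # LongTableBuilder and RegularTableBuilder are defined later in
        # this patch; only this dispatch helper itself is hypothetical.
        builder_cls = LongTableBuilder if longtable else RegularTableBuilder
        return builder_cls(formatter, **builder_kwargs).get_result()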
+ """ + + def __init__( + self, + formatter: DataFrameFormatter, + column_format: Optional[str] = None, + multicolumn: bool = False, + multicolumn_format: Optional[str] = None, + multirow: bool = False, + caption: Optional[str] = None, + label: Optional[str] = None, + position: Optional[str] = None, + ): + self.fmt = formatter + self.column_format = column_format + self.multicolumn = multicolumn + self.multicolumn_format = multicolumn_format + self.multirow = multirow + self.caption = caption + self.label = label + self.position = position + + def get_result(self) -> str: + """String representation of LaTeX table.""" + elements = [ + self.env_begin, + self.top_separator, + self.header, + self.middle_separator, + self.env_body, + self.bottom_separator, + self.env_end, + ] + result = "\n".join([item for item in elements if item]) + trailing_newline = "\n" + result += trailing_newline + return result + + @property + @abstractmethod + def env_begin(self) -> str: + """Beginning of the environment.""" + + @property + @abstractmethod + def top_separator(self) -> str: + """Top level separator.""" + + @property + @abstractmethod + def header(self) -> str: + """Header lines.""" + + @property + @abstractmethod + def middle_separator(self) -> str: + """Middle level separator.""" + + @property + @abstractmethod + def env_body(self) -> str: + """Environment body.""" + + @property + @abstractmethod + def bottom_separator(self) -> str: + """Bottom level separator.""" + + @property + @abstractmethod + def env_end(self) -> str: + """End of the environment.""" + + +class GenericTableBuilder(TableBuilderAbstract): + """Table builder producing string representation of LaTeX table.""" + + @property + def header(self) -> str: + iterator = self._create_row_iterator(over="header") + return "\n".join(list(iterator)) + + @property + def top_separator(self) -> str: + return "\\toprule" + + @property + def middle_separator(self) -> str: + return "\\midrule" if self._is_separator_required() else "" + + @property + def env_body(self) -> str: + iterator = self._create_row_iterator(over="body") + return "\n".join(list(iterator)) + + def _is_separator_required(self) -> bool: + return bool(self.header and self.env_body) + + @property + def _position_macro(self) -> str: + r"""Position macro, extracted from self.position, like [h].""" + return f"[{self.position}]" if self.position else "" + + @property + def _caption_macro(self) -> str: + r"""Caption macro, extracted from self.caption, like \caption{cap}.""" + return f"\\caption{{{self.caption}}}" if self.caption else "" + + @property + def _label_macro(self) -> str: + r"""Label macro, extracted from self.label, like \label{ref}.""" + return f"\\label{{{self.label}}}" if self.label else "" + + def _create_row_iterator(self, over: str) -> RowStringIterator: + """Create iterator over header or body of the table. Parameters ---------- - buf : string or file handle - File path or object. If not specified, the result is returned as - a string. - column_format : str - The columns format as specified in `LaTeX table format - `__ e.g 'rcl' - for 3 columns + over : {'body', 'header'} + Over what to iterate. + + Returns + ------- + RowStringIterator + Iterator over body or header. 
""" - if self._table_float: - # then write output in a nested table/tabular or longtable environment - if self.caption is None: - caption_ = "" - else: - caption_ = f"\n\\caption{{{self.caption}}}" + iterator_kind = self._select_iterator(over) + return iterator_kind( + formatter=self.fmt, + multicolumn=self.multicolumn, + multicolumn_format=self.multicolumn_format, + multirow=self.multirow, + ) + + def _select_iterator(self, over: str) -> Type[RowStringIterator]: + """Select proper iterator over table rows.""" + if over == "header": + return RowHeaderIterator + elif over == "body": + return RowBodyIterator + else: + msg = f"'over' must be either 'header' or 'body', but {over} was provided" + raise ValueError(msg) + + +class LongTableBuilder(GenericTableBuilder): + """Concrete table builder for longtable. + + >>> from pandas import DataFrame + >>> from pandas.io.formats import format as fmt + >>> df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]}) + >>> formatter = fmt.DataFrameFormatter(df) + >>> builder = LongTableBuilder(formatter, caption='caption', label='lab', + ... column_format='lrl') + >>> table = builder.get_result() + >>> print(table) + \\begin{longtable}{lrl} + \\caption{caption} + \\label{lab}\\\\ + \\toprule + {} & a & b \\\\ + \\midrule + \\endhead + \\midrule + \\multicolumn{3}{r}{{Continued on next page}} \\\\ + \\midrule + \\endfoot + + \\bottomrule + \\endlastfoot + 0 & 1 & b1 \\\\ + 1 & 2 & b2 \\\\ + \\end{longtable} + + """ - if self.label is None: - label_ = "" - else: - label_ = f"\n\\label{{{self.label}}}" + @property + def env_begin(self) -> str: + first_row = ( + f"\\begin{{longtable}}{self._position_macro}{{{self.column_format}}}" + ) + elements = [first_row, f"{self._caption_and_label()}"] + return "\n".join([item for item in elements if item]) + + def _caption_and_label(self) -> str: + if self.caption or self.label: + double_backslash = "\\\\" + elements = [f"{self._caption_macro}", f"{self._label_macro}"] + caption_and_label = "\n".join([item for item in elements if item]) + caption_and_label += double_backslash + return caption_and_label + else: + return "" + + @property + def middle_separator(self) -> str: + iterator = self._create_row_iterator(over="header") + elements = [ + "\\midrule", + "\\endhead", + "\\midrule", + f"\\multicolumn{{{len(iterator.strcols)}}}{{r}}" + "{{Continued on next page}} \\\\", + "\\midrule", + "\\endfoot\n", + "\\bottomrule", + "\\endlastfoot", + ] + if self._is_separator_required(): + return "\n".join(elements) + return "" + + @property + def bottom_separator(self) -> str: + return "" + + @property + def env_end(self) -> str: + return "\\end{longtable}" + + +class RegularTableBuilder(GenericTableBuilder): + """Concrete table builder for regular table. + + >>> from pandas import DataFrame + >>> from pandas.io.formats import format as fmt + >>> df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]}) + >>> formatter = fmt.DataFrameFormatter(df) + >>> builder = RegularTableBuilder(formatter, caption='caption', label='lab', + ... 
column_format='lrc') + >>> table = builder.get_result() + >>> print(table) + \\begin{table} + \\centering + \\caption{caption} + \\label{lab} + \\begin{tabular}{lrc} + \\toprule + {} & a & b \\\\ + \\midrule + 0 & 1 & b1 \\\\ + 1 & 2 & b2 \\\\ + \\bottomrule + \\end{tabular} + \\end{table} + + """ - if self.position is None: - position_ = "" - else: - position_ = f"[{self.position}]" + @property + def env_begin(self) -> str: + elements = [ + f"\\begin{{table}}{self._position_macro}", + "\\centering", + f"{self._caption_macro}", + f"{self._label_macro}", + f"\\begin{{tabular}}{{{self.column_format}}}", + ] + return "\n".join([item for item in elements if item]) + + @property + def bottom_separator(self) -> str: + return "\\bottomrule" + + @property + def env_end(self) -> str: + return "\n".join(["\\end{tabular}", "\\end{table}"]) + + +class TabularBuilder(GenericTableBuilder): + """Concrete table builder for tabular environment. + + >>> from pandas import DataFrame + >>> from pandas.io.formats import format as fmt + >>> df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]}) + >>> formatter = fmt.DataFrameFormatter(df) + >>> builder = TabularBuilder(formatter, column_format='lrc') + >>> table = builder.get_result() + >>> print(table) + \\begin{tabular}{lrc} + \\toprule + {} & a & b \\\\ + \\midrule + 0 & 1 & b1 \\\\ + 1 & 2 & b2 \\\\ + \\bottomrule + \\end{tabular} + + """ - if self.longtable: - table_ = f"\\begin{{longtable}}{position_}{{{column_format}}}" - tabular_ = "\n" - else: - table_ = f"\\begin{{table}}{position_}\n\\centering" - tabular_ = f"\n\\begin{{tabular}}{{{column_format}}}\n" - - if self.longtable and (self.caption is not None or self.label is not None): - # a double-backslash is required at the end of the line - # as discussed here: - # https://tex.stackexchange.com/questions/219138 - backlash_ = "\\\\" - else: - backlash_ = "" - buf.write(f"{table_}{caption_}{label_}{backlash_}{tabular_}") - else: - if self.longtable: - tabletype_ = "longtable" - else: - tabletype_ = "tabular" - buf.write(f"\\begin{{{tabletype_}}}{{{column_format}}}\n") + @property + def env_begin(self) -> str: + return f"\\begin{{tabular}}{{{self.column_format}}}" + + @property + def bottom_separator(self) -> str: + return "\\bottomrule" + + @property + def env_end(self) -> str: + return "\\end{tabular}" + + +class LatexFormatter(TableFormatter): + """ + Used to render a DataFrame to a LaTeX tabular/longtable environment output. + + Parameters + ---------- + formatter : `DataFrameFormatter` + column_format : str, default None + The columns format as specified in `LaTeX table format + `__ e.g 'rcl' for 3 columns + + See Also + -------- + HTMLFormatter + """ + + def __init__( + self, + formatter: DataFrameFormatter, + longtable: bool = False, + column_format: Optional[str] = None, + multicolumn: bool = False, + multicolumn_format: Optional[str] = None, + multirow: bool = False, + caption: Optional[str] = None, + label: Optional[str] = None, + position: Optional[str] = None, + ): + self.fmt = formatter + self.frame = self.fmt.frame + self.longtable = longtable + self.column_format = column_format # type: ignore[assignment] + self.multicolumn = multicolumn + self.multicolumn_format = multicolumn_format + self.multirow = multirow + self.caption = caption + self.label = label + self.position = position - def _write_tabular_end(self, buf): + def write_result(self, buf: IO[str]) -> None: """ - Write the end of a tabular environment or nested table/tabular - environment. 
+ Render a DataFrame to a LaTeX tabular, longtable, or table/tabular + environment output. + """ + table_string = self.builder.get_result() + buf.write(table_string) - Parameters - ---------- - buf : string or file handle - File path or object. If not specified, the result is returned as - a string. + @property + def builder(self) -> TableBuilderAbstract: + """Concrete table builder. + Returns + ------- + TableBuilder """ + builder = self._select_builder() + return builder( + formatter=self.fmt, + column_format=self.column_format, + multicolumn=self.multicolumn, + multicolumn_format=self.multicolumn_format, + multirow=self.multirow, + caption=self.caption, + label=self.label, + position=self.position, + ) + + def _select_builder(self) -> Type[TableBuilderAbstract]: + """Select proper table builder.""" if self.longtable: - buf.write("\\end{longtable}\n") + return LongTableBuilder + if any([self.caption, self.label, self.position]): + return RegularTableBuilder + return TabularBuilder + + @property + def column_format(self) -> str: + """Column format.""" + return self._column_format + + @column_format.setter + def column_format(self, input_column_format: Optional[str]) -> None: + """Setter for column format.""" + if input_column_format is None: + self._column_format = ( + self._get_index_format() + self._get_column_format_based_on_dtypes() + ) + elif not isinstance(input_column_format, str): + raise ValueError( + f"column_format must be str or unicode, " + f"not {type(input_column_format)}" + ) else: - buf.write("\\bottomrule\n") - buf.write("\\end{tabular}\n") - if self._table_float: - buf.write("\\end{table}\n") - else: - pass + self._column_format = input_column_format + + def _get_column_format_based_on_dtypes(self) -> str: + """Get column format based on data type. + + Right alignment for numbers and left - for strings. + """ + + def get_col_type(dtype): + if issubclass(dtype.type, np.number): + return "r" + return "l" + + dtypes = self.frame.dtypes._values + return "".join(map(get_col_type, dtypes)) + + def _get_index_format(self) -> str: + """Get index column format.""" + return "l" * self.frame.index.nlevels if self.fmt.index else "" + + +def _escape_symbols(row: List[str]) -> List[str]: + """Carry out string replacements for special symbols. + + Parameters + ---------- + row : list + List of string, that may contain special symbols. + + Returns + ------- + list + list of strings with the special symbols replaced. 
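+
+    Examples
+    --------
+    A small illustration (escaped output shown informally):
+
+    >>> _escape_symbols(["x&y", "50%"])  # doctest: +SKIP
+    ['x\\&y', '50\\%']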
+ """ + return [ + ( + x.replace("\\", "\\textbackslash ") + .replace("_", "\\_") + .replace("%", "\\%") + .replace("$", "\\$") + .replace("#", "\\#") + .replace("{", "\\{") + .replace("}", "\\}") + .replace("~", "\\textasciitilde ") + .replace("^", "\\textasciicircum ") + .replace("&", "\\&") + if (x and x != "{}") + else "{}" + ) + for x in row + ] + + +def _convert_to_bold(crow: List[str], ilevels: int) -> List[str]: + """Convert elements in ``crow`` to bold.""" + return [ + f"\\textbf{{{x}}}" if j < ilevels and x.strip() not in ["", "{}"] else x + for j, x in enumerate(crow) + ] + + +if __name__ == "__main__": + import doctest + + doctest.testmod() diff --git a/pandas/tests/io/formats/test_to_latex.py b/pandas/tests/io/formats/test_to_latex.py index 9dfd851e91c65..a98644250b328 100644 --- a/pandas/tests/io/formats/test_to_latex.py +++ b/pandas/tests/io/formats/test_to_latex.py @@ -7,6 +7,14 @@ from pandas import DataFrame, Series import pandas._testing as tm +from pandas.io.formats.format import DataFrameFormatter +from pandas.io.formats.latex import ( + RegularTableBuilder, + RowBodyIterator, + RowHeaderIterator, + RowStringConverter, +) + class TestToLatex: def test_to_latex_filename(self, float_frame): @@ -60,6 +68,16 @@ def test_to_latex(self, float_frame): assert withoutindex_result == withoutindex_expected + @pytest.mark.parametrize( + "bad_column_format", + [5, 1.2, ["l", "r"], ("r", "c"), {"r", "c", "l"}, dict(a="r", b="l")], + ) + def test_to_latex_bad_column_format(self, bad_column_format): + df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]}) + msg = r"column_format must be str or unicode" + with pytest.raises(ValueError, match=msg): + df.to_latex(column_format=bad_column_format) + def test_to_latex_format(self, float_frame): # GH Bug #9402 float_frame.to_latex(column_format="ccc") @@ -930,3 +948,87 @@ def test_to_latex_multindex_header(self): \end{tabular} """ assert observed == expected + + +class TestTableBuilder: + @pytest.fixture + def dataframe(self): + return DataFrame({"a": [1, 2], "b": ["b1", "b2"]}) + + @pytest.fixture + def table_builder(self, dataframe): + return RegularTableBuilder(formatter=DataFrameFormatter(dataframe)) + + def test_create_row_iterator(self, table_builder): + iterator = table_builder._create_row_iterator(over="header") + assert isinstance(iterator, RowHeaderIterator) + + def test_create_body_iterator(self, table_builder): + iterator = table_builder._create_row_iterator(over="body") + assert isinstance(iterator, RowBodyIterator) + + def test_create_body_wrong_kwarg_raises(self, table_builder): + with pytest.raises(ValueError, match="must be either 'header' or 'body'"): + table_builder._create_row_iterator(over="SOMETHING BAD") + + +class TestRowStringConverter: + @pytest.mark.parametrize( + "row_num, expected", + [ + (0, r"{} & Design & ratio & xy \\"), + (1, r"0 & 1 & 4 & 10 \\"), + (2, r"1 & 2 & 5 & 11 \\"), + ], + ) + def test_get_strrow_normal_without_escape(self, row_num, expected): + df = DataFrame({r"Design": [1, 2, 3], r"ratio": [4, 5, 6], r"xy": [10, 11, 12]}) + row_string_converter = RowStringConverter( + formatter=DataFrameFormatter(df, escape=True), + ) + assert row_string_converter.get_strrow(row_num=row_num) == expected + + @pytest.mark.parametrize( + "row_num, expected", + [ + (0, r"{} & Design \# & ratio, \% & x\&y \\"), + (1, r"0 & 1 & 4 & 10 \\"), + (2, r"1 & 2 & 5 & 11 \\"), + ], + ) + def test_get_strrow_normal_with_escape(self, row_num, expected): + df = DataFrame( + {r"Design #": [1, 2, 3], r"ratio, %": [4, 5, 6], r"x&y": 
[10, 11, 12]} + ) + row_string_converter = RowStringConverter( + formatter=DataFrameFormatter(df, escape=True), + ) + assert row_string_converter.get_strrow(row_num=row_num) == expected + + @pytest.mark.parametrize( + "row_num, expected", + [ + (0, r"{} & \multicolumn{2}{r}{c1} & \multicolumn{2}{r}{c2} & c3 \\"), + (1, r"{} & 0 & 1 & 0 & 1 & 0 \\"), + (2, r"0 & 0 & 5 & 0 & 5 & 0 \\"), + ], + ) + def test_get_strrow_multindex_multicolumn(self, row_num, expected): + df = DataFrame( + { + ("c1", 0): {x: x for x in range(5)}, + ("c1", 1): {x: x + 5 for x in range(5)}, + ("c2", 0): {x: x for x in range(5)}, + ("c2", 1): {x: x + 5 for x in range(5)}, + ("c3", 0): {x: x for x in range(5)}, + } + ) + + row_string_converter = RowStringConverter( + formatter=DataFrameFormatter(df), + multicolumn=True, + multicolumn_format="r", + multirow=True, + ) + + assert row_string_converter.get_strrow(row_num=row_num) == expected From 2a5e3cc9b7b59c0170e12947f798de1fef9fee12 Mon Sep 17 00:00:00 2001 From: Jonathan Shreckengost Date: Mon, 7 Sep 2020 15:04:42 -0400 Subject: [PATCH 462/632] Comma cleanup (#36168) --- pandas/tests/indexing/test_iloc.py | 2 +- pandas/tests/indexing/test_indexing.py | 2 +- pandas/tests/indexing/test_loc.py | 33 +++++++------------- pandas/tests/internals/test_internals.py | 8 ++--- pandas/tests/io/formats/test_css.py | 12 +++---- pandas/tests/io/formats/test_info.py | 12 +++---- pandas/tests/io/json/test_compression.py | 2 +- pandas/tests/io/json/test_pandas.py | 10 ++---- pandas/tests/io/parser/test_c_parser_only.py | 4 +-- pandas/tests/io/parser/test_parse_dates.py | 4 +-- pandas/tests/io/parser/test_usecols.py | 2 +- 11 files changed, 34 insertions(+), 57 deletions(-) diff --git a/pandas/tests/indexing/test_iloc.py b/pandas/tests/indexing/test_iloc.py index 4fae01ec710fd..bfb62835add93 100644 --- a/pandas/tests/indexing/test_iloc.py +++ b/pandas/tests/indexing/test_iloc.py @@ -56,7 +56,7 @@ def test_is_scalar_access(self): assert ser.iloc._is_scalar_access((1,)) df = ser.to_frame() - assert df.iloc._is_scalar_access((1, 0,)) + assert df.iloc._is_scalar_access((1, 0)) def test_iloc_exceeds_bounds(self): diff --git a/pandas/tests/indexing/test_indexing.py b/pandas/tests/indexing/test_indexing.py index a080c5d169215..ca8a3ddc95575 100644 --- a/pandas/tests/indexing/test_indexing.py +++ b/pandas/tests/indexing/test_indexing.py @@ -1004,7 +1004,7 @@ def test_extension_array_cross_section(): def test_extension_array_cross_section_converts(): # all numeric columns -> numeric series df = pd.DataFrame( - {"A": pd.array([1, 2], dtype="Int64"), "B": np.array([1, 2])}, index=["a", "b"], + {"A": pd.array([1, 2], dtype="Int64"), "B": np.array([1, 2])}, index=["a", "b"] ) result = df.loc["a"] expected = pd.Series([1, 1], dtype="Int64", index=["A", "B"], name="a") diff --git a/pandas/tests/indexing/test_loc.py b/pandas/tests/indexing/test_loc.py index 193800fae751f..e42d9679464d8 100644 --- a/pandas/tests/indexing/test_loc.py +++ b/pandas/tests/indexing/test_loc.py @@ -29,13 +29,11 @@ def test_loc_getitem_label_out_of_range(self): # out of range label self.check_result( - "loc", "f", typs=["ints", "uints", "labels", "mixed", "ts"], fails=KeyError, + "loc", "f", typs=["ints", "uints", "labels", "mixed", "ts"], fails=KeyError ) self.check_result("loc", "f", typs=["floats"], fails=KeyError) self.check_result("loc", "f", typs=["floats"], fails=KeyError) - self.check_result( - "loc", 20, typs=["ints", "uints", "mixed"], fails=KeyError, - ) + self.check_result("loc", 20, typs=["ints", "uints", 
"mixed"], fails=KeyError) self.check_result("loc", 20, typs=["labels"], fails=KeyError) self.check_result("loc", 20, typs=["ts"], axes=0, fails=KeyError) self.check_result("loc", 20, typs=["floats"], axes=0, fails=KeyError) @@ -46,26 +44,24 @@ def test_loc_getitem_label_list(self): pass def test_loc_getitem_label_list_with_missing(self): + self.check_result("loc", [0, 1, 2], typs=["empty"], fails=KeyError) self.check_result( - "loc", [0, 1, 2], typs=["empty"], fails=KeyError, - ) - self.check_result( - "loc", [0, 2, 10], typs=["ints", "uints", "floats"], axes=0, fails=KeyError, + "loc", [0, 2, 10], typs=["ints", "uints", "floats"], axes=0, fails=KeyError ) self.check_result( - "loc", [3, 6, 7], typs=["ints", "uints", "floats"], axes=1, fails=KeyError, + "loc", [3, 6, 7], typs=["ints", "uints", "floats"], axes=1, fails=KeyError ) # GH 17758 - MultiIndex and missing keys self.check_result( - "loc", [(1, 3), (1, 4), (2, 5)], typs=["multi"], axes=0, fails=KeyError, + "loc", [(1, 3), (1, 4), (2, 5)], typs=["multi"], axes=0, fails=KeyError ) def test_loc_getitem_label_list_fails(self): # fails self.check_result( - "loc", [20, 30, 40], typs=["ints", "uints"], axes=1, fails=KeyError, + "loc", [20, 30, 40], typs=["ints", "uints"], axes=1, fails=KeyError ) def test_loc_getitem_label_array_like(self): @@ -95,18 +91,14 @@ def test_loc_getitem_label_slice(self): ) self.check_result( - "loc", slice("20130102", "20130104"), typs=["ts"], axes=1, fails=TypeError, + "loc", slice("20130102", "20130104"), typs=["ts"], axes=1, fails=TypeError ) - self.check_result( - "loc", slice(2, 8), typs=["mixed"], axes=0, fails=TypeError, - ) - self.check_result( - "loc", slice(2, 8), typs=["mixed"], axes=1, fails=KeyError, - ) + self.check_result("loc", slice(2, 8), typs=["mixed"], axes=0, fails=TypeError) + self.check_result("loc", slice(2, 8), typs=["mixed"], axes=1, fails=KeyError) self.check_result( - "loc", slice(2, 4, 2), typs=["mixed"], axes=0, fails=TypeError, + "loc", slice(2, 4, 2), typs=["mixed"], axes=0, fails=TypeError ) def test_setitem_from_duplicate_axis(self): @@ -669,8 +661,7 @@ def test_loc_setitem_with_scalar_index(self, indexer, value): (1, ["A", "B", "C"]), np.array([7, 8, 9], dtype=np.int64), pd.DataFrame( - [[1, 2, np.nan], [7, 8, 9], [5, 6, np.nan]], - columns=["A", "B", "C"], + [[1, 2, np.nan], [7, 8, 9], [5, 6, np.nan]], columns=["A", "B", "C"] ), ), ( diff --git a/pandas/tests/internals/test_internals.py b/pandas/tests/internals/test_internals.py index 06ccdd2484a2a..1d73d1e35728b 100644 --- a/pandas/tests/internals/test_internals.py +++ b/pandas/tests/internals/test_internals.py @@ -892,16 +892,16 @@ def assert_reindex_indexer_is_ok(mgr, axis, new_labels, indexer, fill_value): fill_value, ) assert_reindex_indexer_is_ok( - mgr, ax, mgr.axes[ax][::-1], np.arange(mgr.shape[ax]), fill_value, + mgr, ax, mgr.axes[ax][::-1], np.arange(mgr.shape[ax]), fill_value ) assert_reindex_indexer_is_ok( - mgr, ax, mgr.axes[ax], np.arange(mgr.shape[ax])[::-1], fill_value, + mgr, ax, mgr.axes[ax], np.arange(mgr.shape[ax])[::-1], fill_value ) assert_reindex_indexer_is_ok( mgr, ax, pd.Index(["foo", "bar", "baz"]), [0, 0, 0], fill_value ) assert_reindex_indexer_is_ok( - mgr, ax, pd.Index(["foo", "bar", "baz"]), [-1, 0, -1], fill_value, + mgr, ax, pd.Index(["foo", "bar", "baz"]), [-1, 0, -1], fill_value ) assert_reindex_indexer_is_ok( mgr, @@ -913,7 +913,7 @@ def assert_reindex_indexer_is_ok(mgr, axis, new_labels, indexer, fill_value): if mgr.shape[ax] >= 3: assert_reindex_indexer_is_ok( - mgr, ax, pd.Index(["foo", 
"bar", "baz"]), [0, 1, 2], fill_value, + mgr, ax, pd.Index(["foo", "bar", "baz"]), [0, 1, 2], fill_value ) diff --git a/pandas/tests/io/formats/test_css.py b/pandas/tests/io/formats/test_css.py index 9383f86e335fa..785904fafd31a 100644 --- a/pandas/tests/io/formats/test_css.py +++ b/pandas/tests/io/formats/test_css.py @@ -99,11 +99,11 @@ def test_css_side_shorthands(shorthand, expansions): top, right, bottom, left = expansions assert_resolves( - f"{shorthand}: 1pt", {top: "1pt", right: "1pt", bottom: "1pt", left: "1pt"}, + f"{shorthand}: 1pt", {top: "1pt", right: "1pt", bottom: "1pt", left: "1pt"} ) assert_resolves( - f"{shorthand}: 1pt 4pt", {top: "1pt", right: "4pt", bottom: "1pt", left: "4pt"}, + f"{shorthand}: 1pt 4pt", {top: "1pt", right: "4pt", bottom: "1pt", left: "4pt"} ) assert_resolves( @@ -189,9 +189,7 @@ def test_css_absolute_font_size(size, relative_to, resolved): inherited = None else: inherited = {"font-size": relative_to} - assert_resolves( - f"font-size: {size}", {"font-size": resolved}, inherited=inherited, - ) + assert_resolves(f"font-size: {size}", {"font-size": resolved}, inherited=inherited) @pytest.mark.parametrize( @@ -225,6 +223,4 @@ def test_css_relative_font_size(size, relative_to, resolved): inherited = None else: inherited = {"font-size": relative_to} - assert_resolves( - f"font-size: {size}", {"font-size": resolved}, inherited=inherited, - ) + assert_resolves(f"font-size: {size}", {"font-size": resolved}, inherited=inherited) diff --git a/pandas/tests/io/formats/test_info.py b/pandas/tests/io/formats/test_info.py index 877bd1650ae60..7000daeb9b575 100644 --- a/pandas/tests/io/formats/test_info.py +++ b/pandas/tests/io/formats/test_info.py @@ -299,7 +299,7 @@ def test_info_memory_usage(): DataFrame(1, index=["a"], columns=["A"]).memory_usage(index=True) DataFrame(1, index=["a"], columns=["A"]).index.nbytes df = DataFrame( - data=1, index=MultiIndex.from_product([["a"], range(1000)]), columns=["A"], + data=1, index=MultiIndex.from_product([["a"], range(1000)]), columns=["A"] ) df.index.nbytes df.memory_usage(index=True) @@ -336,7 +336,7 @@ def test_info_memory_usage_deep_pypy(): @pytest.mark.skipif(PYPY, reason="PyPy getsizeof() fails by design") def test_usage_via_getsizeof(): df = DataFrame( - data=1, index=MultiIndex.from_product([["a"], range(1000)]), columns=["A"], + data=1, index=MultiIndex.from_product([["a"], range(1000)]), columns=["A"] ) mem = df.memory_usage(deep=True).sum() # sys.getsizeof will call the .memory_usage with @@ -359,16 +359,14 @@ def test_info_memory_usage_qualified(): buf = StringIO() df = DataFrame( - 1, columns=list("ab"), index=MultiIndex.from_product([range(3), range(3)]), + 1, columns=list("ab"), index=MultiIndex.from_product([range(3), range(3)]) ) df.info(buf=buf) assert "+" not in buf.getvalue() buf = StringIO() df = DataFrame( - 1, - columns=list("ab"), - index=MultiIndex.from_product([range(3), ["foo", "bar"]]), + 1, columns=list("ab"), index=MultiIndex.from_product([range(3), ["foo", "bar"]]) ) df.info(buf=buf) assert "+" in buf.getvalue() @@ -384,7 +382,7 @@ def memory_usage(f): N = 100 M = len(uppercase) index = MultiIndex.from_product( - [list(uppercase), date_range("20160101", periods=N)], names=["id", "date"], + [list(uppercase), date_range("20160101", periods=N)], names=["id", "date"] ) df = DataFrame({"value": np.random.randn(N * M)}, index=index) diff --git a/pandas/tests/io/json/test_compression.py b/pandas/tests/io/json/test_compression.py index c0e3220454bf1..a41af9886c617 100644 --- 
a/pandas/tests/io/json/test_compression.py +++ b/pandas/tests/io/json/test_compression.py @@ -45,7 +45,7 @@ def test_with_s3_url(compression, s3_resource, s3so): s3_resource.Bucket("pandas-test").put_object(Key="test-1", Body=f) roundtripped_df = pd.read_json( - "s3://pandas-test/test-1", compression=compression, storage_options=s3so, + "s3://pandas-test/test-1", compression=compression, storage_options=s3so ) tm.assert_frame_equal(df, roundtripped_df) diff --git a/pandas/tests/io/json/test_pandas.py b/pandas/tests/io/json/test_pandas.py index 59d64e1a6e909..13152f01abb04 100644 --- a/pandas/tests/io/json/test_pandas.py +++ b/pandas/tests/io/json/test_pandas.py @@ -745,11 +745,7 @@ def test_reconstruction_index(self): def test_path(self, float_frame, int_frame, datetime_frame): with tm.ensure_clean("test.json") as path: - for df in [ - float_frame, - int_frame, - datetime_frame, - ]: + for df in [float_frame, int_frame, datetime_frame]: df.to_json(path) read_json(path) @@ -1706,9 +1702,7 @@ def test_to_s3(self, s3_resource, s3so): # GH 28375 mock_bucket_name, target_file = "pandas-test", "test.json" df = DataFrame({"x": [1, 2, 3], "y": [2, 4, 6]}) - df.to_json( - f"s3://{mock_bucket_name}/{target_file}", storage_options=s3so, - ) + df.to_json(f"s3://{mock_bucket_name}/{target_file}", storage_options=s3so) timeout = 5 while True: if target_file in ( diff --git a/pandas/tests/io/parser/test_c_parser_only.py b/pandas/tests/io/parser/test_c_parser_only.py index 50179fc1ec4b8..50d5fb3e49c2a 100644 --- a/pandas/tests/io/parser/test_c_parser_only.py +++ b/pandas/tests/io/parser/test_c_parser_only.py @@ -646,9 +646,7 @@ def test_1000_sep_with_decimal( tm.assert_frame_equal(result, expected) -@pytest.mark.parametrize( - "float_precision", [None, "high", "round_trip"], -) +@pytest.mark.parametrize("float_precision", [None, "high", "round_trip"]) @pytest.mark.parametrize( "value,expected", [ diff --git a/pandas/tests/io/parser/test_parse_dates.py b/pandas/tests/io/parser/test_parse_dates.py index ed947755e3419..833186b69c63b 100644 --- a/pandas/tests/io/parser/test_parse_dates.py +++ b/pandas/tests/io/parser/test_parse_dates.py @@ -1439,7 +1439,7 @@ def test_parse_timezone(all_parsers): end="2018-01-04 09:05:00", freq="1min", tz=pytz.FixedOffset(540), - ), + ) ), freq=None, ) @@ -1553,5 +1553,5 @@ def test_missing_parse_dates_column_raises( msg = f"Missing column provided to 'parse_dates': '{missing_cols}'" with pytest.raises(ValueError, match=msg): parser.read_csv( - content, sep=",", names=names, usecols=usecols, parse_dates=parse_dates, + content, sep=",", names=names, usecols=usecols, parse_dates=parse_dates ) diff --git a/pandas/tests/io/parser/test_usecols.py b/pandas/tests/io/parser/test_usecols.py index d4e049cc3fcc2..7e9c9866a666d 100644 --- a/pandas/tests/io/parser/test_usecols.py +++ b/pandas/tests/io/parser/test_usecols.py @@ -199,7 +199,7 @@ def test_usecols_with_whitespace(all_parsers): # Column selection by index. ([0, 1], DataFrame(data=[[1000, 2000], [4000, 5000]], columns=["2", "0"])), # Column selection by name. 
- (["0", "1"], DataFrame(data=[[2000, 3000], [5000, 6000]], columns=["0", "1"]),), + (["0", "1"], DataFrame(data=[[2000, 3000], [5000, 6000]], columns=["0", "1"])), ], ) def test_usecols_with_integer_like_header(all_parsers, usecols, expected): From b2fc5ac1899dece8353d91d1d54252d9a759e65a Mon Sep 17 00:00:00 2001 From: Simon Hawkins Date: Mon, 7 Sep 2020 20:05:50 +0100 Subject: [PATCH 463/632] TST: test_datetime64_factorize on 32bit (#36192) --- pandas/tests/test_algos.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py index ec7413514d430..a2c2ae22a0b62 100644 --- a/pandas/tests/test_algos.py +++ b/pandas/tests/test_algos.py @@ -256,7 +256,7 @@ def test_datetime64_factorize(self, writable): # GH35650 Verify whether read-only datetime64 array can be factorized data = np.array([np.datetime64("2020-01-01T00:00:00.000")]) data.setflags(write=writable) - expected_codes = np.array([0], dtype=np.int64) + expected_codes = np.array([0], dtype=np.intp) expected_uniques = np.array( ["2020-01-01T00:00:00.000000000"], dtype="datetime64[ns]" ) From dd5d94551567e2bddce5a35346406d8074a36c87 Mon Sep 17 00:00:00 2001 From: Simon Hawkins Date: Mon, 7 Sep 2020 20:39:15 +0100 Subject: [PATCH 464/632] TST: update test_series_factorize_na_sentinel_none for 32bit (#36191) --- pandas/tests/base/test_factorize.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pandas/tests/base/test_factorize.py b/pandas/tests/base/test_factorize.py index 9fad9856d53cc..f8cbadb987d29 100644 --- a/pandas/tests/base/test_factorize.py +++ b/pandas/tests/base/test_factorize.py @@ -34,7 +34,7 @@ def test_series_factorize_na_sentinel_none(): ser = pd.Series(values) codes, uniques = ser.factorize(na_sentinel=None) - expected_codes = np.array([0, 1, 0, 2], dtype="int64") + expected_codes = np.array([0, 1, 0, 2], dtype=np.intp) expected_uniques = pd.Index([1.0, 2.0, np.nan]) tm.assert_numpy_array_equal(codes, expected_codes) From 12d3453c1ca82a22ed26dfc9bcb06b20384cd9a7 Mon Sep 17 00:00:00 2001 From: Simon Hawkins Date: Mon, 7 Sep 2020 20:41:42 +0100 Subject: [PATCH 465/632] DOC: move release note for #36155 (#36187) --- doc/source/whatsnew/v1.1.2.rst | 1 + doc/source/whatsnew/v1.2.0.rst | 1 - 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/whatsnew/v1.1.2.rst b/doc/source/whatsnew/v1.1.2.rst index da261907565a1..e9cba3de56920 100644 --- a/doc/source/whatsnew/v1.1.2.rst +++ b/doc/source/whatsnew/v1.1.2.rst @@ -50,6 +50,7 @@ Bug fixes Other ~~~~~ - :meth:`factorize` now supports ``na_sentinel=None`` to include NaN in the uniques of the values and remove ``dropna`` keyword which was unintentionally exposed to public facing API in 1.1 version from :meth:`factorize` (:issue:`35667`) +- :meth:`DataFrame.plot` and meth:`Series.plot` raise ``UserWarning`` about usage of FixedFormatter and FixedLocator (:issue:`35684` and :issue:`35945`) .. 
--------------------------------------------------------------------------- diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index 9a778acba4764..ccaae9f996425 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -300,7 +300,6 @@ Plotting ^^^^^^^^ - Bug in :meth:`DataFrame.plot` where a marker letter in the ``style`` keyword sometimes causes a ``ValueError`` (:issue:`21003`) -- meth:`DataFrame.plot` and meth:`Series.plot` raise ``UserWarning`` about usage of FixedFormatter and FixedLocator (:issue:`35684` and :issue:`35945`) Groupby/resample/rolling ^^^^^^^^^^^^^^^^^^^^^^^^ From 7584e590056da2e34ae882ebb2e6ea9159f74085 Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Mon, 7 Sep 2020 12:56:18 -0700 Subject: [PATCH 466/632] REF: use _validate_foo pattern in Categorical (#36181) --- pandas/core/arrays/categorical.py | 31 ++++++++++++++++++++++--------- pandas/core/indexes/category.py | 11 +++-------- 2 files changed, 25 insertions(+), 17 deletions(-) diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index 02305479bef67..228e630f95863 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -1192,6 +1192,26 @@ def map(self, mapper): __le__ = _cat_compare_op(operator.le) __ge__ = _cat_compare_op(operator.ge) + def _validate_insert_value(self, value) -> int: + code = self.categories.get_indexer([value]) + if (code == -1) and not (is_scalar(value) and isna(value)): + raise TypeError( + "cannot insert an item into a CategoricalIndex " + "that is not already an existing category" + ) + return code[0] + + def _validate_searchsorted_value(self, value): + # searchsorted is very performance sensitive. By converting codes + # to same dtype as self.codes, we get much faster performance. + if is_scalar(value): + codes = self.categories.get_loc(value) + codes = self.codes.dtype.type(codes) + else: + locs = [self.categories.get_loc(x) for x in value] + codes = np.array(locs, dtype=self.codes.dtype) + return codes + def _validate_fill_value(self, fill_value): """ Convert a user-facing fill_value to a representation to use with our @@ -1299,15 +1319,8 @@ def memory_usage(self, deep=False): @doc(_shared_docs["searchsorted"], klass="Categorical") def searchsorted(self, value, side="left", sorter=None): - # searchsorted is very performance sensitive. By converting codes - # to same dtype as self.codes, we get much faster performance. 
- if is_scalar(value): - codes = self.categories.get_loc(value) - codes = self.codes.dtype.type(codes) - else: - locs = [self.categories.get_loc(x) for x in value] - codes = np.array(locs, dtype=self.codes.dtype) - return self.codes.searchsorted(codes, side=side, sorter=sorter) + value = self._validate_searchsorted_value(value) + return self.codes.searchsorted(value, side=side, sorter=sorter) def isna(self): """ diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py index cbb30763797d1..d38f77aaceb01 100644 --- a/pandas/core/indexes/category.py +++ b/pandas/core/indexes/category.py @@ -20,7 +20,7 @@ pandas_dtype, ) from pandas.core.dtypes.dtypes import CategoricalDtype -from pandas.core.dtypes.missing import is_valid_nat_for_dtype, isna, notna +from pandas.core.dtypes.missing import is_valid_nat_for_dtype, notna from pandas.core import accessor from pandas.core.algorithms import take_1d @@ -734,15 +734,10 @@ def insert(self, loc: int, item): ValueError if the item is not in the categories """ - code = self.categories.get_indexer([item]) - if (code == -1) and not (is_scalar(item) and isna(item)): - raise TypeError( - "cannot insert an item into a CategoricalIndex " - "that is not already an existing category" - ) + code = self._data._validate_insert_value(item) codes = self.codes - codes = np.concatenate((codes[:loc], code, codes[loc:])) + codes = np.concatenate((codes[:loc], [code], codes[loc:])) return self._create_from_codes(codes) def _concat(self, to_concat, name): From 971c60e4324feb27df8ef7656247c7b8703944a5 Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Mon, 7 Sep 2020 13:46:33 -0700 Subject: [PATCH 467/632] DTA/TDA/PA use self._data instead of self.asi8 for self._ndarray (#36171) --- pandas/core/arrays/datetimelike.py | 50 +++++++++++--------- pandas/core/arrays/datetimes.py | 4 ++ pandas/core/arrays/period.py | 4 ++ pandas/core/arrays/timedeltas.py | 4 ++ pandas/tests/frame/indexing/test_datetime.py | 4 +- 5 files changed, 43 insertions(+), 23 deletions(-) diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index a5b8032974fa4..a218745db0a44 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -27,7 +27,7 @@ from pandas.compat import set_function_name from pandas.compat.numpy import function as nv from pandas.errors import AbstractMethodError, NullFrequencyError, PerformanceWarning -from pandas.util._decorators import Appender, Substitution +from pandas.util._decorators import Appender, Substitution, cache_readonly from pandas.util._validators import validate_fillna_kwargs from pandas.core.dtypes.common import ( @@ -175,6 +175,14 @@ def _scalar_from_string(self, value: str) -> DTScalarOrNaT: """ raise AbstractMethodError(self) + @classmethod + def _rebox_native(cls, value: int) -> Union[int, np.datetime64, np.timedelta64]: + """ + Box an integer unboxed via _unbox_scalar into the native type for + the underlying ndarray. + """ + raise AbstractMethodError(cls) + def _unbox_scalar(self, value: DTScalarOrNaT) -> int: """ Unbox the integer value of a scalar `value`. 
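+
+        The inverse operation is ``_rebox_native``, which each subclass
+        implements for its native type; for example (sketch, following the
+        DatetimeArray implementation added later in this patch):
+
+        >>> np.int64(0).view("M8[ns]")  # doctest: +SKIP
+        numpy.datetime64('1970-01-01T00:00:00.000000000')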
@@ -458,18 +466,15 @@ class DatetimeLikeArrayMixin( # ------------------------------------------------------------------ # NDArrayBackedExtensionArray compat - # TODO: make this a cache_readonly; need to get around _index_data - # kludge in libreduction - @property + @cache_readonly def _ndarray(self) -> np.ndarray: - # NB: A bunch of Interval tests fail if we use ._data - return self.asi8 + return self._data def _from_backing_data(self: _T, arr: np.ndarray) -> _T: # Note: we do not retain `freq` - # error: Too many arguments for "NDArrayBackedExtensionArray" - # error: Unexpected keyword argument "dtype" for "NDArrayBackedExtensionArray" - return type(self)(arr, dtype=self.dtype) # type: ignore[call-arg] + return type(self)._simple_new( # type: ignore[attr-defined] + arr, dtype=self.dtype + ) # ------------------------------------------------------------------ @@ -526,7 +531,7 @@ def __array__(self, dtype=None) -> np.ndarray: # used for Timedelta/DatetimeArray, overwritten by PeriodArray if is_object_dtype(dtype): return np.array(list(self), dtype=object) - return self._data + return self._ndarray def __getitem__(self, key): """ @@ -536,7 +541,7 @@ def __getitem__(self, key): if lib.is_integer(key): # fast-path - result = self._data[key] + result = self._ndarray[key] if self.ndim == 1: return self._box_func(result) return self._simple_new(result, dtype=self.dtype) @@ -557,7 +562,7 @@ def __getitem__(self, key): key = check_array_indexer(self, key) freq = self._get_getitem_freq(key) - result = self._data[key] + result = self._ndarray[key] if lib.is_scalar(result): return self._box_func(result) return self._simple_new(result, dtype=self.dtype, freq=freq) @@ -612,7 +617,7 @@ def __setitem__( value = self._validate_setitem_value(value) key = check_array_indexer(self, key) - self._data[key] = value + self._ndarray[key] = value self._maybe_clear_freq() def _maybe_clear_freq(self): @@ -663,8 +668,8 @@ def astype(self, dtype, copy=True): def view(self, dtype=None): if dtype is None or dtype is self.dtype: - return type(self)(self._data, dtype=self.dtype) - return self._data.view(dtype=dtype) + return type(self)(self._ndarray, dtype=self.dtype) + return self._ndarray.view(dtype=dtype) # ------------------------------------------------------------------ # ExtensionArray Interface @@ -705,7 +710,7 @@ def _from_factorized(cls, values, original): return cls(values, dtype=original.dtype) def _values_for_argsort(self): - return self._data + return self._ndarray # ------------------------------------------------------------------ # Validation Methods @@ -722,7 +727,7 @@ def _validate_fill_value(self, fill_value): Returns ------- - fill_value : np.int64 + fill_value : np.int64, np.datetime64, or np.timedelta64 Raises ------ @@ -736,7 +741,8 @@ def _validate_fill_value(self, fill_value): fill_value = self._validate_scalar(fill_value, msg) except TypeError as err: raise ValueError(msg) from err - return self._unbox(fill_value) + rv = self._unbox(fill_value) + return self._rebox_native(rv) def _validate_shift_value(self, fill_value): # TODO(2.0): once this deprecation is enforced, use _validate_fill_value @@ -951,9 +957,9 @@ def value_counts(self, dropna=False): from pandas import Index, Series if dropna: - values = self[~self.isna()]._data + values = self[~self.isna()]._ndarray else: - values = self._data + values = self._ndarray cls = type(self) @@ -1044,9 +1050,9 @@ def fillna(self, value=None, method=None, limit=None): else: func = missing.backfill_1d - values = self._data + values = self._ndarray if not 
is_period_dtype(self.dtype):
-            # For PeriodArray self._data is i8, which gets copied
+            # For PeriodArray self._ndarray is i8, which gets copied
             # by `func`. Otherwise we need to make a copy manually
             # to avoid modifying `self` in-place.
             values = values.copy()
diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py
index 1bea3a9eb137e..d913e7be9ae5f 100644
--- a/pandas/core/arrays/datetimes.py
+++ b/pandas/core/arrays/datetimes.py
@@ -446,6 +446,10 @@ def _generate_range(
     # -----------------------------------------------------------------
     # DatetimeLike Interface

+    @classmethod
+    def _rebox_native(cls, value: int) -> np.datetime64:
+        return np.int64(value).view("M8[ns]")
+
     def _unbox_scalar(self, value):
         if not isinstance(value, self._scalar_type) and value is not NaT:
             raise ValueError("'value' should be a Timestamp.")
diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py
index cc39ffb5d1203..c3a9430736969 100644
--- a/pandas/core/arrays/period.py
+++ b/pandas/core/arrays/period.py
@@ -253,6 +253,10 @@ def _generate_range(cls, start, end, periods, freq, fields):
     # -----------------------------------------------------------------
     # DatetimeLike Interface

+    @classmethod
+    def _rebox_native(cls, value: int) -> np.int64:
+        return np.int64(value)
+
     def _unbox_scalar(self, value: Union[Period, NaTType]) -> int:
         if value is NaT:
             return value.value
diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py
index 2d694c469b3a9..485ebb49a376d 100644
--- a/pandas/core/arrays/timedeltas.py
+++ b/pandas/core/arrays/timedeltas.py
@@ -271,6 +271,10 @@ def _generate_range(cls, start, end, periods, freq, closed=None):
     # ----------------------------------------------------------------
     # DatetimeLike Interface

+    @classmethod
+    def _rebox_native(cls, value: int) -> np.timedelta64:
+        return np.int64(value).view("m8[ns]")
+
     def _unbox_scalar(self, value):
         if not isinstance(value, self._scalar_type) and value is not NaT:
             raise ValueError("'value' should be a Timedelta.")
diff --git a/pandas/tests/frame/indexing/test_datetime.py b/pandas/tests/frame/indexing/test_datetime.py
index 1937a4c380dc9..1866ac341def6 100644
--- a/pandas/tests/frame/indexing/test_datetime.py
+++ b/pandas/tests/frame/indexing/test_datetime.py
@@ -23,7 +23,9 @@ def test_setitem(self, timezone_frame):
         b1 = df._mgr.blocks[1]
         b2 = df._mgr.blocks[2]
         tm.assert_extension_array_equal(b1.values, b2.values)
-        assert id(b1.values._data.base) != id(b2.values._data.base)
+        b1base = b1.values._data.base
+        b2base = b2.values._data.base
+        assert b1base is None or (id(b1base) != id(b2base))

         # with nan
         df2 = df.copy()

From b37c9f8f9c904a434a73fcb5c6e99b87692b0429 Mon Sep 17 00:00:00 2001
From: Thomas Dickson
Date: Mon, 7 Sep 2020 21:47:39 +0100
Subject: [PATCH 468/632] TST: verify groupby doesn't alter uint64s to floats
 #30859 (#36164)

---
 pandas/tests/groupby/test_groupby.py | 12 ++++++++++++
 1 file changed, 12 insertions(+)

diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py
index e0196df7ceac0..69397228dd941 100644
--- a/pandas/tests/groupby/test_groupby.py
+++ b/pandas/tests/groupby/test_groupby.py
@@ -1183,6 +1183,18 @@ def test_groupby_dtype_inference_empty():
     tm.assert_frame_equal(result, expected, by_blocks=True)


+def test_groupby_uint64_float_conversion():
+    # GH 30859: groupby sometimes converts uint64 to floats
+    df = pd.DataFrame({"first": [1], "second": [1], "value": [16148277970000000000]})
+    result = df.groupby(["first", "second"])["value"].max()
"second"])["value"].max() + expected = pd.Series( + [16148277970000000000], + pd.MultiIndex.from_product([[1], [1]], names=["first", "second"]), + name="value", + ) + tm.assert_series_equal(result, expected) + + def test_groupby_list_infer_array_like(df): result = df.groupby(list(df["A"])).mean() expected = df.groupby(df["A"]).mean() From ce0476f714ed195f581a9e3c8c3222fe6b9fee50 Mon Sep 17 00:00:00 2001 From: patrick <61934744+phofl@users.noreply.github.com> Date: Mon, 7 Sep 2020 23:06:29 +0200 Subject: [PATCH 469/632] Fix compressed multiindex for output of groupby.rolling (#36152) --- doc/source/whatsnew/v1.1.2.rst | 1 + pandas/core/window/rolling.py | 10 +++++----- pandas/tests/window/test_grouper.py | 21 +++++++++++++++++++++ 3 files changed, 27 insertions(+), 5 deletions(-) diff --git a/doc/source/whatsnew/v1.1.2.rst b/doc/source/whatsnew/v1.1.2.rst index e9cba3de56920..28ce49c11b3f0 100644 --- a/doc/source/whatsnew/v1.1.2.rst +++ b/doc/source/whatsnew/v1.1.2.rst @@ -23,6 +23,7 @@ Fixed regressions - Regression in :meth:`DataFrame.replace` where a ``TypeError`` would be raised when attempting to replace elements of type :class:`Interval` (:issue:`35931`) - Fix regression in pickle roundtrip of the ``closed`` attribute of :class:`IntervalIndex` (:issue:`35658`) - Fixed regression in :meth:`DataFrameGroupBy.agg` where a ``ValueError: buffer source array is read-only`` would be raised when the underlying array is read-only (:issue:`36014`) +- Fixed regression in :meth:`Series.groupby.rolling` number of levels of :class:`MultiIndex` in input was compressed to one (:issue:`36018`) - .. --------------------------------------------------------------------------- diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py index 4c4ec4d700b7f..235bd5364af02 100644 --- a/pandas/core/window/rolling.py +++ b/pandas/core/window/rolling.py @@ -2211,17 +2211,17 @@ def _apply( # Compose MultiIndex result from grouping levels then rolling level # Aggregate the MultiIndex data as tuples then the level names grouped_object_index = self.obj.index - grouped_index_name = [grouped_object_index.name] + grouped_index_name = [*grouped_object_index.names] groupby_keys = [grouping.name for grouping in self._groupby.grouper._groupings] result_index_names = groupby_keys + grouped_index_name result_index_data = [] for key, values in self._groupby.grouper.indices.items(): for value in values: - if not is_list_like(key): - data = [key, grouped_object_index[value]] - else: - data = [*key, grouped_object_index[value]] + data = [ + *com.maybe_make_list(key), + *com.maybe_make_list(grouped_object_index[value]), + ] result_index_data.append(tuple(data)) result_index = MultiIndex.from_tuples( diff --git a/pandas/tests/window/test_grouper.py b/pandas/tests/window/test_grouper.py index 170bf100b3891..cb85ad7584da7 100644 --- a/pandas/tests/window/test_grouper.py +++ b/pandas/tests/window/test_grouper.py @@ -372,3 +372,24 @@ def test_groupby_subset_rolling_subset_with_closed(self): name="column1", ) tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize("func", ["max", "min"]) + def test_groupby_rolling_index_changed(self, func): + # GH: #36018 nlevels of MultiIndex changed + ds = Series( + [1, 2, 2], + index=pd.MultiIndex.from_tuples( + [("a", "x"), ("a", "y"), ("c", "z")], names=["1", "2"] + ), + name="a", + ) + + result = getattr(ds.groupby(ds).rolling(2), func)() + expected = Series( + [np.nan, np.nan, 2.0], + index=pd.MultiIndex.from_tuples( + [(1, "a", "x"), (2, "a", "y"), (2, "c", "z")], 
names=["a", "1", "2"] + ), + name="a", + ) + tm.assert_series_equal(result, expected) From 472e8462b1c29c35746ddbe4880d430e0d3d083a Mon Sep 17 00:00:00 2001 From: patrick <61934744+phofl@users.noreply.github.com> Date: Mon, 7 Sep 2020 23:11:29 +0200 Subject: [PATCH 470/632] TST: DataFrame.replace: TypeError: Cannot compare types 'ndarray(dtype=int64)' and 'unicode' (#36202) --- pandas/tests/frame/methods/test_replace.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/pandas/tests/frame/methods/test_replace.py b/pandas/tests/frame/methods/test_replace.py index ea2488dfc0877..a77753ed9f9d0 100644 --- a/pandas/tests/frame/methods/test_replace.py +++ b/pandas/tests/frame/methods/test_replace.py @@ -1599,3 +1599,11 @@ def test_replace_intervals(self): result = df.replace({"a": {pd.Interval(0, 1): "x"}}) expected = pd.DataFrame({"a": ["x", "x"]}) tm.assert_frame_equal(result, expected) + + def test_replace_unicode(self): + # GH: 16784 + columns_values_map = {"positive": {"正面": 1, "中立": 1, "负面": 0}} + df1 = pd.DataFrame({"positive": np.ones(3)}) + result = df1.replace(columns_values_map) + expected = pd.DataFrame({"positive": np.ones(3)}) + tm.assert_frame_equal(result, expected) From c1d7bbddf945de315995ed84663d672697f0185c Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Mon, 7 Sep 2020 14:43:12 -0700 Subject: [PATCH 471/632] REF: collect methods by topic (#36173) --- pandas/core/arrays/categorical.py | 148 +++++++++++++++++------------- pandas/core/indexes/category.py | 29 +++--- pandas/core/indexes/datetimes.py | 3 + pandas/core/indexes/interval.py | 64 +++++++------ pandas/core/indexes/multi.py | 2 + pandas/core/indexes/numeric.py | 42 +++++---- pandas/core/indexes/period.py | 67 +++++++------- pandas/core/indexes/range.py | 5 + pandas/core/indexes/timedeltas.py | 5 + 9 files changed, 212 insertions(+), 153 deletions(-) diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index 228e630f95863..58847528d2183 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -393,56 +393,6 @@ def __init__( self._dtype = self._dtype.update_dtype(dtype) self._codes = coerce_indexer_dtype(codes, dtype.categories) - @property - def categories(self): - """ - The categories of this categorical. - - Setting assigns new values to each category (effectively a rename of - each individual category). - - The assigned value has to be a list-like object. All items must be - unique and the number of items in the new categories must be the same - as the number of items in the old categories. - - Assigning to `categories` is a inplace operation! - - Raises - ------ - ValueError - If the new categories do not validate as categories or if the - number of new categories is unequal the number of old categories - - See Also - -------- - rename_categories : Rename categories. - reorder_categories : Reorder categories. - add_categories : Add new categories. - remove_categories : Remove the specified categories. - remove_unused_categories : Remove categories which are not used. - set_categories : Set the categories to the specified ones. - """ - return self.dtype.categories - - @categories.setter - def categories(self, categories): - new_dtype = CategoricalDtype(categories, ordered=self.ordered) - if self.dtype.categories is not None and len(self.dtype.categories) != len( - new_dtype.categories - ): - raise ValueError( - "new categories need to have the same number of " - "items as the old categories!" 
- ) - self._dtype = new_dtype - - @property - def ordered(self) -> Ordered: - """ - Whether the categories have an ordered relationship. - """ - return self.dtype.ordered - @property def dtype(self) -> CategoricalDtype: """ @@ -458,10 +408,6 @@ def _constructor(self) -> Type["Categorical"]: def _from_sequence(cls, scalars, dtype=None, copy=False): return Categorical(scalars, dtype=dtype) - def _formatter(self, boxed=False): - # Defer to CategoricalFormatter's formatter. - return None - def astype(self, dtype: Dtype, copy: bool = True) -> ArrayLike: """ Coerce this type to another dtype @@ -640,6 +586,59 @@ def from_codes(cls, codes, categories=None, ordered=None, dtype=None): return cls(codes, dtype=dtype, fastpath=True) + # ------------------------------------------------------------------ + # Categories/Codes/Ordered + + @property + def categories(self): + """ + The categories of this categorical. + + Setting assigns new values to each category (effectively a rename of + each individual category). + + The assigned value has to be a list-like object. All items must be + unique and the number of items in the new categories must be the same + as the number of items in the old categories. + + Assigning to `categories` is a inplace operation! + + Raises + ------ + ValueError + If the new categories do not validate as categories or if the + number of new categories is unequal the number of old categories + + See Also + -------- + rename_categories : Rename categories. + reorder_categories : Reorder categories. + add_categories : Add new categories. + remove_categories : Remove the specified categories. + remove_unused_categories : Remove categories which are not used. + set_categories : Set the categories to the specified ones. + """ + return self.dtype.categories + + @categories.setter + def categories(self, categories): + new_dtype = CategoricalDtype(categories, ordered=self.ordered) + if self.dtype.categories is not None and len(self.dtype.categories) != len( + new_dtype.categories + ): + raise ValueError( + "new categories need to have the same number of " + "items as the old categories!" + ) + self._dtype = new_dtype + + @property + def ordered(self) -> Ordered: + """ + Whether the categories have an ordered relationship. + """ + return self.dtype.ordered + @property def codes(self) -> np.ndarray: """ @@ -1104,6 +1103,8 @@ def remove_unused_categories(self, inplace=False): if not inplace: return cat + # ------------------------------------------------------------------ + def map(self, mapper): """ Map categories using input correspondence (dict, Series, or function). @@ -1192,6 +1193,9 @@ def map(self, mapper): __le__ = _cat_compare_op(operator.le) __ge__ = _cat_compare_op(operator.ge) + # ------------------------------------------------------------- + # Validators; ideally these can be de-duplicated + def _validate_insert_value(self, value) -> int: code = self.categories.get_indexer([value]) if (code == -1) and not (is_scalar(value) and isna(value)): @@ -1241,6 +1245,8 @@ def _validate_fill_value(self, fill_value): ) return fill_value + # ------------------------------------------------------------- + def __array__(self, dtype=None) -> np.ndarray: """ The numpy array interface. @@ -1758,6 +1764,10 @@ def __contains__(self, key) -> bool: # ------------------------------------------------------------------ # Rendering Methods + def _formatter(self, boxed=False): + # Defer to CategoricalFormatter's formatter. 
+ return None + def _tidy_repr(self, max_vals=10, footer=True) -> str: """ a short repr displaying only max_vals and an optional (but default @@ -1987,7 +1997,9 @@ def _reverse_indexer(self) -> Dict[Hashable, np.ndarray]: result = dict(zip(categories, _result)) return result - # reduction ops # + # ------------------------------------------------------------------ + # Reductions + def _reduce(self, name: str, skipna: bool = True, **kwargs): func = getattr(self, name, None) if func is None: @@ -2090,6 +2102,9 @@ def mode(self, dropna=True): codes = sorted(htable.mode_int64(ensure_int64(codes), dropna)) return self._constructor(values=codes, dtype=self.dtype, fastpath=True) + # ------------------------------------------------------------------ + # ExtensionArray Interface + def unique(self): """ Return the ``Categorical`` which ``categories`` and ``codes`` are @@ -2179,6 +2194,18 @@ def equals(self, other: object) -> bool: return np.array_equal(self._codes, other_codes) return False + @property + def _can_hold_na(self): + return True + + @classmethod + def _concat_same_type(self, to_concat): + from pandas.core.dtypes.concat import union_categoricals + + return union_categoricals(to_concat) + + # ------------------------------------------------------------------ + def is_dtype_equal(self, other): """ Returns True if categoricals are the same dtype @@ -2217,17 +2244,6 @@ def describe(self): return result - # Implement the ExtensionArray interface - @property - def _can_hold_na(self): - return True - - @classmethod - def _concat_same_type(self, to_concat): - from pandas.core.dtypes.concat import union_categoricals - - return union_categoricals(to_concat) - def isin(self, values) -> np.ndarray: """ Check whether `values` are contained in Categorical. diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py index d38f77aaceb01..7509cb35069e8 100644 --- a/pandas/core/indexes/category.py +++ b/pandas/core/indexes/category.py @@ -433,11 +433,6 @@ def _to_safe_for_reshape(self): """ convert to object if we are a categorical """ return self.astype("object") - def _maybe_cast_indexer(self, key): - code = self.categories.get_loc(key) - code = self.codes.dtype.type(code) - return code - @doc(Index.where) def where(self, cond, other=None): # TODO: Investigate an alternative implementation with @@ -537,6 +532,14 @@ def _reindex_non_unique(self, target): return new_target, indexer, new_indexer + # -------------------------------------------------------------------- + # Indexing Methods + + def _maybe_cast_indexer(self, key): + code = self.categories.get_loc(key) + code = self.codes.dtype.type(code) + return code + @Appender(_index_shared_docs["get_indexer"] % _index_doc_kwargs) def get_indexer(self, target, method=None, limit=None, tolerance=None): method = missing.clean_reindex_fill_method(method) @@ -619,6 +622,15 @@ def _convert_arr_indexer(self, keyarr): def _convert_index_indexer(self, keyarr): return self._shallow_copy(keyarr) + @doc(Index._maybe_cast_slice_bound) + def _maybe_cast_slice_bound(self, label, side, kind): + if kind == "loc": + return label + + return super()._maybe_cast_slice_bound(label, side, kind) + + # -------------------------------------------------------------------- + def take_nd(self, *args, **kwargs): """Alias for `take`""" warnings.warn( @@ -628,13 +640,6 @@ def take_nd(self, *args, **kwargs): ) return self.take(*args, **kwargs) - @doc(Index._maybe_cast_slice_bound) - def _maybe_cast_slice_bound(self, label, side, kind): - if kind == "loc": - return label 
- - return super()._maybe_cast_slice_bound(label, side, kind) - def map(self, mapper): """ Map values using input correspondence (a dict, Series, or function). diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index 3fd93a8159041..f0b80c2852bd5 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -509,6 +509,9 @@ def snap(self, freq="S"): dta = DatetimeArray(snapped, dtype=self.dtype) return DatetimeIndex._simple_new(dta, name=self.name) + # -------------------------------------------------------------------- + # Indexing Methods + def _parsed_string_to_bounds(self, reso: Resolution, parsed: datetime): """ Calculate datetime bounds for parsed time string and its resolution. diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index 419ff81a2a478..3f72577c9420e 100644 --- a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -57,7 +57,7 @@ from pandas.core.ops import get_op_result_name if TYPE_CHECKING: - from pandas import CategoricalIndex + from pandas import CategoricalIndex # noqa:F401 _VALID_CLOSED = {"left", "right", "both", "neither"} _index_doc_kwargs = dict(ibase._index_doc_kwargs) @@ -515,28 +515,6 @@ def is_overlapping(self) -> bool: # GH 23309 return self._engine.is_overlapping - def _should_fallback_to_positional(self) -> bool: - # integer lookups in Series.__getitem__ are unambiguously - # positional in this case - return self.dtype.subtype.kind in ["m", "M"] - - def _maybe_cast_slice_bound(self, label, side, kind): - return getattr(self, side)._maybe_cast_slice_bound(label, side, kind) - - @Appender(Index._convert_list_indexer.__doc__) - def _convert_list_indexer(self, keyarr): - """ - we are passed a list-like indexer. Return the - indexer for matching intervals. - """ - locs = self.get_indexer_for(keyarr) - - # we have missing values - if (locs == -1).any(): - raise KeyError - - return locs - def _can_reindex(self, indexer: np.ndarray) -> None: """ Check if we are allowing reindexing with this particular indexer. @@ -668,6 +646,9 @@ def _searchsorted_monotonic(self, label, side, exclude_label=False): return sub_idx._searchsorted_monotonic(label, side) + # -------------------------------------------------------------------- + # Indexing Methods + def get_loc( self, key, method: Optional[str] = None, tolerance=None ) -> Union[int, slice, np.ndarray]: @@ -885,6 +866,30 @@ def _convert_slice_indexer(self, key: slice, kind: str): return super()._convert_slice_indexer(key, kind) + def _should_fallback_to_positional(self) -> bool: + # integer lookups in Series.__getitem__ are unambiguously + # positional in this case + return self.dtype.subtype.kind in ["m", "M"] + + def _maybe_cast_slice_bound(self, label, side, kind): + return getattr(self, side)._maybe_cast_slice_bound(label, side, kind) + + @Appender(Index._convert_list_indexer.__doc__) + def _convert_list_indexer(self, keyarr): + """ + we are passed a list-like indexer. Return the + indexer for matching intervals. 
+ """ + locs = self.get_indexer_for(keyarr) + + # we have missing values + if (locs == -1).any(): + raise KeyError + + return locs + + # -------------------------------------------------------------------- + @Appender(Index.where.__doc__) def where(self, cond, other=None): if other is None: @@ -1030,6 +1035,9 @@ def equals(self, other: object) -> bool: and self.closed == other.closed ) + # -------------------------------------------------------------------- + # Set Operations + @Appender(Index.intersection.__doc__) @SetopCheck(op_name="intersection") def intersection( @@ -1115,6 +1123,12 @@ def func(self, other, sort=sort): return func + union = _setop("union") + difference = _setop("difference") + symmetric_difference = _setop("symmetric_difference") + + # -------------------------------------------------------------------- + @property def is_all_dates(self) -> bool: """ @@ -1123,10 +1137,6 @@ def is_all_dates(self) -> bool: """ return False - union = _setop("union") - difference = _setop("difference") - symmetric_difference = _setop("symmetric_difference") - # TODO: arithmetic operations # GH#30817 until IntervalArray implements inequalities, get them from Index diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index e49a23935efbd..9630e154ccd17 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -3154,6 +3154,8 @@ def _update_indexer(idxr, indexer=indexer): return indexer._values + # -------------------------------------------------------------------- + def _reorder_indexer( self, seq: Tuple[Union[Scalar, Iterable, AnyArrayLike], ...], diff --git a/pandas/core/indexes/numeric.py b/pandas/core/indexes/numeric.py index cd3f1f51a86d2..079f43cb2c66b 100644 --- a/pandas/core/indexes/numeric.py +++ b/pandas/core/indexes/numeric.py @@ -97,6 +97,9 @@ def _validate_dtype(cls, dtype: Dtype) -> None: f"Incorrect `dtype` passed: expected {expected}, received {dtype}" ) + # ---------------------------------------------------------------- + # Indexing Methods + @doc(Index._maybe_cast_slice_bound) def _maybe_cast_slice_bound(self, label, side, kind): assert kind in ["loc", "getitem", None] @@ -104,6 +107,8 @@ def _maybe_cast_slice_bound(self, label, side, kind): # we will try to coerce to integers return self._maybe_cast_indexer(label) + # ---------------------------------------------------------------- + @doc(Index._shallow_copy) def _shallow_copy(self, values=None, name: Label = lib.no_default): if values is not None and not self._can_hold_na and values.dtype.kind == "f": @@ -293,6 +298,9 @@ class UInt64Index(IntegerIndex): _engine_type = libindex.UInt64Engine _default_dtype = np.dtype(np.uint64) + # ---------------------------------------------------------------- + # Indexing Methods + @doc(Index._convert_arr_indexer) def _convert_arr_indexer(self, keyarr): # Cast the indexer to uint64 if possible so that the values returned @@ -314,6 +322,8 @@ def _convert_index_indexer(self, keyarr): return keyarr.astype(np.uint64) return keyarr + # ---------------------------------------------------------------- + def _wrap_joined_index(self, joined, other): name = get_op_result_name(self, other) return UInt64Index(joined, name=name) @@ -385,6 +395,22 @@ def _convert_slice_indexer(self, key: slice, kind: str): # translate to locations return self.slice_indexer(key.start, key.stop, key.step, kind=kind) + @doc(Index.get_loc) + def get_loc(self, key, method=None, tolerance=None): + if is_bool(key): + # Catch this to avoid accidentally casting to 1.0 + raise 
KeyError(key) + + if is_float(key) and np.isnan(key): + nan_idxs = self._nan_idxs + if not len(nan_idxs): + raise KeyError(key) + elif len(nan_idxs) == 1: + return nan_idxs[0] + return nan_idxs + + return super().get_loc(key, method=method, tolerance=tolerance) + # ---------------------------------------------------------------- def _format_native_types( @@ -409,22 +435,6 @@ def __contains__(self, other: Any) -> bool: return is_float(other) and np.isnan(other) and self.hasnans - @doc(Index.get_loc) - def get_loc(self, key, method=None, tolerance=None): - if is_bool(key): - # Catch this to avoid accidentally casting to 1.0 - raise KeyError(key) - - if is_float(key) and np.isnan(key): - nan_idxs = self._nan_idxs - if not len(nan_idxs): - raise KeyError(key) - elif len(nan_idxs) == 1: - return nan_idxs[0] - return nan_idxs - - return super().get_loc(key, method=method, tolerance=tolerance) - @cache_readonly def is_unique(self) -> bool: return super().is_unique and self._nan_idxs.size < 2 diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py index cdb502199c6f1..5282b6f0154b4 100644 --- a/pandas/core/indexes/period.py +++ b/pandas/core/indexes/period.py @@ -433,6 +433,41 @@ def inferred_type(self) -> str: # indexing return "period" + def insert(self, loc, item): + if not isinstance(item, Period) or self.freq != item.freq: + return self.astype(object).insert(loc, item) + + i8result = np.concatenate( + (self[:loc].asi8, np.array([item.ordinal]), self[loc:].asi8) + ) + arr = type(self._data)._simple_new(i8result, dtype=self.dtype) + return type(self)._simple_new(arr, name=self.name) + + def join(self, other, how="left", level=None, return_indexers=False, sort=False): + """ + See Index.join + """ + self._assert_can_do_setop(other) + + if not isinstance(other, PeriodIndex): + return self.astype(object).join( + other, how=how, level=level, return_indexers=return_indexers, sort=sort + ) + + # _assert_can_do_setop ensures we have matching dtype + result = Int64Index.join( + self, + other, + how=how, + level=level, + return_indexers=return_indexers, + sort=sort, + ) + return result + + # ------------------------------------------------------------------------ + # Indexing Methods + @Appender(_index_shared_docs["get_indexer"] % _index_doc_kwargs) def get_indexer(self, target, method=None, limit=None, tolerance=None): target = ensure_index(target) @@ -607,38 +642,6 @@ def _get_string_slice(self, key: str, use_lhs: bool = True, use_rhs: bool = True except KeyError as err: raise KeyError(key) from err - def insert(self, loc, item): - if not isinstance(item, Period) or self.freq != item.freq: - return self.astype(object).insert(loc, item) - - i8result = np.concatenate( - (self[:loc].asi8, np.array([item.ordinal]), self[loc:].asi8) - ) - arr = type(self._data)._simple_new(i8result, dtype=self.dtype) - return type(self)._simple_new(arr, name=self.name) - - def join(self, other, how="left", level=None, return_indexers=False, sort=False): - """ - See Index.join - """ - self._assert_can_do_setop(other) - - if not isinstance(other, PeriodIndex): - return self.astype(object).join( - other, how=how, level=level, return_indexers=return_indexers, sort=sort - ) - - # _assert_can_do_setop ensures we have matching dtype - result = Int64Index.join( - self, - other, - how=how, - level=level, - return_indexers=return_indexers, - sort=sort, - ) - return result - # ------------------------------------------------------------------------ # Set Operation Methods diff --git a/pandas/core/indexes/range.py 
b/pandas/core/indexes/range.py index f1457a9aac62b..684691501de5c 100644 --- a/pandas/core/indexes/range.py +++ b/pandas/core/indexes/range.py @@ -338,6 +338,9 @@ def __contains__(self, key: Any) -> bool: return False return key in self._range + # -------------------------------------------------------------------- + # Indexing Methods + @doc(Int64Index.get_loc) def get_loc(self, key, method=None, tolerance=None): if method is None and tolerance is None: @@ -379,6 +382,8 @@ def get_indexer(self, target, method=None, limit=None, tolerance=None): locs[valid] = len(self) - 1 - locs[valid] return ensure_platform_int(locs) + # -------------------------------------------------------------------- + def tolist(self): return list(self._range) diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py index 85c8396dfd1fe..df08fda78823d 100644 --- a/pandas/core/indexes/timedeltas.py +++ b/pandas/core/indexes/timedeltas.py @@ -202,6 +202,9 @@ def _is_comparable_dtype(self, dtype: DtypeObj) -> bool: """ return is_timedelta64_dtype(dtype) + # ------------------------------------------------------------------- + # Indexing Methods + def get_loc(self, key, method=None, tolerance=None): """ Get integer location for requested label @@ -248,6 +251,8 @@ def _maybe_cast_slice_bound(self, label, side: str, kind): return label + # ------------------------------------------------------------------- + def is_type_compatible(self, typ) -> bool: return typ == self.inferred_type or typ == "timedelta" From 405b5c5c460c2f55e1fbf7a06d0cb19cdd8784d5 Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Mon, 7 Sep 2020 15:41:00 -0700 Subject: [PATCH 472/632] REF: implement Categorical._validate_setitem_value (#36180) --- pandas/core/arrays/categorical.py | 35 +++++++++++++++--------------- pandas/core/arrays/datetimelike.py | 16 +++++++++----- 2 files changed, 28 insertions(+), 23 deletions(-) diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index 58847528d2183..b732db4c66003 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -9,7 +9,7 @@ from pandas._config import get_option -from pandas._libs import NaT, algos as libalgos, hashtable as htable +from pandas._libs import NaT, algos as libalgos, hashtable as htable, lib from pandas._typing import ArrayLike, Dtype, Ordered, Scalar from pandas.compat.numpy import function as nv from pandas.util._decorators import cache_readonly, deprecate_kwarg, doc @@ -1868,14 +1868,6 @@ def __repr__(self) -> str: # ------------------------------------------------------------------ - def _maybe_coerce_indexer(self, indexer): - """ - return an indexer coerced to the codes dtype - """ - if isinstance(indexer, np.ndarray) and indexer.dtype.kind == "i": - indexer = indexer.astype(self._codes.dtype) - return indexer - def __getitem__(self, key): """ Return an item. 
@@ -1905,6 +1897,11 @@ def __setitem__(self, key, value): If (one or more) Value is not in categories or if a assigned `Categorical` does not have the same categories """ + key = self._validate_setitem_key(key) + value = self._validate_setitem_value(value) + self._ndarray[key] = value + + def _validate_setitem_value(self, value): value = extract_array(value, extract_numpy=True) # require identical categories set @@ -1934,12 +1931,19 @@ def __setitem__(self, key, value): "category, set the categories first" ) - # set by position - if isinstance(key, (int, np.integer)): + lindexer = self.categories.get_indexer(rvalue) + if isinstance(lindexer, np.ndarray) and lindexer.dtype.kind == "i": + lindexer = lindexer.astype(self._ndarray.dtype) + + return lindexer + + def _validate_setitem_key(self, key): + if lib.is_integer(key): + # set by position pass - # tuple of indexers (dataframe) elif isinstance(key, tuple): + # tuple of indexers (dataframe) # only allow 1 dimensional slicing, but can # in a 2-d case be passed (slice(None),....) if len(key) == 2: @@ -1951,17 +1955,14 @@ def __setitem__(self, key, value): else: raise AssertionError("invalid slicing for a 1-ndim categorical") - # slicing in Series or Categorical elif isinstance(key, slice): + # slicing in Series or Categorical pass # else: array of True/False in Series or Categorical - lindexer = self.categories.get_indexer(rvalue) - lindexer = self._maybe_coerce_indexer(lindexer) - key = check_array_indexer(self, key) - self._codes[key] = lindexer + return key def _reverse_indexer(self) -> Dict[Hashable, np.ndarray]: """ diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index a218745db0a44..2626890c2dbe5 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -546,6 +546,15 @@ def __getitem__(self, key): return self._box_func(result) return self._simple_new(result, dtype=self.dtype) + key = self._validate_getitem_key(key) + result = self._ndarray[key] + if lib.is_scalar(result): + return self._box_func(result) + + freq = self._get_getitem_freq(key) + return self._simple_new(result, dtype=self.dtype, freq=freq) + + def _validate_getitem_key(self, key): if com.is_bool_indexer(key): # first convert to boolean, because check_array_indexer doesn't # allow object dtype @@ -560,12 +569,7 @@ def __getitem__(self, key): pass else: key = check_array_indexer(self, key) - - freq = self._get_getitem_freq(key) - result = self._ndarray[key] - if lib.is_scalar(result): - return self._box_func(result) - return self._simple_new(result, dtype=self.dtype, freq=freq) + return key def _get_getitem_freq(self, key): """ From 592126c6910e9071f5e194b8a94043f748e0af54 Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Mon, 7 Sep 2020 17:15:51 -0700 Subject: [PATCH 473/632] COMPAT: match numpy behavior for searchsorted on dt64/td64 (#36176) --- doc/source/whatsnew/v1.2.0.rst | 1 + pandas/core/arrays/datetimelike.py | 7 +++---- pandas/tests/arrays/test_datetimelike.py | 11 ++++++++--- 3 files changed, 12 insertions(+), 7 deletions(-) diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index ccaae9f996425..2afa1f1a6199e 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -228,6 +228,7 @@ Datetimelike - Bug in :class:`DateOffset` where attributes reconstructed from pickle files differ from original objects when input values exceed normal ranges (e.g months=12) (:issue:`34511`) - Bug in :meth:`DatetimeIndex.get_slice_bound` where ``datetime.date`` 
objects were not accepted or naive :class:`Timestamp` with a tz-aware :class:`DatetimeIndex` (:issue:`35690`) - Bug in :meth:`DatetimeIndex.slice_locs` where ``datetime.date`` objects were not accepted (:issue:`34077`) +- Bug in :meth:`DatetimeIndex.searchsorted`, :meth:`TimedeltaIndex.searchsorted`, and :meth:`Series.searchsorted` with ``datetime64`` or ``timedelta64`` dtype placement of ``NaT`` values being inconsistent with ``NumPy`` (:issue:`36176`) Timedelta ^^^^^^^^^ diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index 2626890c2dbe5..6477b94a823ce 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -862,7 +862,8 @@ def _validate_searchsorted_value(self, value): # TODO: cast_str? we accept it for scalar value = self._validate_listlike(value, "searchsorted") - return self._unbox(value) + rv = self._unbox(value) + return self._rebox_native(rv) def _validate_setitem_value(self, value): msg = ( @@ -941,9 +942,7 @@ def searchsorted(self, value, side="left", sorter=None): Array of insertion points with the same shape as `value`. """ value = self._validate_searchsorted_value(value) - - # TODO: Use datetime64 semantics for sorting, xref GH#29844 - return self.asi8.searchsorted(value, side=side, sorter=sorter) + return self._data.searchsorted(value, side=side, sorter=sorter) def value_counts(self, dropna=False): """ diff --git a/pandas/tests/arrays/test_datetimelike.py b/pandas/tests/arrays/test_datetimelike.py index b1ab700427c28..292557fc04258 100644 --- a/pandas/tests/arrays/test_datetimelike.py +++ b/pandas/tests/arrays/test_datetimelike.py @@ -241,10 +241,15 @@ def test_searchsorted(self): expected = np.array([2, 3], dtype=np.intp) tm.assert_numpy_array_equal(result, expected) - # Following numpy convention, NaT goes at the beginning - # (unlike NaN which goes at the end) + # GH#29884 match numpy convention on whether NaT goes + # at the end or the beginning result = arr.searchsorted(pd.NaT) - assert result == 0 + if _np_version_under1p18 or self.array_cls is PeriodArray: + # Following numpy convention, NaT goes at the beginning + # (unlike NaN which goes at the end) + assert result == 0 + else: + assert result == 10 def test_getitem_2d(self, arr1d): # 2d slicing on a 1D array From 19f0a9fa0293cf291ef1f7d6e39fa07ec21f56ed Mon Sep 17 00:00:00 2001 From: Nidhi Zare Date: Tue, 8 Sep 2020 06:00:19 +0530 Subject: [PATCH 474/632] pandas docs json_normalize example (#36194) Co-authored-by: Nidhi Zare --- pandas/io/json/_normalize.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/pandas/io/json/_normalize.py b/pandas/io/json/_normalize.py index 44765dbe74b46..2e1fc57e88ed1 100644 --- a/pandas/io/json/_normalize.py +++ b/pandas/io/json/_normalize.py @@ -176,7 +176,7 @@ def _json_normalize( ... 'fitness': {'height': 130, 'weight': 60}}, ... {'id': 2, 'name': 'Faye Raker', ... 'fitness': {'height': 130, 'weight': 60}}] - >>> json_normalize(data, max_level=0) + >>> pandas.json_normalize(data, max_level=0) fitness id name 0 {'height': 130, 'weight': 60} 1.0 Cole Volk 1 {'height': 130, 'weight': 60} NaN Mose Reg @@ -191,7 +191,7 @@ def _json_normalize( ... 'fitness': {'height': 130, 'weight': 60}}, ... {'id': 2, 'name': 'Faye Raker', ... 'fitness': {'height': 130, 'weight': 60}}] - >>> json_normalize(data, max_level=1) + >>> pandas.json_normalize(data, max_level=1) fitness.height fitness.weight id name 0 130 60 1.0 Cole Volk 1 130 60 NaN Mose Reg @@ -208,7 +208,7 @@ def _json_normalize( ... 
'info': {'governor': 'John Kasich'}, ... 'counties': [{'name': 'Summit', 'population': 1234}, ... {'name': 'Cuyahoga', 'population': 1337}]}] - >>> result = json_normalize(data, 'counties', ['state', 'shortname', + >>> result = pandas.json_normalize(data, 'counties', ['state', 'shortname', ... ['info', 'governor']]) >>> result name population state shortname info.governor @@ -219,7 +219,7 @@ def _json_normalize( 4 Cuyahoga 1337 Ohio OH John Kasich >>> data = {'A': [1, 2]} - >>> json_normalize(data, 'A', record_prefix='Prefix.') + >>> pandas.json_normalize(data, 'A', record_prefix='Prefix.') Prefix.0 0 1 1 2 From fc75d9c96dbfb5fcc1cfb5c517fa9d313c1fb25d Mon Sep 17 00:00:00 2001 From: Matthew Roeschke Date: Tue, 8 Sep 2020 02:51:20 -0700 Subject: [PATCH 475/632] BUG: GroupbyRolling with an empty frame (#36208) Co-authored-by: Matt Roeschke --- doc/source/whatsnew/v1.1.2.rst | 2 +- pandas/core/window/rolling.py | 10 ++++++---- pandas/tests/window/test_grouper.py | 12 ++++++++++++ 3 files changed, 19 insertions(+), 5 deletions(-) diff --git a/doc/source/whatsnew/v1.1.2.rst b/doc/source/whatsnew/v1.1.2.rst index 28ce49c11b3f0..f13d38d1f8f76 100644 --- a/doc/source/whatsnew/v1.1.2.rst +++ b/doc/source/whatsnew/v1.1.2.rst @@ -24,7 +24,7 @@ Fixed regressions - Fix regression in pickle roundtrip of the ``closed`` attribute of :class:`IntervalIndex` (:issue:`35658`) - Fixed regression in :meth:`DataFrameGroupBy.agg` where a ``ValueError: buffer source array is read-only`` would be raised when the underlying array is read-only (:issue:`36014`) - Fixed regression in :meth:`Series.groupby.rolling` number of levels of :class:`MultiIndex` in input was compressed to one (:issue:`36018`) -- +- Fixed regression in :class:`DataFrameGroupBy` on an empty :class:`DataFrame` (:issue:`36197`) .. 
--------------------------------------------------------------------------- diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py index 235bd5364af02..9466ada3f4578 100644 --- a/pandas/core/window/rolling.py +++ b/pandas/core/window/rolling.py @@ -2240,10 +2240,12 @@ def _create_blocks(self, obj: FrameOrSeriesUnion): """ # Ensure the object we're rolling over is monotonically sorted relative # to the groups - groupby_order = np.concatenate( - list(self._groupby.grouper.indices.values()) - ).astype(np.int64) - obj = obj.take(groupby_order) + # GH 36197 + if not obj.empty: + groupby_order = np.concatenate( + list(self._groupby.grouper.indices.values()) + ).astype(np.int64) + obj = obj.take(groupby_order) return super()._create_blocks(obj) def _get_cython_func_type(self, func: str) -> Callable: diff --git a/pandas/tests/window/test_grouper.py b/pandas/tests/window/test_grouper.py index cb85ad7584da7..786cf68d28871 100644 --- a/pandas/tests/window/test_grouper.py +++ b/pandas/tests/window/test_grouper.py @@ -393,3 +393,15 @@ def test_groupby_rolling_index_changed(self, func): name="a", ) tm.assert_series_equal(result, expected) + + def test_groupby_rolling_empty_frame(self): + # GH 36197 + expected = pd.DataFrame({"s1": []}) + result = expected.groupby("s1").rolling(window=1).sum() + expected.index = pd.MultiIndex.from_tuples([], names=["s1", None]) + tm.assert_frame_equal(result, expected) + + expected = pd.DataFrame({"s1": [], "s2": []}) + result = expected.groupby(["s1", "s2"]).rolling(window=1).sum() + expected.index = pd.MultiIndex.from_tuples([], names=["s1", "s2", None]) + tm.assert_frame_equal(result, expected) From d7fe22fba1daaf7be6549f74deb0975d00637c0a Mon Sep 17 00:00:00 2001 From: Simon Hawkins Date: Tue, 8 Sep 2020 11:22:20 +0100 Subject: [PATCH 476/632] DOC: doc fix (#36205) --- doc/source/whatsnew/v1.1.2.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/whatsnew/v1.1.2.rst b/doc/source/whatsnew/v1.1.2.rst index f13d38d1f8f76..0e4a88f3ee56b 100644 --- a/doc/source/whatsnew/v1.1.2.rst +++ b/doc/source/whatsnew/v1.1.2.rst @@ -51,7 +51,7 @@ Bug fixes Other ~~~~~ - :meth:`factorize` now supports ``na_sentinel=None`` to include NaN in the uniques of the values and remove ``dropna`` keyword which was unintentionally exposed to public facing API in 1.1 version from :meth:`factorize` (:issue:`35667`) -- :meth:`DataFrame.plot` and meth:`Series.plot` raise ``UserWarning`` about usage of FixedFormatter and FixedLocator (:issue:`35684` and :issue:`35945`) +- :meth:`DataFrame.plot` and :meth:`Series.plot` raise ``UserWarning`` about usage of ``FixedFormatter`` and ``FixedLocator`` (:issue:`35684` and :issue:`35945`) .. --------------------------------------------------------------------------- From 81394d3be3a0876aaa0c7691df5c0058a600acd2 Mon Sep 17 00:00:00 2001 From: Simon Hawkins Date: Tue, 8 Sep 2020 12:50:13 +0100 Subject: [PATCH 477/632] DOC: release date for 1.1.2 (#36182) --- doc/source/whatsnew/v1.1.2.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/source/whatsnew/v1.1.2.rst b/doc/source/whatsnew/v1.1.2.rst index 0e4a88f3ee56b..a214ad9762733 100644 --- a/doc/source/whatsnew/v1.1.2.rst +++ b/doc/source/whatsnew/v1.1.2.rst @@ -1,7 +1,7 @@ .. _whatsnew_112: -What's new in 1.1.2 (??) ------------------------- +What's new in 1.1.2 (September 8, 2020) +--------------------------------------- These are the changes in pandas 1.1.2. See :ref:`release` for a full changelog including other versions of pandas. 
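A minimal sketch of the behavior pinned down by PATCH 475/632 above (#36208, GH 36197), assuming pandas 1.1.2 or later; it mirrors the new test in pandas/tests/window/test_grouper.py rather than adding anything to the patch itself. Before the guard added to _create_blocks, grouping an empty frame and rolling over it presumably raised because np.concatenate was handed an empty list of group indices; with the fix, the aggregation returns an empty frame indexed by an empty MultiIndex whose outer level is the group key.

>>> import pandas as pd
>>> df = pd.DataFrame({"s1": []})
>>> result = df.groupby("s1").rolling(window=1).sum()
>>> expected = pd.DataFrame(
...     {"s1": []},
...     index=pd.MultiIndex.from_tuples([], names=["s1", None]),
... )
>>> pd.testing.assert_frame_equal(result, expected)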
From 6625b899074d215c69337a72503d4fe141e5e107 Mon Sep 17 00:00:00 2001 From: Yanxian Lin Date: Tue, 8 Sep 2020 06:01:43 -0700 Subject: [PATCH 478/632] Fixed pandas.json_normalize doctest errors (#36207) --- pandas/io/json/_normalize.py | 34 +++++++++++++++++----------------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/pandas/io/json/_normalize.py b/pandas/io/json/_normalize.py index 2e1fc57e88ed1..3ed0b5851b395 100644 --- a/pandas/io/json/_normalize.py +++ b/pandas/io/json/_normalize.py @@ -163,11 +163,11 @@ def _json_normalize( >>> data = [{'id': 1, 'name': {'first': 'Coleen', 'last': 'Volk'}}, ... {'name': {'given': 'Mose', 'family': 'Regner'}}, ... {'id': 2, 'name': 'Faye Raker'}] - >>> pandas.json_normalize(data) - id name name.family name.first name.given name.last - 0 1.0 NaN NaN Coleen NaN Volk - 1 NaN NaN Regner NaN Mose NaN - 2 2.0 Faye Raker NaN NaN NaN NaN + >>> pd.json_normalize(data) + id name.first name.last name.given name.family name + 0 1.0 Coleen Volk NaN NaN NaN + 1 NaN NaN NaN Mose Regner NaN + 2 2.0 NaN NaN NaN NaN Faye Raker >>> data = [{'id': 1, ... 'name': "Cole Volk", ... 'fitness': {'height': 130, 'weight': 60}}, ... {'id': 2, 'name': 'Faye Raker', ... 'fitness': {'height': 130, 'weight': 60}}] - >>> pandas.json_normalize(data, max_level=0) - fitness id name - 0 {'height': 130, 'weight': 60} 1.0 Cole Volk - 1 {'height': 130, 'weight': 60} NaN Mose Reg - 2 {'height': 130, 'weight': 60} 2.0 Faye Raker + >>> pd.json_normalize(data, max_level=0) + id name fitness + 0 1.0 Cole Volk {'height': 130, 'weight': 60} + 1 NaN Mose Reg {'height': 130, 'weight': 60} + 2 2.0 Faye Raker {'height': 130, 'weight': 60} Normalizes nested data up to level 1. >>> data = [{'id': 1, ... 'name': "Cole Volk", ... 'fitness': {'height': 130, 'weight': 60}}, ... {'id': 2, 'name': 'Faye Raker', ... 'fitness': {'height': 130, 'weight': 60}}] - >>> pandas.json_normalize(data, max_level=1) - fitness.height fitness.weight id name - 0 130 60 1.0 Cole Volk - 1 130 60 NaN Mose Reg - 2 130 60 2.0 Faye Raker + >>> pd.json_normalize(data, max_level=1) + id name fitness.height fitness.weight + 0 1.0 Cole Volk 130 60 + 1 NaN Mose Reg 130 60 + 2 2.0 Faye Raker 130 60 >>> data = [{'state': 'Florida', ... 'shortname': 'FL', ... 'counties': [{'name': 'Dade', 'population': 12345}, ... {'name': 'Broward', 'population': 40000}, ... {'name': 'Palm Beach', 'population': 60000}]}, ... {'state': 'Ohio', ... 'shortname': 'OH', ... 'info': {'governor': 'John Kasich'}, ... 'counties': [{'name': 'Summit', 'population': 1234}, ... {'name': 'Cuyahoga', 'population': 1337}]}] - >>> result = pandas.json_normalize(data, 'counties', ['state', 'shortname', + >>> result = pd.json_normalize(data, 'counties', ['state', 'shortname',
['info', 'governor']]) >>> result name population state shortname info.governor @@ -219,7 +219,7 @@ def _json_normalize( 4 Cuyahoga 1337 Ohio OH John Kasich >>> data = {'A': [1, 2]} - >>> pandas.json_normalize(data, 'A', record_prefix='Prefix.') + >>> pd.json_normalize(data, 'A', record_prefix='Prefix.') Prefix.0 0 1 1 2 From 6d340a9ee37b0814f86bd5d6ea0d2d8558a68c6b Mon Sep 17 00:00:00 2001 From: Irv Lustig Date: Tue, 8 Sep 2020 09:45:08 -0400 Subject: [PATCH 479/632] BUG: copying series into empty dataframe does not preserve dataframe index name (#36141) --- doc/source/whatsnew/v1.1.2.rst | 1 + pandas/core/frame.py | 8 +++++--- pandas/tests/indexing/test_partial.py | 12 ++++++++++++ 3 files changed, 18 insertions(+), 3 deletions(-) diff --git a/doc/source/whatsnew/v1.1.2.rst b/doc/source/whatsnew/v1.1.2.rst index a214ad9762733..c6a08f4fb852a 100644 --- a/doc/source/whatsnew/v1.1.2.rst +++ b/doc/source/whatsnew/v1.1.2.rst @@ -43,6 +43,7 @@ Bug fixes - Bug in :class:`DataFrame` indexing returning an incorrect :class:`Series` in some cases when the series has been altered and a cache not invalidated (:issue:`33675`) - Bug in :meth:`DataFrame.corr` causing subsequent indexing lookups to be incorrect (:issue:`35882`) - Bug in :meth:`import_optional_dependency` returning incorrect package names in cases where package name is different from import name (:issue:`35948`) +- Bug when setting empty :class:`DataFrame` column to a :class:`Series` in preserving name of index in frame (:issue:`31368`) .. --------------------------------------------------------------------------- diff --git a/pandas/core/frame.py b/pandas/core/frame.py index e1a889bf79d95..59cf4c0e2f81d 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -3206,9 +3206,11 @@ def _ensure_valid_index(self, value): "and a value that cannot be converted to a Series" ) from err - self._mgr = self._mgr.reindex_axis( - value.index.copy(), axis=1, fill_value=np.nan - ) + # GH31368 preserve name of index + index_copy = value.index.copy() + index_copy.name = self.index.name + + self._mgr = self._mgr.reindex_axis(index_copy, axis=1, fill_value=np.nan) def _box_col_values(self, values, loc: int) -> Series: """ diff --git a/pandas/tests/indexing/test_partial.py b/pandas/tests/indexing/test_partial.py index 350f86b4e9fd0..7afbbc2b9ab2b 100644 --- a/pandas/tests/indexing/test_partial.py +++ b/pandas/tests/indexing/test_partial.py @@ -660,3 +660,15 @@ def test_indexing_timeseries_regression(self): expected = Series(rng, index=rng) tm.assert_series_equal(result, expected) + + def test_index_name_empty(self): + # GH 31368 + df = pd.DataFrame({}, index=pd.RangeIndex(0, name="df_index")) + series = pd.Series(1.23, index=pd.RangeIndex(4, name="series_index")) + + df["series"] = series + expected = pd.DataFrame( + {"series": [1.23] * 4}, index=pd.RangeIndex(4, name="df_index") + ) + + tm.assert_frame_equal(df, expected) From 903ca22632dcee944f75a9260749ca8734f1697d Mon Sep 17 00:00:00 2001 From: tiagohonorato <61059243+tiagohonorato@users.noreply.github.com> Date: Tue, 8 Sep 2020 12:28:21 -0300 Subject: [PATCH 480/632] CLN remove trailing commas (#36222) --- pandas/tests/io/pytables/test_timezones.py | 4 ++-- pandas/tests/io/test_feather.py | 2 +- pandas/tests/io/test_s3.py | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/pandas/tests/io/pytables/test_timezones.py b/pandas/tests/io/pytables/test_timezones.py index 38d32b0bdc8a3..1c29928991cde 100644 --- a/pandas/tests/io/pytables/test_timezones.py +++ 
b/pandas/tests/io/pytables/test_timezones.py @@ -110,7 +110,7 @@ def test_append_with_timezones_dateutil(setup_path): dti = dti._with_freq(None) # freq doesnt round-trip # GH 4098 example - df = DataFrame(dict(A=Series(range(3), index=dti,))) + df = DataFrame(dict(A=Series(range(3), index=dti))) _maybe_remove(store, "df") store.put("df", df) @@ -197,7 +197,7 @@ def test_append_with_timezones_pytz(setup_path): dti = dti._with_freq(None) # freq doesnt round-trip # GH 4098 example - df = DataFrame(dict(A=Series(range(3), index=dti,))) + df = DataFrame(dict(A=Series(range(3), index=dti))) _maybe_remove(store, "df") store.put("df", df) diff --git a/pandas/tests/io/test_feather.py b/pandas/tests/io/test_feather.py index a8a5c8f00e6bf..c1e63f512b53e 100644 --- a/pandas/tests/io/test_feather.py +++ b/pandas/tests/io/test_feather.py @@ -76,7 +76,7 @@ def test_basic(self): pd.Timestamp("20130103"), ], "dtns": pd.DatetimeIndex( - list(pd.date_range("20130101", periods=3, freq="ns")), freq=None, + list(pd.date_range("20130101", periods=3, freq="ns")), freq=None ), } ) diff --git a/pandas/tests/io/test_s3.py b/pandas/tests/io/test_s3.py index a137e76b1696b..0ee6cb0796644 100644 --- a/pandas/tests/io/test_s3.py +++ b/pandas/tests/io/test_s3.py @@ -43,6 +43,6 @@ def test_read_with_creds_from_pub_bucket(): os.environ.setdefault("AWS_ACCESS_KEY_ID", "foobar_key") os.environ.setdefault("AWS_SECRET_ACCESS_KEY", "foobar_secret") df = read_csv( - "s3://gdelt-open-data/events/1981.csv", nrows=5, sep="\t", header=None, + "s3://gdelt-open-data/events/1981.csv", nrows=5, sep="\t", header=None ) assert len(df) == 5 From f4458f2ad25f26bdc9288cc77440078d84110352 Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Tue, 8 Sep 2020 08:29:03 -0700 Subject: [PATCH 481/632] CLN: remove unused return value in _create_blocks (#36196) --- pandas/core/window/rolling.py | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py index 9466ada3f4578..5a7482076903c 100644 --- a/pandas/core/window/rolling.py +++ b/pandas/core/window/rolling.py @@ -234,7 +234,7 @@ def _validate_get_window_bounds_signature(window: BaseIndexer) -> None: f"get_window_bounds" ) - def _create_blocks(self, obj: FrameOrSeriesUnion): + def _create_data(self, obj: FrameOrSeries) -> FrameOrSeries: """ Split data into blocks & return conformed data. 
""" @@ -242,9 +242,8 @@ def _create_blocks(self, obj: FrameOrSeriesUnion): if self.on is not None and not isinstance(self.on, Index): if obj.ndim == 2: obj = obj.reindex(columns=obj.columns.difference([self.on]), copy=False) - blocks = obj._to_dict_of_blocks(copy=False).values() - return blocks, obj + return obj def _gotitem(self, key, ndim, subset=None): """ @@ -333,7 +332,7 @@ def __repr__(self) -> str: def __iter__(self): window = self._get_window(win_type=None) - _, obj = self._create_blocks(self._selected_obj) + obj = self._create_data(self._selected_obj) index = self._get_window_indexer(window=window) start, end = index.get_window_bounds( @@ -469,7 +468,7 @@ def _apply_series(self, homogeneous_func: Callable[..., ArrayLike]) -> "Series": """ Series version of _apply_blockwise """ - _, obj = self._create_blocks(self._selected_obj) + obj = self._create_data(self._selected_obj) try: values = self._prep_values(obj.values) @@ -489,7 +488,7 @@ def _apply_blockwise( if self._selected_obj.ndim == 1: return self._apply_series(homogeneous_func) - _, obj = self._create_blocks(self._selected_obj) + obj = self._create_data(self._selected_obj) mgr = obj._mgr def hfunc(bvalues: ArrayLike) -> ArrayLike: @@ -1268,7 +1267,7 @@ def count(self): # implementations shouldn't end up here assert not isinstance(self.window, BaseIndexer) - _, obj = self._create_blocks(self._selected_obj) + obj = self._create_data(self._selected_obj) def hfunc(values: np.ndarray) -> np.ndarray: result = notna(values) @@ -2234,7 +2233,7 @@ def _apply( def _constructor(self): return Rolling - def _create_blocks(self, obj: FrameOrSeriesUnion): + def _create_data(self, obj: FrameOrSeries) -> FrameOrSeries: """ Split data into blocks & return conformed data. """ @@ -2246,7 +2245,7 @@ def _create_blocks(self, obj: FrameOrSeriesUnion): list(self._groupby.grouper.indices.values()) ).astype(np.int64) obj = obj.take(groupby_order) - return super()._create_blocks(obj) + return super()._create_data(obj) def _get_cython_func_type(self, func: str) -> Callable: """ From 8c7efd1c2859c807e1326c695e330e7c03dcd392 Mon Sep 17 00:00:00 2001 From: Irv Lustig Date: Tue, 8 Sep 2020 11:30:36 -0400 Subject: [PATCH 482/632] Make to_numeric default to correct precision (#36149) --- doc/source/whatsnew/v1.2.0.rst | 2 +- pandas/_libs/src/parse_helper.h | 4 +- pandas/tests/tools/test_to_numeric.py | 58 +++++++++++++++++++++++++++ 3 files changed, 62 insertions(+), 2 deletions(-) diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index 2afa1f1a6199e..2aac2596c18cb 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -245,7 +245,7 @@ Timezones Numeric ^^^^^^^ -- +- Bug in :func:`to_numeric` where float precision was incorrect (:issue:`31364`) - Conversion diff --git a/pandas/_libs/src/parse_helper.h b/pandas/_libs/src/parse_helper.h index 2ada0a4bd173d..d161c4e29fe15 100644 --- a/pandas/_libs/src/parse_helper.h +++ b/pandas/_libs/src/parse_helper.h @@ -18,7 +18,9 @@ int to_double(char *item, double *p_value, char sci, char decimal, char *p_end = NULL; int error = 0; - *p_value = xstrtod(item, &p_end, decimal, sci, '\0', 1, &error, maybe_int); + /* Switch to precise xstrtod GH 31364 */ + *p_value = precise_xstrtod(item, &p_end, decimal, sci, '\0', 1, + &error, maybe_int); return (error == 0) && (!*p_end); } diff --git a/pandas/tests/tools/test_to_numeric.py b/pandas/tests/tools/test_to_numeric.py index 263887a8ea36e..450076f2824ad 100644 --- a/pandas/tests/tools/test_to_numeric.py +++ 
b/pandas/tests/tools/test_to_numeric.py @@ -649,3 +649,61 @@ def test_failure_to_convert_uint64_string_to_NaN(): ser = Series([32, 64, np.nan]) result = to_numeric(pd.Series(["32", "64", "uint64"]), errors="coerce") tm.assert_series_equal(result, ser) + + +@pytest.mark.parametrize( + "strrep", + [ + "243.164", + "245.968", + "249.585", + "259.745", + "265.742", + "272.567", + "279.196", + "280.366", + "275.034", + "271.351", + "272.889", + "270.627", + "280.828", + "290.383", + "308.153", + "319.945", + "336.0", + "344.09", + "351.385", + "356.178", + "359.82", + "361.03", + "367.701", + "380.812", + "387.98", + "391.749", + "391.171", + "385.97", + "385.345", + "386.121", + "390.996", + "399.734", + "413.073", + "421.532", + "430.221", + "437.092", + "439.746", + "446.01", + "451.191", + "460.463", + "469.779", + "472.025", + "479.49", + "474.864", + "467.54", + "471.978", + ], +) +def test_precision_float_conversion(strrep): + # GH 31364 + result = to_numeric(strrep) + + assert result == float(strrep) From 490a999ac19a20002c2fb3e5fff2003329947441 Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Tue, 8 Sep 2020 15:24:54 -0700 Subject: [PATCH 483/632] REF: implement Categorical._box_func, make _box_func a method (#36206) --- pandas/core/arrays/categorical.py | 10 ++++++---- pandas/core/arrays/datetimelike.py | 3 +-- pandas/core/arrays/datetimes.py | 6 +++--- pandas/core/arrays/period.py | 5 ++--- pandas/core/arrays/timedeltas.py | 18 +++++++++++++----- pandas/core/indexes/datetimelike.py | 10 +++++----- 6 files changed, 30 insertions(+), 22 deletions(-) diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index b732db4c66003..f20d3d5e316f8 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -1734,6 +1734,11 @@ def _ndarray(self) -> np.ndarray: def _from_backing_data(self, arr: np.ndarray) -> "Categorical": return self._constructor(arr, dtype=self.dtype, fastpath=True) + def _box_func(self, i: int): + if i == -1: + return np.NaN + return self.categories[i] + # ------------------------------------------------------------------ def take_nd(self, indexer, allow_fill: bool = False, fill_value=None): @@ -1874,10 +1879,7 @@ def __getitem__(self, key): """ if isinstance(key, (int, np.integer)): i = self._codes[key] - if i == -1: - return np.nan - else: - return self.categories[i] + return self._box_func(i) key = check_array_indexer(self, key) diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index 6477b94a823ce..ba5bfc108f16b 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -478,8 +478,7 @@ def _from_backing_data(self: _T, arr: np.ndarray) -> _T: # ------------------------------------------------------------------ - @property - def _box_func(self): + def _box_func(self, x): """ box function to get object from internal representation """ diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index d913e7be9ae5f..9f10cc84dcfcc 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -7,6 +7,7 @@ from pandas._libs import lib, tslib from pandas._libs.tslibs import ( NaT, + NaTType, Resolution, Timestamp, conversion, @@ -475,9 +476,8 @@ def _maybe_clear_freq(self): # ----------------------------------------------------------------- # Descriptive Properties - @property - def _box_func(self): - return lambda x: Timestamp(x, freq=self.freq, tz=self.tz) + def _box_func(self, x) -> Union[Timestamp, NaTType]: + 
return Timestamp(x, freq=self.freq, tz=self.tz) @property def dtype(self) -> Union[np.dtype, DatetimeTZDtype]: diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py index c3a9430736969..eea11bde77030 100644 --- a/pandas/core/arrays/period.py +++ b/pandas/core/arrays/period.py @@ -484,9 +484,8 @@ def _time_shift(self, periods, freq=None): values[self._isnan] = iNaT return type(self)(values, freq=self.freq) - @property - def _box_func(self): - return lambda x: Period._from_ordinal(ordinal=x, freq=self.freq) + def _box_func(self, x) -> Union[Period, NaTType]: + return Period._from_ordinal(ordinal=x, freq=self.freq) def asfreq(self, freq=None, how: str = "E") -> "PeriodArray": """ diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py index 485ebb49a376d..5e3c0f2b8d876 100644 --- a/pandas/core/arrays/timedeltas.py +++ b/pandas/core/arrays/timedeltas.py @@ -1,10 +1,19 @@ from datetime import timedelta -from typing import List +from typing import List, Union import numpy as np from pandas._libs import lib, tslibs -from pandas._libs.tslibs import NaT, Period, Tick, Timedelta, Timestamp, iNaT, to_offset +from pandas._libs.tslibs import ( + NaT, + NaTType, + Period, + Tick, + Timedelta, + Timestamp, + iNaT, + to_offset, +) from pandas._libs.tslibs.conversion import precision_from_unit from pandas._libs.tslibs.fields import get_timedelta_field from pandas._libs.tslibs.timedeltas import array_to_timedelta64, parse_timedelta_unit @@ -108,9 +117,8 @@ class TimedeltaArray(dtl.DatetimeLikeArrayMixin, dtl.TimelikeOps): # Note: ndim must be defined to ensure NaT.__richcmp(TimedeltaArray) # operates pointwise. - @property - def _box_func(self): - return lambda x: Timedelta(x, unit="ns") + def _box_func(self, x) -> Union[Timedelta, NaTType]: + return Timedelta(x, unit="ns") @property def dtype(self): diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index e7e93068d9175..54c8ed60b6097 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -81,7 +81,7 @@ def wrapper(left, right): DatetimeLikeArrayMixin, cache=True, ) -@inherit_names(["mean", "asi8", "freq", "freqstr", "_box_func"], DatetimeLikeArrayMixin) +@inherit_names(["mean", "asi8", "freq", "freqstr"], DatetimeLikeArrayMixin) class DatetimeIndexOpsMixin(ExtensionIndex): """ Common ops mixin to support a unified interface datetimelike Index. 
@@ -244,7 +244,7 @@ def min(self, axis=None, skipna=True, *args, **kwargs): # quick check if len(i8) and self.is_monotonic: if i8[0] != iNaT: - return self._box_func(i8[0]) + return self._data._box_func(i8[0]) if self.hasnans: if skipna: @@ -253,7 +253,7 @@ def min(self, axis=None, skipna=True, *args, **kwargs): return self._na_value else: min_stamp = i8.min() - return self._box_func(min_stamp) + return self._data._box_func(min_stamp) except ValueError: return self._na_value @@ -301,7 +301,7 @@ def max(self, axis=None, skipna=True, *args, **kwargs): # quick check if len(i8) and self.is_monotonic: if i8[-1] != iNaT: - return self._box_func(i8[-1]) + return self._data._box_func(i8[-1]) if self.hasnans: if skipna: @@ -310,7 +310,7 @@ def max(self, axis=None, skipna=True, *args, **kwargs): return self._na_value else: max_stamp = i8.max() - return self._box_func(max_stamp) + return self._data._box_func(max_stamp) except ValueError: return self._na_value From d26f99a9f138226178497e6f0fb9e1052baabf7f Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Tue, 8 Sep 2020 15:41:30 -0700 Subject: [PATCH 484/632] STY: de-privatize names imported across modules (#36178) --- pandas/__init__.py | 6 +-- pandas/_testing.py | 2 +- pandas/compat/numpy/__init__.py | 10 ++--- pandas/core/array_algos/masked_reductions.py | 4 +- pandas/core/arrays/sparse/accessor.py | 8 ++-- pandas/core/arrays/sparse/scipy_sparse.py | 4 +- pandas/core/common.py | 4 +- pandas/core/computation/engines.py | 4 +- pandas/core/computation/expr.py | 24 +++++------ pandas/core/computation/expressions.py | 12 +++--- pandas/core/computation/ops.py | 38 ++++++++--------- pandas/core/computation/scope.py | 4 +- pandas/core/dtypes/cast.py | 4 +- pandas/core/dtypes/common.py | 2 +- pandas/core/generic.py | 4 +- pandas/core/index.py | 2 +- pandas/core/indexes/base.py | 4 +- pandas/core/indexes/multi.py | 4 +- pandas/core/nanops.py | 6 +-- pandas/core/ops/__init__.py | 12 +++--- pandas/core/reshape/merge.py | 2 +- pandas/io/formats/format.py | 4 +- pandas/io/json/_json.py | 6 +-- pandas/io/parsers.py | 6 +-- pandas/tests/arrays/test_datetimelike.py | 4 +- pandas/tests/computation/test_eval.py | 42 +++++++++---------- pandas/tests/frame/test_arithmetic.py | 2 +- pandas/tests/generic/test_generic.py | 6 +-- pandas/tests/indexes/common.py | 4 +- pandas/tests/indexes/multi/test_analytics.py | 4 +- pandas/tests/indexes/test_numpy_compat.py | 8 ++-- pandas/tests/indexing/test_loc.py | 4 +- .../tests/scalar/timedelta/test_arithmetic.py | 6 ++- pandas/tests/test_common.py | 4 +- pandas/tests/test_expressions.py | 2 +- pandas/util/_test_decorators.py | 6 +-- 36 files changed, 135 insertions(+), 133 deletions(-) diff --git a/pandas/__init__.py b/pandas/__init__.py index 2737bcd8f9ccf..70bb0c8a2cb51 100644 --- a/pandas/__init__.py +++ b/pandas/__init__.py @@ -20,9 +20,9 @@ # numpy compat from pandas.compat.numpy import ( - _np_version_under1p17, - _np_version_under1p18, - _is_numpy_dev, + np_version_under1p17 as _np_version_under1p17, + np_version_under1p18 as _np_version_under1p18, + is_numpy_dev as _is_numpy_dev, ) try: diff --git a/pandas/_testing.py b/pandas/_testing.py index 7dba578951deb..9db0c3496e290 100644 --- a/pandas/_testing.py +++ b/pandas/_testing.py @@ -2713,7 +2713,7 @@ def use_numexpr(use, min_elements=None): if min_elements is None: min_elements = expr._MIN_ELEMENTS - olduse = expr._USE_NUMEXPR + olduse = expr.USE_NUMEXPR oldmin = expr._MIN_ELEMENTS expr.set_use_numexpr(use) expr._MIN_ELEMENTS = min_elements diff --git 
a/pandas/compat/numpy/__init__.py b/pandas/compat/numpy/__init__.py index 08d06da93bb45..a2444b7ba5a0d 100644 --- a/pandas/compat/numpy/__init__.py +++ b/pandas/compat/numpy/__init__.py @@ -8,11 +8,11 @@ # numpy versioning _np_version = np.__version__ _nlv = LooseVersion(_np_version) -_np_version_under1p17 = _nlv < LooseVersion("1.17") -_np_version_under1p18 = _nlv < LooseVersion("1.18") +np_version_under1p17 = _nlv < LooseVersion("1.17") +np_version_under1p18 = _nlv < LooseVersion("1.18") _np_version_under1p19 = _nlv < LooseVersion("1.19") _np_version_under1p20 = _nlv < LooseVersion("1.20") -_is_numpy_dev = ".dev" in str(_nlv) +is_numpy_dev = ".dev" in str(_nlv) _min_numpy_ver = "1.16.5" @@ -65,6 +65,6 @@ def np_array_datetime64_compat(arr, *args, **kwargs): __all__ = [ "np", "_np_version", - "_np_version_under1p17", - "_is_numpy_dev", + "np_version_under1p17", + "is_numpy_dev", ] diff --git a/pandas/core/array_algos/masked_reductions.py b/pandas/core/array_algos/masked_reductions.py index 1b9ed014f27b7..3f4625e2b712a 100644 --- a/pandas/core/array_algos/masked_reductions.py +++ b/pandas/core/array_algos/masked_reductions.py @@ -8,7 +8,7 @@ import numpy as np from pandas._libs import missing as libmissing -from pandas.compat.numpy import _np_version_under1p17 +from pandas.compat.numpy import np_version_under1p17 from pandas.core.nanops import check_below_min_count @@ -46,7 +46,7 @@ def _sumprod( if check_below_min_count(values.shape, mask, min_count): return libmissing.NA - if _np_version_under1p17: + if np_version_under1p17: return func(values[~mask]) else: return func(values, where=~mask) diff --git a/pandas/core/arrays/sparse/accessor.py b/pandas/core/arrays/sparse/accessor.py index da8d695c59b9e..ec4b0fd89860c 100644 --- a/pandas/core/arrays/sparse/accessor.py +++ b/pandas/core/arrays/sparse/accessor.py @@ -88,9 +88,9 @@ def from_coo(cls, A, dense_index=False): dtype: Sparse[float64, nan] """ from pandas import Series - from pandas.core.arrays.sparse.scipy_sparse import _coo_to_sparse_series + from pandas.core.arrays.sparse.scipy_sparse import coo_to_sparse_series - result = _coo_to_sparse_series(A, dense_index=dense_index) + result = coo_to_sparse_series(A, dense_index=dense_index) result = Series(result.array, index=result.index, copy=False) return result @@ -168,9 +168,9 @@ def to_coo(self, row_levels=(0,), column_levels=(1,), sort_labels=False): >>> columns [('a', 0), ('a', 1), ('b', 0), ('b', 1)] """ - from pandas.core.arrays.sparse.scipy_sparse import _sparse_series_to_coo + from pandas.core.arrays.sparse.scipy_sparse import sparse_series_to_coo - A, rows, columns = _sparse_series_to_coo( + A, rows, columns = sparse_series_to_coo( self._parent, row_levels, column_levels, sort_labels=sort_labels ) return A, rows, columns diff --git a/pandas/core/arrays/sparse/scipy_sparse.py b/pandas/core/arrays/sparse/scipy_sparse.py index eafd782dc9b9c..56c678c88b9c7 100644 --- a/pandas/core/arrays/sparse/scipy_sparse.py +++ b/pandas/core/arrays/sparse/scipy_sparse.py @@ -85,7 +85,7 @@ def _get_index_subset_to_coord_dict(index, subset, sort_labels=False): return values, i_coord, j_coord, i_labels, j_labels -def _sparse_series_to_coo(ss, row_levels=(0,), column_levels=(1,), sort_labels=False): +def sparse_series_to_coo(ss, row_levels=(0,), column_levels=(1,), sort_labels=False): """ Convert a sparse Series to a scipy.sparse.coo_matrix using index levels row_levels, column_levels as the row and column @@ -113,7 +113,7 @@ def _sparse_series_to_coo(ss, row_levels=(0,), column_levels=(1,), 
sort_labels=F return sparse_matrix, rows, columns -def _coo_to_sparse_series(A, dense_index: bool = False): +def coo_to_sparse_series(A, dense_index: bool = False): """ Convert a scipy.sparse.coo_matrix to a SparseSeries. diff --git a/pandas/core/common.py b/pandas/core/common.py index 279d512e5a046..968fb180abcd0 100644 --- a/pandas/core/common.py +++ b/pandas/core/common.py @@ -16,7 +16,7 @@ from pandas._libs import lib, tslibs from pandas._typing import AnyArrayLike, Scalar, T -from pandas.compat.numpy import _np_version_under1p18 +from pandas.compat.numpy import np_version_under1p18 from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike from pandas.core.dtypes.common import ( @@ -425,7 +425,7 @@ def random_state(state=None): if ( is_integer(state) or is_array_like(state) - or (not _np_version_under1p18 and isinstance(state, np.random.BitGenerator)) + or (not np_version_under1p18 and isinstance(state, np.random.BitGenerator)) ): return np.random.RandomState(state) elif isinstance(state, np.random.RandomState): diff --git a/pandas/core/computation/engines.py b/pandas/core/computation/engines.py index 9c5388faae1bd..0cdc0f530a7f3 100644 --- a/pandas/core/computation/engines.py +++ b/pandas/core/computation/engines.py @@ -6,11 +6,11 @@ from typing import Dict, Type from pandas.core.computation.align import align_terms, reconstruct_object -from pandas.core.computation.ops import _mathops, _reductions +from pandas.core.computation.ops import MATHOPS, REDUCTIONS import pandas.io.formats.printing as printing -_ne_builtins = frozenset(_mathops + _reductions) +_ne_builtins = frozenset(MATHOPS + REDUCTIONS) class NumExprClobberingError(NameError): diff --git a/pandas/core/computation/expr.py b/pandas/core/computation/expr.py index df71b4fe415f8..8cff6abc071ca 100644 --- a/pandas/core/computation/expr.py +++ b/pandas/core/computation/expr.py @@ -12,7 +12,13 @@ import pandas.core.common as com from pandas.core.computation.ops import ( - _LOCAL_TAG, + ARITH_OPS_SYMS, + BOOL_OPS_SYMS, + CMP_OPS_SYMS, + LOCAL_TAG, + MATHOPS, + REDUCTIONS, + UNARY_OPS_SYMS, BinOp, Constant, Div, @@ -21,12 +27,6 @@ Term, UnaryOp, UndefinedVariableError, - _arith_ops_syms, - _bool_ops_syms, - _cmp_ops_syms, - _mathops, - _reductions, - _unary_ops_syms, is_term, ) from pandas.core.computation.parsing import clean_backtick_quoted_toks, tokenize_string @@ -101,7 +101,7 @@ def _replace_locals(tok: Tuple[int, str]) -> Tuple[int, str]: """ toknum, tokval = tok if toknum == tokenize.OP and tokval == "@": - return tokenize.OP, _LOCAL_TAG + return tokenize.OP, LOCAL_TAG return toknum, tokval @@ -338,7 +338,7 @@ class BaseExprVisitor(ast.NodeVisitor): const_type: Type[Term] = Constant term_type = Term - binary_ops = _cmp_ops_syms + _bool_ops_syms + _arith_ops_syms + binary_ops = CMP_OPS_SYMS + BOOL_OPS_SYMS + ARITH_OPS_SYMS binary_op_nodes = ( "Gt", "Lt", @@ -362,7 +362,7 @@ class BaseExprVisitor(ast.NodeVisitor): ) binary_op_nodes_map = dict(zip(binary_ops, binary_op_nodes)) - unary_ops = _unary_ops_syms + unary_ops = UNARY_OPS_SYMS unary_op_nodes = "UAdd", "USub", "Invert", "Not" unary_op_nodes_map = {k: v for k, v in zip(unary_ops, unary_op_nodes)} @@ -494,7 +494,7 @@ def _maybe_evaluate_binop( if self.engine != "pytables": if ( - res.op in _cmp_ops_syms + res.op in CMP_OPS_SYMS and getattr(lhs, "is_datetime", False) or getattr(rhs, "is_datetime", False) ): @@ -726,7 +726,7 @@ def visitor(x, y): _python_not_supported = frozenset(["Dict", "BoolOp", "In", "NotIn"]) -_numexpr_supported_calls = 
frozenset(_reductions + _mathops) +_numexpr_supported_calls = frozenset(REDUCTIONS + MATHOPS) @disallow( diff --git a/pandas/core/computation/expressions.py b/pandas/core/computation/expressions.py index d2c08c343ab4b..0032fe97b8b33 100644 --- a/pandas/core/computation/expressions.py +++ b/pandas/core/computation/expressions.py @@ -23,7 +23,7 @@ _TEST_MODE = None _TEST_RESULT: List[bool] = list() -_USE_NUMEXPR = NUMEXPR_INSTALLED +USE_NUMEXPR = NUMEXPR_INSTALLED _evaluate = None _where = None @@ -39,21 +39,21 @@ def set_use_numexpr(v=True): # set/unset to use numexpr - global _USE_NUMEXPR + global USE_NUMEXPR if NUMEXPR_INSTALLED: - _USE_NUMEXPR = v + USE_NUMEXPR = v # choose what we are going to do global _evaluate, _where - _evaluate = _evaluate_numexpr if _USE_NUMEXPR else _evaluate_standard - _where = _where_numexpr if _USE_NUMEXPR else _where_standard + _evaluate = _evaluate_numexpr if USE_NUMEXPR else _evaluate_standard + _where = _where_numexpr if USE_NUMEXPR else _where_standard def set_numexpr_threads(n=None): # if we are using numexpr, set the threads to n # otherwise reset - if NUMEXPR_INSTALLED and _USE_NUMEXPR: + if NUMEXPR_INSTALLED and USE_NUMEXPR: if n is None: n = ne.detect_number_of_cores() ne.set_num_threads(n) diff --git a/pandas/core/computation/ops.py b/pandas/core/computation/ops.py index 1fb3910b8577d..5759cd17476d6 100644 --- a/pandas/core/computation/ops.py +++ b/pandas/core/computation/ops.py @@ -16,11 +16,11 @@ import pandas.core.common as com from pandas.core.computation.common import ensure_decoded, result_type_many -from pandas.core.computation.scope import _DEFAULT_GLOBALS +from pandas.core.computation.scope import DEFAULT_GLOBALS from pandas.io.formats.printing import pprint_thing, pprint_thing_encoded -_reductions = ("sum", "prod") +REDUCTIONS = ("sum", "prod") _unary_math_ops = ( "sin", @@ -46,10 +46,10 @@ ) _binary_math_ops = ("arctan2",) -_mathops = _unary_math_ops + _binary_math_ops +MATHOPS = _unary_math_ops + _binary_math_ops -_LOCAL_TAG = "__pd_eval_local_" +LOCAL_TAG = "__pd_eval_local_" class UndefinedVariableError(NameError): @@ -80,13 +80,13 @@ def __init__(self, name, env, side=None, encoding=None): self.env = env self.side = side tname = str(name) - self.is_local = tname.startswith(_LOCAL_TAG) or tname in _DEFAULT_GLOBALS + self.is_local = tname.startswith(LOCAL_TAG) or tname in DEFAULT_GLOBALS self._value = self._resolve_name() self.encoding = encoding @property def local_name(self) -> str: - return self.name.replace(_LOCAL_TAG, "") + return self.name.replace(LOCAL_TAG, "") def __repr__(self) -> str: return pprint_thing(self.name) @@ -220,7 +220,7 @@ def __repr__(self) -> str: @property def return_type(self): # clobber types to bool if the op is a boolean operator - if self.op in (_cmp_ops_syms + _bool_ops_syms): + if self.op in (CMP_OPS_SYMS + BOOL_OPS_SYMS): return np.bool_ return result_type_many(*(term.type for term in com.flatten(self))) @@ -280,7 +280,7 @@ def _not_in(x, y): return x not in y -_cmp_ops_syms = (">", "<", ">=", "<=", "==", "!=", "in", "not in") +CMP_OPS_SYMS = (">", "<", ">=", "<=", "==", "!=", "in", "not in") _cmp_ops_funcs = ( operator.gt, operator.lt, @@ -291,13 +291,13 @@ def _not_in(x, y): _in, _not_in, ) -_cmp_ops_dict = dict(zip(_cmp_ops_syms, _cmp_ops_funcs)) +_cmp_ops_dict = dict(zip(CMP_OPS_SYMS, _cmp_ops_funcs)) -_bool_ops_syms = ("&", "|", "and", "or") +BOOL_OPS_SYMS = ("&", "|", "and", "or") _bool_ops_funcs = (operator.and_, operator.or_, operator.and_, operator.or_) -_bool_ops_dict = dict(zip(_bool_ops_syms, 
_bool_ops_funcs)) +_bool_ops_dict = dict(zip(BOOL_OPS_SYMS, _bool_ops_funcs)) -_arith_ops_syms = ("+", "-", "*", "/", "**", "//", "%") +ARITH_OPS_SYMS = ("+", "-", "*", "/", "**", "//", "%") _arith_ops_funcs = ( operator.add, operator.sub, @@ -307,12 +307,12 @@ def _not_in(x, y): operator.floordiv, operator.mod, ) -_arith_ops_dict = dict(zip(_arith_ops_syms, _arith_ops_funcs)) +_arith_ops_dict = dict(zip(ARITH_OPS_SYMS, _arith_ops_funcs)) -_special_case_arith_ops_syms = ("**", "//", "%") +SPECIAL_CASE_ARITH_OPS_SYMS = ("**", "//", "%") _special_case_arith_ops_funcs = (operator.pow, operator.floordiv, operator.mod) _special_case_arith_ops_dict = dict( - zip(_special_case_arith_ops_syms, _special_case_arith_ops_funcs) + zip(SPECIAL_CASE_ARITH_OPS_SYMS, _special_case_arith_ops_funcs) ) _binary_ops_dict = {} @@ -530,9 +530,9 @@ def __init__(self, lhs, rhs): _cast_inplace(com.flatten(self), acceptable_dtypes, np.float_) -_unary_ops_syms = ("+", "-", "~", "not") +UNARY_OPS_SYMS = ("+", "-", "~", "not") _unary_ops_funcs = (operator.pos, operator.neg, operator.invert, operator.invert) -_unary_ops_dict = dict(zip(_unary_ops_syms, _unary_ops_funcs)) +_unary_ops_dict = dict(zip(UNARY_OPS_SYMS, _unary_ops_funcs)) class UnaryOp(Op): @@ -561,7 +561,7 @@ def __init__(self, op: str, operand): except KeyError as err: raise ValueError( f"Invalid unary operator {repr(op)}, " - f"valid operators are {_unary_ops_syms}" + f"valid operators are {UNARY_OPS_SYMS}" ) from err def __call__(self, env): @@ -602,7 +602,7 @@ class FuncNode: def __init__(self, name: str): from pandas.core.computation.check import NUMEXPR_INSTALLED, NUMEXPR_VERSION - if name not in _mathops or ( + if name not in MATHOPS or ( NUMEXPR_INSTALLED and NUMEXPR_VERSION < LooseVersion("2.6.9") and name in ("floor", "ceil") diff --git a/pandas/core/computation/scope.py b/pandas/core/computation/scope.py index 83bf92ad737e4..2925f583bfc56 100644 --- a/pandas/core/computation/scope.py +++ b/pandas/core/computation/scope.py @@ -53,7 +53,7 @@ def _raw_hex_id(obj) -> str: return "".join(_replacer(x) for x in packed) -_DEFAULT_GLOBALS = { +DEFAULT_GLOBALS = { "Timestamp": Timestamp, "datetime": datetime.datetime, "True": True, @@ -114,7 +114,7 @@ def __init__( # shallow copy because we don't want to keep filling this up with what # was there before if there are multiple calls to Scope/_ensure_scope - self.scope = DeepChainMap(_DEFAULT_GLOBALS.copy()) + self.scope = DeepChainMap(DEFAULT_GLOBALS.copy()) self.target = target if isinstance(local_dict, Scope): diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index 7c5aafcbbc7e9..8f9c0cf7a01db 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -22,9 +22,9 @@ from pandas.util._validators import validate_bool_kwarg from pandas.core.dtypes.common import ( - _POSSIBLY_CAST_DTYPES, DT64NS_DTYPE, INT64_DTYPE, + POSSIBLY_CAST_DTYPES, TD64NS_DTYPE, ensure_int8, ensure_int16, @@ -1188,7 +1188,7 @@ def maybe_castable(arr) -> bool: elif kind == "m": return is_timedelta64_ns_dtype(arr.dtype) - return arr.dtype.name not in _POSSIBLY_CAST_DTYPES + return arr.dtype.name not in POSSIBLY_CAST_DTYPES def maybe_infer_to_datetimelike(value, convert_dates: bool = False): diff --git a/pandas/core/dtypes/common.py b/pandas/core/dtypes/common.py index 6ad46eb967275..5987fdabf78bb 100644 --- a/pandas/core/dtypes/common.py +++ b/pandas/core/dtypes/common.py @@ -43,7 +43,7 @@ is_sequence, ) -_POSSIBLY_CAST_DTYPES = { +POSSIBLY_CAST_DTYPES = { np.dtype(t).name for t in [ "O", diff --git 
a/pandas/core/generic.py b/pandas/core/generic.py index 93c945638a174..40f0c6200e835 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -102,7 +102,7 @@ import pandas.core.indexing as indexing from pandas.core.internals import BlockManager from pandas.core.missing import find_valid_index -from pandas.core.ops import _align_method_FRAME +from pandas.core.ops import align_method_FRAME from pandas.core.shared_docs import _shared_docs from pandas.core.window import Expanding, ExponentialMovingWindow, Rolling, Window @@ -7402,7 +7402,7 @@ def _clip_with_one_bound(self, threshold, method, axis, inplace): if isinstance(self, ABCSeries): threshold = self._constructor(threshold, index=self.index) else: - threshold = _align_method_FRAME(self, threshold, axis, flex=None)[1] + threshold = align_method_FRAME(self, threshold, axis, flex=None)[1] return self.where(subset, threshold, axis=axis, inplace=inplace) def clip( diff --git a/pandas/core/index.py b/pandas/core/index.py index a315b9619b0e7..44f434e038a4b 100644 --- a/pandas/core/index.py +++ b/pandas/core/index.py @@ -19,7 +19,7 @@ ensure_index_from_sequences, get_objs_combined_axis, ) -from pandas.core.indexes.multi import _sparsify # noqa:F401 +from pandas.core.indexes.multi import sparsify_labels # noqa:F401 # GH#30193 warnings.warn( diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index a1bc8a4659b24..526dae7e256b7 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -3586,7 +3586,7 @@ def join(self, other, how="left", level=None, return_indexers=False, sort=False) def _join_multi(self, other, how, return_indexers=True): from pandas.core.indexes.multi import MultiIndex - from pandas.core.reshape.merge import _restore_dropped_levels_multijoin + from pandas.core.reshape.merge import restore_dropped_levels_multijoin # figure out join names self_names = set(com.not_none(*self.names)) @@ -3622,7 +3622,7 @@ def _join_multi(self, other, how, return_indexers=True): # common levels, ldrop_names, rdrop_names dropped_names = ldrop_names + rdrop_names - levels, codes, names = _restore_dropped_levels_multijoin( + levels, codes, names = restore_dropped_levels_multijoin( self, other, dropped_names, join_idx, lidx, ridx ) diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index 9630e154ccd17..deeb7ff50b88c 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -1337,7 +1337,7 @@ def format( if sparsify in [False, lib.no_default]: sentinel = sparsify # little bit of a kludge job for #1217 - result_levels = _sparsify( + result_levels = sparsify_labels( result_levels, start=int(names), sentinel=sentinel ) @@ -3692,7 +3692,7 @@ def _add_numeric_methods_disabled(cls): MultiIndex._add_logical_methods_disabled() -def _sparsify(label_list, start: int = 0, sentinel=""): +def sparsify_labels(label_list, start: int = 0, sentinel=""): pivoted = list(zip(*label_list)) k = len(label_list) diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py index 6fdde22a1c514..64470da2fb910 100644 --- a/pandas/core/nanops.py +++ b/pandas/core/nanops.py @@ -11,7 +11,7 @@ from pandas._typing import ArrayLike, Dtype, DtypeObj, F, Scalar from pandas.compat._optional import import_optional_dependency -from pandas.core.dtypes.cast import _int64_max, maybe_upcast_putmask +from pandas.core.dtypes.cast import maybe_upcast_putmask from pandas.core.dtypes.common import ( get_dtype, is_any_int_dtype, @@ -185,7 +185,7 @@ def _get_fill_value( else: if fill_value_typ == "+inf": # need the 
max int here - return _int64_max + return np.iinfo(np.int64).max else: return iNaT @@ -346,7 +346,7 @@ def _wrap_results(result, dtype: DtypeObj, fill_value=None): result = np.nan # raise if we have a timedelta64[ns] which is too large - if np.fabs(result) > _int64_max: + if np.fabs(result) > np.iinfo(np.int64).max: raise ValueError("overflow in timedelta operation") result = Timedelta(result, unit="ns") diff --git a/pandas/core/ops/__init__.py b/pandas/core/ops/__init__.py index 60f3d23aaed13..8fcbee6a20ac3 100644 --- a/pandas/core/ops/__init__.py +++ b/pandas/core/ops/__init__.py @@ -306,7 +306,7 @@ def dispatch_to_series(left, right, func, axis: Optional[int] = None): def _align_method_SERIES(left: "Series", right, align_asobject: bool = False): """ align lhs and rhs Series """ - # ToDo: Different from _align_method_FRAME, list, tuple and ndarray + # ToDo: Different from align_method_FRAME, list, tuple and ndarray # are not coerced here # because Series has inconsistencies described in #13637 @@ -430,7 +430,7 @@ def flex_wrapper(self, other, level=None, fill_value=None, axis=0): # DataFrame -def _align_method_FRAME( +def align_method_FRAME( left, right, axis, flex: Optional[bool] = False, level: Level = None ): """ @@ -571,7 +571,7 @@ def _frame_arith_method_with_reindex( new_right = right.iloc[:, rcols] result = op(new_left, new_right) - # Do the join on the columns instead of using _align_method_FRAME + # Do the join on the columns instead of using align_method_FRAME # to avoid constructing two potentially large/sparse DataFrames join_columns, _, _ = left.columns.join( right.columns, how="outer", level=None, return_indexers=True @@ -644,7 +644,7 @@ def f(self, other, axis=default_axis, level=None, fill_value=None): # TODO: why are we passing flex=True instead of flex=not special? 
# 15 tests fail if we pass flex=not special instead - self, other = _align_method_FRAME(self, other, axis, flex=True, level=level) + self, other = align_method_FRAME(self, other, axis, flex=True, level=level) if isinstance(other, ABCDataFrame): # Another DataFrame @@ -680,7 +680,7 @@ def _flex_comp_method_FRAME(cls: Type["DataFrame"], op, special: bool): def f(self, other, axis=default_axis, level=None): axis = self._get_axis_number(axis) if axis is not None else 1 - self, other = _align_method_FRAME(self, other, axis, flex=True, level=level) + self, other = align_method_FRAME(self, other, axis, flex=True, level=level) new_data = dispatch_to_series(self, other, op, axis=axis) return self._construct_result(new_data) @@ -698,7 +698,7 @@ def _comp_method_FRAME(cls: Type["DataFrame"], op, special: bool): def f(self, other): axis = 1 # only relevant for Series other case - self, other = _align_method_FRAME(self, other, axis, level=None, flex=False) + self, other = align_method_FRAME(self, other, axis, level=None, flex=False) # See GH#4537 for discussion of scalar op behavior new_data = dispatch_to_series(self, other, op, axis=axis) diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py index f1c5486222ea1..030dec369c2be 100644 --- a/pandas/core/reshape/merge.py +++ b/pandas/core/reshape/merge.py @@ -1350,7 +1350,7 @@ def _get_join_indexers( return join_func(lkey, rkey, count, **kwargs) -def _restore_dropped_levels_multijoin( +def restore_dropped_levels_multijoin( left: MultiIndex, right: MultiIndex, dropped_level_names, diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py index 623dc6e6bad91..6781d98ded41d 100644 --- a/pandas/io/formats/format.py +++ b/pandas/io/formats/format.py @@ -995,7 +995,7 @@ def to_html( ) def _get_formatted_column_labels(self, frame: "DataFrame") -> List[List[str]]: - from pandas.core.indexes.multi import _sparsify + from pandas.core.indexes.multi import sparsify_labels columns = frame.columns @@ -1021,7 +1021,7 @@ def space_format(x, y): zip(*[[space_format(x, y) for y in x] for x in fmt_columns]) ) if self.sparsify and len(str_columns): - str_columns = _sparsify(str_columns) + str_columns = sparsify_labels(str_columns) str_columns = [list(x) for x in zip(*str_columns)] else: diff --git a/pandas/io/json/_json.py b/pandas/io/json/_json.py index a4d923fdbe45a..e3000788cb33a 100644 --- a/pandas/io/json/_json.py +++ b/pandas/io/json/_json.py @@ -22,7 +22,7 @@ from pandas.io.common import get_compression_method, get_filepath_or_buffer, get_handle from pandas.io.json._normalize import convert_to_line_delimits from pandas.io.json._table_schema import build_table_schema, parse_table_schema -from pandas.io.parsers import _validate_integer +from pandas.io.parsers import validate_integer loads = json.loads dumps = json.dumps @@ -698,11 +698,11 @@ def __init__( self.file_handles: List[IO] = [] if self.chunksize is not None: - self.chunksize = _validate_integer("chunksize", self.chunksize, 1) + self.chunksize = validate_integer("chunksize", self.chunksize, 1) if not self.lines: raise ValueError("chunksize can only be passed if lines=True") if self.nrows is not None: - self.nrows = _validate_integer("nrows", self.nrows, 0) + self.nrows = validate_integer("nrows", self.nrows, 0) if not self.lines: raise ValueError("nrows can only be passed if lines=True") diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py index a0466c5ac6b57..4c619a636f057 100644 --- a/pandas/io/parsers.py +++ b/pandas/io/parsers.py @@ -361,7 +361,7 @@ ) -def 
_validate_integer(name, val, min_val=0): +def validate_integer(name, val, min_val=0): """ Checks whether the 'name' parameter for parsing is either an integer OR float that can SAFELY be cast to an integer @@ -436,7 +436,7 @@ def _read(filepath_or_buffer: FilePathOrBuffer, kwds): # Extract some of the arguments (pass chunksize on). iterator = kwds.get("iterator", False) - chunksize = _validate_integer("chunksize", kwds.get("chunksize", None), 1) + chunksize = validate_integer("chunksize", kwds.get("chunksize", None), 1) nrows = kwds.get("nrows", None) # Check for duplicates in names. @@ -1179,7 +1179,7 @@ def _failover_to_python(self): raise AbstractMethodError(self) def read(self, nrows=None): - nrows = _validate_integer("nrows", nrows) + nrows = validate_integer("nrows", nrows) ret = self._engine.read(nrows) # May alter columns / col_dict diff --git a/pandas/tests/arrays/test_datetimelike.py b/pandas/tests/arrays/test_datetimelike.py index 292557fc04258..d2d3766959fbf 100644 --- a/pandas/tests/arrays/test_datetimelike.py +++ b/pandas/tests/arrays/test_datetimelike.py @@ -4,7 +4,7 @@ import pytest from pandas._libs import OutOfBoundsDatetime -from pandas.compat.numpy import _np_version_under1p18 +from pandas.compat.numpy import np_version_under1p18 import pandas as pd import pandas._testing as tm @@ -960,7 +960,7 @@ def test_invalid_nat_setitem_array(array, non_casting_nats): ], ) def test_to_numpy_extra(array): - if _np_version_under1p18: + if np_version_under1p18: # np.isnan(NaT) raises, so use pandas' isnan = pd.isna else: diff --git a/pandas/tests/computation/test_eval.py b/pandas/tests/computation/test_eval.py index 49066428eb16c..72dc04e68c154 100644 --- a/pandas/tests/computation/test_eval.py +++ b/pandas/tests/computation/test_eval.py @@ -26,12 +26,12 @@ PandasExprVisitor, PythonExprVisitor, ) -from pandas.core.computation.expressions import _USE_NUMEXPR, NUMEXPR_INSTALLED +from pandas.core.computation.expressions import NUMEXPR_INSTALLED, USE_NUMEXPR from pandas.core.computation.ops import ( - _arith_ops_syms, + ARITH_OPS_SYMS, + SPECIAL_CASE_ARITH_OPS_SYMS, _binary_math_ops, _binary_ops_dict, - _special_case_arith_ops_syms, _unary_math_ops, ) @@ -41,8 +41,8 @@ pytest.param( engine, marks=pytest.mark.skipif( - engine == "numexpr" and not _USE_NUMEXPR, - reason=f"numexpr enabled->{_USE_NUMEXPR}, " + engine == "numexpr" and not USE_NUMEXPR, + reason=f"numexpr enabled->{USE_NUMEXPR}, " f"installed->{NUMEXPR_INSTALLED}", ), ) @@ -114,7 +114,7 @@ def _is_py3_complex_incompat(result, expected): return isinstance(expected, (complex, np.complexfloating)) and np.isnan(result) -_good_arith_ops = set(_arith_ops_syms).difference(_special_case_arith_ops_syms) +_good_arith_ops = set(ARITH_OPS_SYMS).difference(SPECIAL_CASE_ARITH_OPS_SYMS) @td.skip_if_no_ne @@ -158,10 +158,10 @@ def setup_data(self): self.rhses = self.pandas_rhses + self.scalar_rhses def setup_ops(self): - self.cmp_ops = expr._cmp_ops_syms + self.cmp_ops = expr.CMP_OPS_SYMS self.cmp2_ops = self.cmp_ops[::-1] - self.bin_ops = expr._bool_ops_syms - self.special_case_ops = _special_case_arith_ops_syms + self.bin_ops = expr.BOOL_OPS_SYMS + self.special_case_ops = SPECIAL_CASE_ARITH_OPS_SYMS self.arith_ops = _good_arith_ops self.unary_ops = "-", "~", "not " @@ -774,10 +774,10 @@ def setup_class(cls): cls.parser = "python" def setup_ops(self): - self.cmp_ops = [op for op in expr._cmp_ops_syms if op not in ("in", "not in")] + self.cmp_ops = [op for op in expr.CMP_OPS_SYMS if op not in ("in", "not in")] self.cmp2_ops = 
self.cmp_ops[::-1] - self.bin_ops = [op for op in expr._bool_ops_syms if op not in ("and", "or")] - self.special_case_ops = _special_case_arith_ops_syms + self.bin_ops = [op for op in expr.BOOL_OPS_SYMS if op not in ("and", "or")] + self.special_case_ops = SPECIAL_CASE_ARITH_OPS_SYMS self.arith_ops = _good_arith_ops self.unary_ops = "+", "-", "~" @@ -1135,7 +1135,7 @@ class TestOperationsNumExprPandas: def setup_class(cls): cls.engine = "numexpr" cls.parser = "pandas" - cls.arith_ops = expr._arith_ops_syms + expr._cmp_ops_syms + cls.arith_ops = expr.ARITH_OPS_SYMS + expr.CMP_OPS_SYMS @classmethod def teardown_class(cls): @@ -1177,7 +1177,7 @@ def test_simple_arith_ops(self): assert y == expec def test_simple_bool_ops(self): - for op, lhs, rhs in product(expr._bool_ops_syms, (True, False), (True, False)): + for op, lhs, rhs in product(expr.BOOL_OPS_SYMS, (True, False), (True, False)): ex = f"{lhs} {op} {rhs}" res = self.eval(ex) exp = eval(ex) @@ -1185,7 +1185,7 @@ def test_simple_bool_ops(self): def test_bool_ops_with_constants(self): for op, lhs, rhs in product( - expr._bool_ops_syms, ("True", "False"), ("True", "False") + expr.BOOL_OPS_SYMS, ("True", "False"), ("True", "False") ): ex = f"{lhs} {op} {rhs}" res = self.eval(ex) @@ -1637,7 +1637,7 @@ def setup_class(cls): cls.parser = "python" cls.arith_ops = [ op - for op in expr._arith_ops_syms + expr._cmp_ops_syms + for op in expr.ARITH_OPS_SYMS + expr.CMP_OPS_SYMS if op not in ("in", "not in") ] @@ -1697,7 +1697,7 @@ def test_fails_pipe(self): def test_bool_ops_with_constants(self): for op, lhs, rhs in product( - expr._bool_ops_syms, ("True", "False"), ("True", "False") + expr.BOOL_OPS_SYMS, ("True", "False"), ("True", "False") ): ex = f"{lhs} {op} {rhs}" if op in ("and", "or"): @@ -1710,7 +1710,7 @@ def test_bool_ops_with_constants(self): assert res == exp def test_simple_bool_ops(self): - for op, lhs, rhs in product(expr._bool_ops_syms, (True, False), (True, False)): + for op, lhs, rhs in product(expr.BOOL_OPS_SYMS, (True, False), (True, False)): ex = f"lhs {op} rhs" if op in ("and", "or"): msg = "'BoolOp' nodes are not implemented" @@ -1729,7 +1729,7 @@ def setup_class(cls): cls.engine = cls.parser = "python" cls.arith_ops = [ op - for op in expr._arith_ops_syms + expr._cmp_ops_syms + for op in expr.ARITH_OPS_SYMS + expr.CMP_OPS_SYMS if op not in ("in", "not in") ] @@ -1740,7 +1740,7 @@ def setup_class(cls): super().setup_class() cls.engine = "python" cls.parser = "pandas" - cls.arith_ops = expr._arith_ops_syms + expr._cmp_ops_syms + cls.arith_ops = expr.ARITH_OPS_SYMS + expr.CMP_OPS_SYMS @td.skip_if_no_ne @@ -2020,7 +2020,7 @@ def test_equals_various(other): df = DataFrame({"A": ["a", "b", "c"]}) result = df.eval(f"A == {other}") expected = Series([False, False, False], name="A") - if _USE_NUMEXPR: + if USE_NUMEXPR: # https://github.com/pandas-dev/pandas/issues/10239 # lose name with numexpr engine. Remove when that's fixed. 
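        # A small sketch of the renamed toggle used above (assumes numexpr is
        # installed; set_use_numexpr is the public switch shown earlier in
        # this patch):
        #
        #   import pandas.core.computation.expressions as expr
        #   expr.set_use_numexpr(False)  # force the standard Python evaluator
        #   expr.set_use_numexpr(True)   # re-enable numexpr acceleration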
expected.name = None diff --git a/pandas/tests/frame/test_arithmetic.py b/pandas/tests/frame/test_arithmetic.py index 70d0b4e9e835c..6dd8d890e8a4b 100644 --- a/pandas/tests/frame/test_arithmetic.py +++ b/pandas/tests/frame/test_arithmetic.py @@ -1417,7 +1417,7 @@ def test_alignment_non_pandas(self): columns = ["X", "Y", "Z"] df = pd.DataFrame(np.random.randn(3, 3), index=index, columns=columns) - align = pd.core.ops._align_method_FRAME + align = pd.core.ops.align_method_FRAME for val in [ [1, 2, 3], (1, 2, 3), diff --git a/pandas/tests/generic/test_generic.py b/pandas/tests/generic/test_generic.py index 23bb673586768..2c2584e8dee01 100644 --- a/pandas/tests/generic/test_generic.py +++ b/pandas/tests/generic/test_generic.py @@ -3,7 +3,7 @@ import numpy as np import pytest -from pandas.compat.numpy import _np_version_under1p17 +from pandas.compat.numpy import np_version_under1p17 from pandas.core.dtypes.common import is_scalar @@ -652,12 +652,12 @@ def test_sample(sel): pytest.param( "np.random.MT19937", 3, - marks=pytest.mark.skipif(_np_version_under1p17, reason="NumPy<1.17"), + marks=pytest.mark.skipif(np_version_under1p17, reason="NumPy<1.17"), ), pytest.param( "np.random.PCG64", 11, - marks=pytest.mark.skipif(_np_version_under1p17, reason="NumPy<1.17"), + marks=pytest.mark.skipif(np_version_under1p17, reason="NumPy<1.17"), ), ], ) diff --git a/pandas/tests/indexes/common.py b/pandas/tests/indexes/common.py index e95e7267f17ec..11dc232af8de4 100644 --- a/pandas/tests/indexes/common.py +++ b/pandas/tests/indexes/common.py @@ -5,7 +5,7 @@ import pytest from pandas._libs import iNaT -from pandas.compat.numpy import _is_numpy_dev +from pandas.compat.numpy import is_numpy_dev from pandas.errors import InvalidIndexError from pandas.core.dtypes.common import is_datetime64tz_dtype @@ -475,7 +475,7 @@ def test_intersection_base(self, index, request): for case in cases: # https://github.com/pandas-dev/pandas/issues/35481 if ( - _is_numpy_dev + is_numpy_dev and isinstance(case, Series) and isinstance(index, UInt64Index) ): diff --git a/pandas/tests/indexes/multi/test_analytics.py b/pandas/tests/indexes/multi/test_analytics.py index 9e4e73e793bac..d661a56311e6c 100644 --- a/pandas/tests/indexes/multi/test_analytics.py +++ b/pandas/tests/indexes/multi/test_analytics.py @@ -1,7 +1,7 @@ import numpy as np import pytest -from pandas.compat.numpy import _np_version_under1p17 +from pandas.compat.numpy import np_version_under1p17 import pandas as pd from pandas import Index, MultiIndex, date_range, period_range @@ -240,7 +240,7 @@ def test_numpy_ufuncs(idx, func): # test ufuncs of numpy. 
see: # https://numpy.org/doc/stable/reference/ufuncs.html - if _np_version_under1p17: + if np_version_under1p17: expected_exception = AttributeError msg = f"'tuple' object has no attribute '{func.__name__}'" else: diff --git a/pandas/tests/indexes/test_numpy_compat.py b/pandas/tests/indexes/test_numpy_compat.py index 043539c173427..a83684464caf6 100644 --- a/pandas/tests/indexes/test_numpy_compat.py +++ b/pandas/tests/indexes/test_numpy_compat.py @@ -1,6 +1,8 @@ import numpy as np import pytest +from pandas.compat.numpy import np_version_under1p17, np_version_under1p18 + from pandas import ( DatetimeIndex, Float64Index, @@ -9,8 +11,6 @@ PeriodIndex, TimedeltaIndex, UInt64Index, - _np_version_under1p17, - _np_version_under1p18, ) import pandas._testing as tm from pandas.core.indexes.datetimelike import DatetimeIndexOpsMixin @@ -83,12 +83,12 @@ def test_numpy_ufuncs_other(index, func): if func in [np.isfinite, np.isnan, np.isinf]: pytest.xfail(reason="__array_ufunc__ is not defined") - if not _np_version_under1p18 and func in [np.isfinite, np.isinf, np.isnan]: + if not np_version_under1p18 and func in [np.isfinite, np.isinf, np.isnan]: # numpy 1.18(dev) changed isinf and isnan to not raise on dt64/tfd64 result = func(index) assert isinstance(result, np.ndarray) - elif not _np_version_under1p17 and func in [np.isfinite]: + elif not np_version_under1p17 and func in [np.isfinite]: # ok under numpy >= 1.17 # Results in bool array result = func(index) diff --git a/pandas/tests/indexing/test_loc.py b/pandas/tests/indexing/test_loc.py index e42d9679464d8..9a6f30ec920cc 100644 --- a/pandas/tests/indexing/test_loc.py +++ b/pandas/tests/indexing/test_loc.py @@ -5,7 +5,7 @@ import numpy as np import pytest -from pandas.compat.numpy import _is_numpy_dev +from pandas.compat.numpy import is_numpy_dev import pandas as pd from pandas import DataFrame, Series, Timestamp, date_range @@ -938,7 +938,7 @@ def test_loc_setitem_empty_append(self): df.loc[0, "x"] = expected.loc[0, "x"] tm.assert_frame_equal(df, expected) - @pytest.mark.xfail(_is_numpy_dev, reason="gh-35481") + @pytest.mark.xfail(is_numpy_dev, reason="gh-35481") def test_loc_setitem_empty_append_raises(self): # GH6173, various appends to an empty dataframe diff --git a/pandas/tests/scalar/timedelta/test_arithmetic.py b/pandas/tests/scalar/timedelta/test_arithmetic.py index cb33f99d9bd91..d4d7e4b85268f 100644 --- a/pandas/tests/scalar/timedelta/test_arithmetic.py +++ b/pandas/tests/scalar/timedelta/test_arithmetic.py @@ -7,8 +7,10 @@ import numpy as np import pytest +from pandas.compat.numpy import is_numpy_dev + import pandas as pd -from pandas import NaT, Timedelta, Timestamp, _is_numpy_dev, compat, offsets +from pandas import NaT, Timedelta, Timestamp, compat, offsets import pandas._testing as tm from pandas.core import ops @@ -426,7 +428,7 @@ def test_td_div_numeric_scalar(self): np.float64("NaN"), marks=pytest.mark.xfail( # Works on numpy dev only in python 3.9 - _is_numpy_dev and not compat.PY39, + is_numpy_dev and not compat.PY39, raises=RuntimeWarning, reason="https://github.com/pandas-dev/pandas/issues/31992", ), diff --git a/pandas/tests/test_common.py b/pandas/tests/test_common.py index 3d45a1f7389b7..f7f3f1fa0c13d 100644 --- a/pandas/tests/test_common.py +++ b/pandas/tests/test_common.py @@ -6,7 +6,7 @@ import numpy as np import pytest -from pandas.compat.numpy import _np_version_under1p17 +from pandas.compat.numpy import np_version_under1p17 import pandas as pd from pandas import Series, Timestamp @@ -72,7 +72,7 @@ def 
test_random_state(): # Check BitGenerators # GH32503 - if not _np_version_under1p17: + if not np_version_under1p17: assert ( com.random_state(npr.MT19937(3)).uniform() == npr.RandomState(npr.MT19937(3)).uniform() diff --git a/pandas/tests/test_expressions.py b/pandas/tests/test_expressions.py index 2368e93ddc256..da7f8b9b4a721 100644 --- a/pandas/tests/test_expressions.py +++ b/pandas/tests/test_expressions.py @@ -35,7 +35,7 @@ ) -@pytest.mark.skipif(not expr._USE_NUMEXPR, reason="not using numexpr") +@pytest.mark.skipif(not expr.USE_NUMEXPR, reason="not using numexpr") class TestExpressions: def setup_method(self, method): diff --git a/pandas/util/_test_decorators.py b/pandas/util/_test_decorators.py index 94c252eca1671..e9deaf3fe67de 100644 --- a/pandas/util/_test_decorators.py +++ b/pandas/util/_test_decorators.py @@ -35,7 +35,7 @@ def test_foo(): from pandas.compat._optional import import_optional_dependency from pandas.compat.numpy import _np_version -from pandas.core.computation.expressions import _USE_NUMEXPR, NUMEXPR_INSTALLED +from pandas.core.computation.expressions import NUMEXPR_INSTALLED, USE_NUMEXPR def safe_import(mod_name: str, min_version: Optional[str] = None): @@ -195,8 +195,8 @@ def skip_if_no(package: str, min_version: Optional[str] = None): _skip_if_no_scipy(), reason="Missing SciPy requirement" ) skip_if_no_ne = pytest.mark.skipif( - not _USE_NUMEXPR, - reason=f"numexpr enabled->{_USE_NUMEXPR}, installed->{NUMEXPR_INSTALLED}", + not USE_NUMEXPR, + reason=f"numexpr enabled->{USE_NUMEXPR}, installed->{NUMEXPR_INSTALLED}", ) From ab342c68d05e2b2b0eebdb551a6c1a9c9ae55dbd Mon Sep 17 00:00:00 2001 From: Simon Hawkins Date: Tue, 8 Sep 2020 23:43:52 +0100 Subject: [PATCH 485/632] DOC: Start 1.1.3 (#36183) --- doc/source/whatsnew/index.rst | 1 + doc/source/whatsnew/v1.1.2.rst | 2 +- doc/source/whatsnew/v1.1.3.rst | 42 ++++++++++++++++++++++++++++++++++ 3 files changed, 44 insertions(+), 1 deletion(-) create mode 100644 doc/source/whatsnew/v1.1.3.rst diff --git a/doc/source/whatsnew/index.rst b/doc/source/whatsnew/index.rst index 1827d151579a1..933ed3cb8babf 100644 --- a/doc/source/whatsnew/index.rst +++ b/doc/source/whatsnew/index.rst @@ -24,6 +24,7 @@ Version 1.1 .. toctree:: :maxdepth: 2 + v1.1.3 v1.1.2 v1.1.1 v1.1.0 diff --git a/doc/source/whatsnew/v1.1.2.rst b/doc/source/whatsnew/v1.1.2.rst index c6a08f4fb852a..81b8e7df11625 100644 --- a/doc/source/whatsnew/v1.1.2.rst +++ b/doc/source/whatsnew/v1.1.2.rst @@ -61,4 +61,4 @@ Other Contributors ~~~~~~~~~~~~ -.. contributors:: v1.1.1..v1.1.2|HEAD +.. contributors:: v1.1.1..v1.1.2 diff --git a/doc/source/whatsnew/v1.1.3.rst b/doc/source/whatsnew/v1.1.3.rst new file mode 100644 index 0000000000000..e3161012da5d1 --- /dev/null +++ b/doc/source/whatsnew/v1.1.3.rst @@ -0,0 +1,42 @@ +.. _whatsnew_113: + +What's new in 1.1.3 (??) +------------------------ + +These are the changes in pandas 1.1.3. See :ref:`release` for a full changelog +including other versions of pandas. + +{{ header }} + +.. --------------------------------------------------------------------------- + +.. _whatsnew_113.regressions: + +Fixed regressions +~~~~~~~~~~~~~~~~~ +- + +.. --------------------------------------------------------------------------- + +.. _whatsnew_113.bug_fixes: + +Bug fixes +~~~~~~~~~ +- + +.. --------------------------------------------------------------------------- + +.. _whatsnew_113.other: + +Other +~~~~~ +- + +.. --------------------------------------------------------------------------- + +.. 
_whatsnew_113.contributors: + +Contributors +~~~~~~~~~~~~ + +.. contributors:: v1.1.2..v1.1.3|HEAD From 7d1622443f026729786616f8d5dda5a5a97be90a Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Tue, 8 Sep 2020 15:53:03 -0700 Subject: [PATCH 486/632] CLN: re-use invalid_comparison in Categorical comparisons (#36229) --- pandas/core/arrays/categorical.py | 10 +--------- pandas/tests/arrays/categorical/test_operators.py | 13 +++++-------- pandas/tests/series/test_arithmetic.py | 2 +- 3 files changed, 7 insertions(+), 18 deletions(-) diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index f20d3d5e316f8..a2b5b54c55490 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -119,15 +119,7 @@ def func(self, other): ret[mask] = False return ret else: - if opname == "__eq__": - return np.zeros(len(self), dtype=bool) - elif opname == "__ne__": - return np.ones(len(self), dtype=bool) - else: - raise TypeError( - f"Cannot compare a Categorical for op {opname} with a " - "scalar, which is not a category." - ) + return ops.invalid_comparison(self, other, op) else: # allow categorical vs object dtype array comparisons for equality # these are only positional comparisons diff --git a/pandas/tests/arrays/categorical/test_operators.py b/pandas/tests/arrays/categorical/test_operators.py index 6ea003c122eea..bc5fb51883b3d 100644 --- a/pandas/tests/arrays/categorical/test_operators.py +++ b/pandas/tests/arrays/categorical/test_operators.py @@ -171,17 +171,14 @@ def test_comparison_with_unknown_scalars(self): # for unequal comps, but not for equal/not equal cat = Categorical([1, 2, 3], ordered=True) - msg = ( - "Cannot compare a Categorical for op __{}__ with a scalar, " - "which is not a category" - ) - with pytest.raises(TypeError, match=msg.format("lt")): + msg = "Invalid comparison between dtype=category and int" + with pytest.raises(TypeError, match=msg): cat < 4 - with pytest.raises(TypeError, match=msg.format("gt")): + with pytest.raises(TypeError, match=msg): cat > 4 - with pytest.raises(TypeError, match=msg.format("gt")): + with pytest.raises(TypeError, match=msg): 4 < cat - with pytest.raises(TypeError, match=msg.format("lt")): + with pytest.raises(TypeError, match=msg): 4 > cat tm.assert_numpy_array_equal(cat == 4, np.array([False, False, False])) diff --git a/pandas/tests/series/test_arithmetic.py b/pandas/tests/series/test_arithmetic.py index ef2bafd4ea2ad..c937e357b9dbc 100644 --- a/pandas/tests/series/test_arithmetic.py +++ b/pandas/tests/series/test_arithmetic.py @@ -501,7 +501,7 @@ def test_unequal_categorical_comparison_raises_type_error(self): # for unequal comps, but not for equal/not equal cat = Series(Categorical(list("abc"), ordered=True)) - msg = "Cannot compare a Categorical for op.+with a scalar" + msg = "Invalid comparison between dtype=category and str" with pytest.raises(TypeError, match=msg): cat < "d" with pytest.raises(TypeError, match=msg): From 9e3b8dff24eab27b96b8dbb7debec41a2ed0b5f7 Mon Sep 17 00:00:00 2001 From: attack68 <24256554+attack68@users.noreply.github.com> Date: Wed, 9 Sep 2020 01:56:46 +0200 Subject: [PATCH 487/632] CLN: w3 formatting (#36223) --- pandas/io/formats/style.py | 2 +- pandas/tests/io/formats/test_style.py | 6 ++++++ 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py index 023557dd6494d..b27a4e036e137 100644 --- a/pandas/io/formats/style.py +++ b/pandas/io/formats/style.py @@ -327,7 +327,7 @@ def format_attr(pair): 
colspan = col_lengths.get((r, c), 0) if colspan > 1: es["attributes"] = [ - format_attr({"key": "colspan", "value": colspan}) + format_attr({"key": "colspan", "value": f'"{colspan}"'}) ] row_es.append(es) head.append(row_es) diff --git a/pandas/tests/io/formats/test_style.py b/pandas/tests/io/formats/test_style.py index 6025649e9dbec..de549ec3eb75e 100644 --- a/pandas/tests/io/formats/test_style.py +++ b/pandas/tests/io/formats/test_style.py @@ -1691,6 +1691,12 @@ def test_no_cell_ids(self): s = styler.render() # render twice to ensure ctx is not updated assert s.find('

    ') != -1 + def test_colspan_w3(self): + # GH 36223 + df = pd.DataFrame(data=[[1, 2]], columns=[["l0", "l0"], ["l1a", "l1b"]]) + s = Styler(df, uuid="_", cell_ids=False) + assert 'l0` elements. + + Parameters + ---------- + classes : DataFrame + DataFrame containing strings that will be translated to CSS classes, + mapped by identical column and index values that must exist on the + underlying `Styler` data. None, NaN values, and empty strings will + be ignored and not affect the rendered HTML. + + Returns + ------- + self : Styler + + Examples + -------- + >>> df = pd.DataFrame(data=[[1, 2, 3], [4, 5, 6]], columns=["A", "B", "C"]) + >>> classes = pd.DataFrame([ + ... ["min-val red", "", "blue"], + ... ["red", None, "blue max-val"] + ... ], index=df.index, columns=df.columns) + >>> df.style.set_td_classes(classes) + + Using `MultiIndex` columns and a `classes` `DataFrame` as a subset of the + underlying, + + >>> df = pd.DataFrame([[1,2],[3,4]], index=["a", "b"], + ... columns=[["level0", "level0"], ["level1a", "level1b"]]) + >>> classes = pd.DataFrame(["min-val"], index=["a"], + ... columns=[["level0"],["level1a"]]) + >>> df.style.set_td_classes(classes) + + Form of the output with new additional css classes, + + >>> df = pd.DataFrame([[1]]) + >>> css = pd.DataFrame(["other-class"]) + >>> s = Styler(df, uuid="_", cell_ids=False).set_td_classes(css) + >>> s.hide_index().render() + '' + '' + ' ' + ' ' + ' ' + ' ' + ' ' + ' ' + '
    0
    1
    ' + + """ + classes = classes.reindex_like(self.data) + + mask = (classes.isna()) | (classes.eq("")) + self.cell_context["data"] = { + r: {c: [str(classes.iloc[r, c])]} + for r, rn in enumerate(classes.index) + for c, cn in enumerate(classes.columns) + if not mask.iloc[r, c] + } + + return self + def render(self, **kwargs) -> str: """ Render the built up styles to HTML. @@ -609,6 +675,7 @@ def clear(self) -> None: Returns None. """ self.ctx.clear() + self.cell_context = {} self._todo = [] def _compute(self): diff --git a/pandas/tests/io/formats/test_style.py b/pandas/tests/io/formats/test_style.py index de549ec3eb75e..e7583e1ce2ce2 100644 --- a/pandas/tests/io/formats/test_style.py +++ b/pandas/tests/io/formats/test_style.py @@ -1691,6 +1691,27 @@ def test_no_cell_ids(self): s = styler.render() # render twice to ensure ctx is not updated assert s.find('
+
+    @pytest.mark.parametrize(
+        "classes",
+        [
+            DataFrame(
+                data=[["", "test-class"], [np.nan, None]],
+                columns=["A", "B"],
+                index=["a", "b"],
+            ),
+            DataFrame(data=[["test-class"]], columns=["B"], index=["a"]),
+            DataFrame(data=[["test-class", "unused"]], columns=["B", "C"], index=["a"]),
+        ],
+    )
+    def test_set_data_classes(self, classes):
+        # GH 36159
+        df = DataFrame(data=[[0, 1], [2, 3]], columns=["A", "B"], index=["a", "b"])
+        s = Styler(df, uuid="_", cell_ids=False).set_td_classes(classes).render()
+        assert '<td  class="data row0 col0" >0</td>' in s
+        assert '<td  class="data row0 col1 test-class" >1</td>' in s
+        assert '<td  class="data row1 col0" >2</td>' in s
+        assert '<td  class="data row1 col1" >3</td>' in s
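# A short usage sketch for the new Styler.set_td_classes API exercised by the
# test above; the class name "highlight" is an arbitrary example:
import pandas as pd

df = pd.DataFrame(data=[[1, 2]], columns=["A", "B"])
classes = pd.DataFrame([["highlight", ""]], index=df.index, columns=df.columns)
html = df.style.set_td_classes(classes).render()
# the non-empty entry is appended to the matching <td>'s class list, while
# empty strings (and None/NaN) leave their cells untouched
assert "highlight" in html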
    ` tag if specified. - - .. versionadded:: 0.23.0 - render_links : bool, default False Convert URLs to HTML links. @@ -3700,10 +3682,6 @@ def assign(self, **kwargs) -> DataFrame: Later items in '\*\*kwargs' may refer to newly created or modified columns in 'df'; items are computed and assigned into 'df' in order. - .. versionchanged:: 0.23.0 - - Keyword argument order is maintained. - Examples -------- >>> df = pd.DataFrame({'temp_c': [17.0, 25.0]}, @@ -6604,9 +6582,6 @@ def groupby( specified, all remaining columns will be used and the result will have hierarchically indexed columns. - .. versionchanged:: 0.23.0 - Also accept list of column names. - Returns ------- DataFrame @@ -7200,14 +7175,7 @@ def unstack(self, level=-1, fill_value=None): return unstack(self, level, fill_value) - @Appender( - _shared_docs["melt"] - % dict( - caller="df.melt(", - versionadded="\n .. versionadded:: 0.20.0\n", - other="melt", - ) - ) + @Appender(_shared_docs["melt"] % dict(caller="df.melt(", other="melt",)) def melt( self, id_vars=None, @@ -7418,7 +7386,6 @@ def _gotitem( axis=_shared_doc_kwargs["axis"], see_also=_agg_summary_and_see_also_doc, examples=_agg_examples_doc, - versionadded="\n.. versionadded:: 0.20.0\n", ) def aggregate(self, func=None, axis=0, *args, **kwargs): axis = self._get_axis_number(axis) @@ -7518,9 +7485,6 @@ def apply(self, func, axis=0, raw=False, result_type=None, args=(), **kwds): applied function: list-like results will be returned as a Series of those. However if the apply function returns a Series these are expanded to columns. - - .. versionadded:: 0.23.0 - args : tuple Positional arguments to pass to `func` in addition to the array/series. @@ -7720,7 +7684,6 @@ def append( sort : bool, default False Sort columns if the columns of `self` and `other` are not aligned. - .. versionadded:: 0.23.0 .. versionchanged:: 1.0.0 Changed to not sort by default. diff --git a/pandas/core/generic.py b/pandas/core/generic.py index d7b82923e7488..d78fa42cd1056 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -2214,8 +2214,6 @@ def to_json( Describing the data, where data component is like ``orient='records'``. - .. versionchanged:: 0.20.0 - date_format : {None, 'epoch', 'iso'} Type of date conversion. 'epoch' = epoch milliseconds, 'iso' = ISO8601. The default depends on the `orient`. For @@ -2251,9 +2249,6 @@ def to_json( Whether to include the index values in the JSON string. Not including the index (``index=False``) is only supported when orient is 'split' or 'table'. - - .. versionadded:: 0.23.0 - indent : int, optional Length of whitespace used to indent each record. @@ -3011,9 +3006,6 @@ def to_latex( into a main LaTeX document or read from an external file with ``\input{table.tex}``. - .. versionchanged:: 0.20.2 - Added to Series. - .. versionchanged:: 1.0.0 Added caption and label arguments. @@ -6404,9 +6396,6 @@ def replace( The method to use when for replacement, when `to_replace` is a scalar, list or tuple and `value` is ``None``. - .. versionchanged:: 0.23.0 - Added to DataFrame. - Returns ------- {klass} @@ -6836,8 +6825,6 @@ def interpolate( (interpolate). * 'outside': Only fill NaNs outside valid values (extrapolate). - .. versionadded:: 0.23.0 - downcast : optional, 'infer' or None, defaults to None Downcast dtypes if possible. **kwargs @@ -11349,12 +11336,6 @@ def _doc_parms(cls): min_count : int, default 0 The required number of valid values to perform the operation. If fewer than ``min_count`` non-NA values are present the result will be NA. - - .. 
versionadded:: 0.22.0 - - Added with the default being 0. This means the sum of an all-NA - or empty Series is 0, and the product of an all-NA or empty - Series is 1. """ diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index 30bd53a3ddff1..ceee78bfebe68 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -215,8 +215,6 @@ class providing the base-class of operations. Apply a function `func` with arguments to this %(klass)s object and return the function's result. -%(versionadded)s - Use `.pipe` when you want to improve readability by chaining together functions that expect Series, DataFrames, GroupBy or Resampler objects. Instead of writing @@ -709,7 +707,6 @@ def __getattr__(self, attr: str): @Substitution( klass="GroupBy", - versionadded=".. versionadded:: 0.21.0", examples="""\ >>> df = pd.DataFrame({'A': 'a b a b'.split(), 'B': [1, 2, 3, 4]}) >>> df diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 04a63beb2ef45..3d177e08bb0f5 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -1553,8 +1553,6 @@ def droplevel(self, level=0): If resulting index has only 1 level left, the result will be of Index type, not MultiIndex. - .. versionadded:: 0.23.1 (support for non-MultiIndex) - Parameters ---------- level : int, str, or list-like, default 0 @@ -2296,8 +2294,6 @@ def unique(self, level=None): level : int or str, optional, default None Only return values from specified level (for MultiIndex). - .. versionadded:: 0.23.0 - Returns ------- Index without duplicates diff --git a/pandas/core/resample.py b/pandas/core/resample.py index 29b7bd7a63faa..f881f79cb5c1d 100644 --- a/pandas/core/resample.py +++ b/pandas/core/resample.py @@ -208,7 +208,6 @@ def _assure_grouper(self): @Substitution( klass="Resampler", - versionadded=".. versionadded:: 0.23.0", examples=""" >>> df = pd.DataFrame({'A': [1, 2, 3, 4]}, ... index=pd.date_range('2012-08-02', periods=4)) @@ -283,7 +282,6 @@ def pipe(self, func, *args, **kwargs): _shared_docs["aggregate"], see_also=_agg_see_also_doc, examples=_agg_examples_doc, - versionadded="", klass="DataFrame", axis="", ) diff --git a/pandas/core/reshape/concat.py b/pandas/core/reshape/concat.py index 9b94dae8556f6..dd4bcf77641ef 100644 --- a/pandas/core/reshape/concat.py +++ b/pandas/core/reshape/concat.py @@ -122,7 +122,6 @@ def concat( This has no effect when ``join='inner'``, which already preserves the order of the non-concatenation axis. - .. versionadded:: 0.23.0 .. versionchanged:: 1.0.0 Changed to not sort by default. diff --git a/pandas/core/reshape/melt.py b/pandas/core/reshape/melt.py index 33ce5ed49b9c2..7f5fb6b45f014 100644 --- a/pandas/core/reshape/melt.py +++ b/pandas/core/reshape/melt.py @@ -22,10 +22,7 @@ from pandas import DataFrame, Series # noqa: F401 -@Appender( - _shared_docs["melt"] - % dict(caller="pd.melt(df, ", versionadded="", other="DataFrame.melt") -) +@Appender(_shared_docs["melt"] % dict(caller="pd.melt(df, ", other="DataFrame.melt")) def melt( frame: "DataFrame", id_vars=None, @@ -274,12 +271,10 @@ def wide_to_long( A regular expression capturing the wanted suffixes. '\\d+' captures numeric suffixes. Suffixes with no numbers could be specified with the negated character class '\\D+'. You can also further disambiguate - suffixes, for example, if your wide variables are of the form - A-one, B-two,.., and you have an unrelated column A-rating, you can - ignore the last one by specifying `suffix='(!?one|two)'`. - - .. 
versionchanged:: 0.23.0 - When all suffixes are numeric, they are cast to int64/float64. + suffixes, for example, if your wide variables are of the form A-one, + B-two,.., and you have an unrelated column A-rating, you can ignore the + last one by specifying `suffix='(!?one|two)'`. When all suffixes are + numeric, they are cast to int64/float64. Returns ------- diff --git a/pandas/core/reshape/reshape.py b/pandas/core/reshape/reshape.py index e81dd8f0c735c..6ddf53b6493e3 100644 --- a/pandas/core/reshape/reshape.py +++ b/pandas/core/reshape/reshape.py @@ -764,8 +764,6 @@ def get_dummies( dtype : dtype, default np.uint8 Data type for new columns. Only a single dtype is allowed. - .. versionadded:: 0.23.0 - Returns ------- DataFrame diff --git a/pandas/core/reshape/tile.py b/pandas/core/reshape/tile.py index f7723bee532ff..077ad057f6e1d 100644 --- a/pandas/core/reshape/tile.py +++ b/pandas/core/reshape/tile.py @@ -84,8 +84,6 @@ def cut( Whether the first interval should be left-inclusive or not. duplicates : {default 'raise', 'drop'}, optional If bin edges are not unique, raise ValueError or drop non-uniques. - - .. versionadded:: 0.23.0 ordered : bool, default True Whether the labels are ordered or not. Applies to returned types Categorical and Series (with Categorical dtype). If True, diff --git a/pandas/core/series.py b/pandas/core/series.py index ef9ade5c7bb15..1d2c1ef59d299 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -118,7 +118,6 @@ optional_mapper="", optional_labels="", optional_axis="", - versionadded_to_excel="\n .. versionadded:: 0.20.0\n", ) @@ -157,12 +156,8 @@ class Series(base.IndexOpsMixin, generic.NDFrame): Parameters ---------- data : array-like, Iterable, dict, or scalar value - Contains data stored in Series. - - .. versionchanged:: 0.23.0 - If data is a dict, argument order is maintained for Python 3.6 - and later. - + Contains data stored in Series. If data is a dict, argument order is + maintained. index : array-like or Index (1d) Values must be hashable and have the same length as `data`. Non-unique index values are allowed. Will default to @@ -4047,7 +4042,6 @@ def _gotitem(self, key, ndim, subset=None) -> "Series": axis=_shared_doc_kwargs["axis"], see_also=_agg_see_also_doc, examples=_agg_examples_doc, - versionadded="\n.. versionadded:: 0.20.0\n", ) def aggregate(self, func=None, axis=0, *args, **kwargs): # Validate the axis parameter diff --git a/pandas/core/shared_docs.py b/pandas/core/shared_docs.py index 244ee3aa298db..14363dabfcdf3 100644 --- a/pandas/core/shared_docs.py +++ b/pandas/core/shared_docs.py @@ -6,7 +6,7 @@ "aggregate" ] = """\ Aggregate using one or more operations over the specified axis. -{versionadded} + Parameters ---------- func : function, str, list or dict @@ -119,8 +119,6 @@ This only applies if any of the groupers are Categoricals. If True: only show observed values for categorical groupers. If False: show all values for categorical groupers. - - .. versionadded:: 0.23.0 dropna : bool, default True If True, and if group keys contain NA values, NA values together with row/column will be dropped. @@ -154,7 +152,7 @@ columns, considered measured variables (`value_vars`), are "unpivoted" to the row axis, leaving just two non-identifier columns, 'variable' and 'value'. 
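    A minimal example of the reshape this docstring describes (the full
    examples appear later in the template):

    >>> pd.melt(pd.DataFrame({"A": ["a", "b"], "B": [1, 2]}), id_vars=["A"])
       A variable  value
    0  a        B      1
    1  b        B      2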
-%(versionadded)s + Parameters ---------- id_vars : tuple, list, or ndarray, optional diff --git a/pandas/core/strings.py b/pandas/core/strings.py index 4decd86764ccc..ab6c9cfb51414 100644 --- a/pandas/core/strings.py +++ b/pandas/core/strings.py @@ -602,8 +602,6 @@ def str_replace(arr, pat, repl, n=-1, case=None, flags=0, regex=True): - Cannot be set to False if `pat` is a compiled regex or `repl` is a callable. - .. versionadded:: 0.23.0 - Returns ------- Series or Index of object @@ -2374,7 +2372,6 @@ def cat(self, others=None, sep=None, na_rep=None, join="left"): to match the length of the calling Series/Index). To disable alignment, use `.values` on any Series/Index/DataFrame in `others`. - .. versionadded:: 0.23.0 .. versionchanged:: 1.0.0 Changed default of `join` from None to `'left'`. diff --git a/pandas/core/tools/datetimes.py b/pandas/core/tools/datetimes.py index 09a53d5a10ae6..ddb44898dbfad 100644 --- a/pandas/core/tools/datetimes.py +++ b/pandas/core/tools/datetimes.py @@ -680,8 +680,6 @@ def to_datetime( used when there are at least 50 values. The presence of out-of-bounds values will render the cache unusable and may slow down parsing. - .. versionadded:: 0.23.0 - .. versionchanged:: 0.25.0 - changed default value from False to True. diff --git a/pandas/core/window/ewm.py b/pandas/core/window/ewm.py index 4282cb41c4e91..34d9d9d8c00ef 100644 --- a/pandas/core/window/ewm.py +++ b/pandas/core/window/ewm.py @@ -278,7 +278,6 @@ def _constructor(self): _shared_docs["aggregate"], see_also=_agg_see_also_doc, examples=_agg_examples_doc, - versionadded="", klass="Series/Dataframe", axis="", ) diff --git a/pandas/core/window/expanding.py b/pandas/core/window/expanding.py index 46e002324ec75..319944fd48eae 100644 --- a/pandas/core/window/expanding.py +++ b/pandas/core/window/expanding.py @@ -117,7 +117,6 @@ def _get_window(self, other=None, **kwargs): _shared_docs["aggregate"], see_also=_agg_see_also_doc, examples=_agg_examples_doc, - versionadded="", klass="Series/Dataframe", axis="", ) diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py index d094cc7d70a21..00fdf0813b027 100644 --- a/pandas/core/window/rolling.py +++ b/pandas/core/window/rolling.py @@ -1162,7 +1162,6 @@ def _get_window( _shared_docs["aggregate"], see_also=_agg_see_also_doc, examples=_agg_examples_doc, - versionadded="", klass="Series/DataFrame", axis="", ) @@ -1650,8 +1649,6 @@ def kurt(self, **kwargs): quantile : float Quantile to compute. 0 <= quantile <= 1. interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'} - .. versionadded:: 0.23.0 - This optional parameter specifies the interpolation method to use, when the desired quantile lies between two data points `i` and `j`: @@ -2043,7 +2040,6 @@ def _validate_freq(self): _shared_docs["aggregate"], see_also=_agg_see_also_doc, examples=_agg_examples_doc, - versionadded="", klass="Series/Dataframe", axis="", ) diff --git a/pandas/io/excel/_base.py b/pandas/io/excel/_base.py index e9634ff0e9a05..65e95fd321772 100644 --- a/pandas/io/excel/_base.py +++ b/pandas/io/excel/_base.py @@ -124,9 +124,6 @@ Rows to skip at the beginning (0-indexed). nrows : int, default None Number of rows to parse. - - .. versionadded:: 0.23.0 - na_values : scalar, str, list-like, or dict, default None Additional strings to recognize as NA/NaN. If dict passed, specific per-column NA values. 
By default the following values are interpreted diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py index 5c3a309b0e310..b5f0bc0a832c2 100644 --- a/pandas/io/formats/style.py +++ b/pandas/io/formats/style.py @@ -1027,8 +1027,6 @@ def hide_index(self) -> "Styler": """ Hide any indices from rendering. - .. versionadded:: 0.23.0 - Returns ------- self : Styler @@ -1040,8 +1038,6 @@ def hide_columns(self, subset) -> "Styler": """ Hide columns from rendering. - .. versionadded:: 0.23.0 - Parameters ---------- subset : IndexSlice diff --git a/pandas/io/html.py b/pandas/io/html.py index 8354cf413814e..40fde224a7ae9 100644 --- a/pandas/io/html.py +++ b/pandas/io/html.py @@ -161,8 +161,6 @@ class _HtmlFrameParser: displayed_only : bool Whether or not items with "display:none" should be ignored - .. versionadded:: 0.23.0 - Attributes ---------- io : str or file-like @@ -181,8 +179,6 @@ class _HtmlFrameParser: displayed_only : bool Whether or not items with "display:none" should be ignored - .. versionadded:: 0.23.0 - Notes ----- To subclass this class effectively you must override the following methods: diff --git a/pandas/io/json/_json.py b/pandas/io/json/_json.py index e3000788cb33a..c3977f89ac42f 100644 --- a/pandas/io/json/_json.py +++ b/pandas/io/json/_json.py @@ -428,9 +428,6 @@ def read_json( - The DataFrame columns must be unique for orients ``'index'``, ``'columns'``, and ``'records'``. - .. versionadded:: 0.23.0 - 'table' as an allowed value for the ``orient`` argument - typ : {'frame', 'series'}, default 'frame' The type of object to recover. diff --git a/pandas/io/stata.py b/pandas/io/stata.py index b3b16e04a5d9e..df5f6c3d53d30 100644 --- a/pandas/io/stata.py +++ b/pandas/io/stata.py @@ -164,8 +164,6 @@ path_or_buf : path (string), buffer or path object string, path object (pathlib.Path or py._path.local.LocalPath) or object implementing a binary read() functions. - - .. versionadded:: 0.23.0 support for pathlib, py.path. {_statafile_processing_params1} {_statafile_processing_params2} {_chunksize_params} @@ -2122,9 +2120,6 @@ class StataWriter(StataParser): object implementing a binary write() functions. If using a buffer then the buffer will not be automatically closed after the file is written. - - .. versionadded:: 0.23.0 support for pathlib, py.path. - data : DataFrame Input to save convert_dates : dict @@ -3000,8 +2995,6 @@ class StataWriter117(StataWriter): """ A class for writing Stata binary dta files in Stata 13 format (117) - .. versionadded:: 0.23.0 - Parameters ---------- fname : path (string), buffer or path object diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py index 45a3818492b44..d02f12a8e1029 100644 --- a/pandas/plotting/_core.py +++ b/pandas/plotting/_core.py @@ -542,12 +542,8 @@ def boxplot_frame_groupby( The layout of the plot: (rows, columns). sharex : bool, default False Whether x-axes will be shared among subplots. - - .. versionadded:: 0.23.1 sharey : bool, default True Whether y-axes will be shared among subplots. - - .. versionadded:: 0.23.1 backend : str, default None Backend to use instead of the backend specified in the option ``plotting.backend``. For instance, 'matplotlib'. 
Alternatively, to
        specify the ``plotting.backend`` for the whole session, set
        ``pd.options.plotting.backend``.

From aa6298fb757e0e26b95a9478888489d105e0e128 Mon Sep 17 00:00:00 2001
From: Thomas Dickson <thomas.c.dickson@gmail.com>
Date: Sun, 13 Sep 2020 21:22:36 +0100
Subject: [PATCH 541/632] ERR: Cartesian product error (#36335)

---
 pandas/core/reshape/util.py       |  3 +++
 pandas/tests/reshape/test_util.py | 10 ++++++++++
 2 files changed, 13 insertions(+)

diff --git a/pandas/core/reshape/util.py b/pandas/core/reshape/util.py
index a1bf3f8ee4119..d2c08712abacd 100644
--- a/pandas/core/reshape/util.py
+++ b/pandas/core/reshape/util.py
@@ -39,6 +39,9 @@ def cartesian_product(X):
     lenX = np.fromiter((len(x) for x in X), dtype=np.intp)
     cumprodX = np.cumproduct(lenX)

+    if np.any(cumprodX < 0):
+        raise ValueError("Product space too large to allocate arrays!")
+
     a = np.roll(cumprodX, 1)
     a[0] = 1

diff --git a/pandas/tests/reshape/test_util.py b/pandas/tests/reshape/test_util.py
index 9d074b5ade425..0acadc54cec0c 100644
--- a/pandas/tests/reshape/test_util.py
+++ b/pandas/tests/reshape/test_util.py
@@ -65,3 +65,13 @@ def test_invalid_input(self, X):

         with pytest.raises(TypeError, match=msg):
             cartesian_product(X=X)
+
+    def test_exceed_product_space(self):
+        # GH31355: raise a useful error when the product space is too large
+        msg = "Product space too large to allocate arrays!"
+
+        with pytest.raises(ValueError, match=msg):
+            dims = [np.arange(0, 22, dtype=np.int16) for i in range(12)] + [
+                (np.arange(15128, dtype=np.int16)),
+            ]
+            cartesian_product(X=dims)

From 00353d5be9f6ccc91a13181068c82c20930393a4 Mon Sep 17 00:00:00 2001
From: Rohith295 <57575037+Rohith295@users.noreply.github.com>
Date: Sun, 13 Sep 2020 22:28:44 +0200
Subject: [PATCH 542/632] Pd.series.map performance (#34948)

---
 doc/source/whatsnew/v1.2.0.rst |  1 +
 pandas/core/series.py          | 10 +++++++---
 2 files changed, 8 insertions(+), 3 deletions(-)

diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index 89d94dc0cabd6..dbc88d0b371e8 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -207,6 +207,7 @@ Performance improvements

 - Performance improvements when creating Series with dtype `str` or :class:`StringDtype` from array with many string elements (:issue:`36304`, :issue:`36317`)
 - Performance improvement in :meth:`GroupBy.agg` with the ``numba`` engine (:issue:`35759`)
+- Performance improvements when creating a :class:`Series` from a huge dictionary, e.g. via :meth:`Series.map` (:issue:`34717`)
 - Performance improvement in :meth:`GroupBy.transform` with the ``numba`` engine (:issue:`36240`)

 .. ---------------------------------------------------------------------------

diff --git a/pandas/core/series.py b/pandas/core/series.py
index 1d2c1ef59d299..69376d8bf80d1 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -357,15 +357,19 @@ def _init_dict(self, data, index=None, dtype=None):
         # Looking for NaN in dict doesn't work ({np.nan : 1}[float('nan')]
         # raises KeyError), so we iterate the entire dict, and align
         if data:
-            keys, values = zip(*data.items())
-            values = list(values)
+            # GH#34717: extracting the keys and values with zip(*data.items())
+            # went through generators and was slow for huge dictionaries.
+            # Pull the keys and values out of the dict directly instead.
+
+            keys = tuple(data.keys())
+            values = list(data.values())  # building the list directly is faster
         elif index is not None:
             # fastpath for Series(data=None).  Just use broadcasting a scalar
             # instead of reindexing.
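            # (na_value_for_dtype picks the missing-value marker that matches
            # the requested dtype, e.g. np.nan for float/object and NaT for
            # datetime-like dtypes)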
values = na_value_for_dtype(dtype) keys = index else: - keys, values = [], [] + keys, values = tuple([]), [] # Input is now list-like, so rely on "standard" construction: From 69ff1795e547248d2bf5536ea1d64673b36e0177 Mon Sep 17 00:00:00 2001 From: Dan Moore <9156191+drmrd@users.noreply.github.com> Date: Sun, 13 Sep 2020 16:29:41 -0400 Subject: [PATCH 543/632] BUG: Ensure read_spss accepts pathlib Paths (GH33666) (#36174) --- doc/source/whatsnew/v1.1.3.rst | 1 + pandas/io/spss.py | 4 +++- pandas/tests/io/test_spss.py | 7 +++++-- 3 files changed, 9 insertions(+), 3 deletions(-) diff --git a/doc/source/whatsnew/v1.1.3.rst b/doc/source/whatsnew/v1.1.3.rst index d789518f93f6d..8e283aec39786 100644 --- a/doc/source/whatsnew/v1.1.3.rst +++ b/doc/source/whatsnew/v1.1.3.rst @@ -26,6 +26,7 @@ Fixed regressions Bug fixes ~~~~~~~~~ +- Bug in :func:`read_spss` where passing a ``pathlib.Path`` as ``path`` would raise a ``TypeError`` (:issue:`33666`) - Bug in :meth:`Series.str.startswith` and :meth:`Series.str.endswith` with ``category`` dtype not propagating ``na`` parameter (:issue:`36241`) - Bug in :class:`Series` constructor where integer overflow would occur for sufficiently large scalar inputs when an index was provided (:issue:`36291`) diff --git a/pandas/io/spss.py b/pandas/io/spss.py index 9605faeb36590..79cdfbf15392a 100644 --- a/pandas/io/spss.py +++ b/pandas/io/spss.py @@ -7,6 +7,8 @@ from pandas.core.api import DataFrame +from pandas.io.common import stringify_path + def read_spss( path: Union[str, Path], @@ -40,6 +42,6 @@ def read_spss( usecols = list(usecols) # pyreadstat requires a list df, _ = pyreadstat.read_sav( - path, usecols=usecols, apply_value_formats=convert_categoricals + stringify_path(path), usecols=usecols, apply_value_formats=convert_categoricals ) return df diff --git a/pandas/tests/io/test_spss.py b/pandas/tests/io/test_spss.py index 013f56f83c5ec..a4894ff66ab9f 100644 --- a/pandas/tests/io/test_spss.py +++ b/pandas/tests/io/test_spss.py @@ -1,3 +1,5 @@ +from pathlib import Path + import numpy as np import pytest @@ -7,9 +9,10 @@ pyreadstat = pytest.importorskip("pyreadstat") -def test_spss_labelled_num(datapath): +@pytest.mark.parametrize("path_klass", [lambda p: p, Path]) +def test_spss_labelled_num(path_klass, datapath): # test file from the Haven project (https://haven.tidyverse.org/) - fname = datapath("io", "data", "spss", "labelled-num.sav") + fname = path_klass(datapath("io", "data", "spss", "labelled-num.sav")) df = pd.read_spss(fname, convert_categoricals=True) expected = pd.DataFrame({"VAR00002": "This is one"}, index=[0]) From b8448b2608bd85286e0062dcdcd84a2545511298 Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Sun, 13 Sep 2020 13:37:18 -0700 Subject: [PATCH 544/632] BUG: iloc.__setitem__ with DataFrame value, multiple blocks, non-unique columns (#36337) --- pandas/core/indexing.py | 44 +++++++++++++++++++++++------- pandas/tests/indexing/test_iloc.py | 14 ++++++++++ 2 files changed, 48 insertions(+), 10 deletions(-) diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index 64da27a6574a6..9ecad335e2c3c 100644 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -1690,18 +1690,42 @@ def _setitem_with_indexer(self, indexer, value): sub_indexer = list(indexer) multiindex_indexer = isinstance(labels, ABCMultiIndex) # TODO: we are implicitly assuming value.columns is unique + unique_cols = value.columns.is_unique + + if not unique_cols and value.columns.equals(self.obj.columns): + # We assume we are already aligned, see + # 
test_iloc_setitem_frame_duplicate_columns_multiple_blocks + for loc in ilocs: + item = item_labels[loc] + if item in value: + sub_indexer[info_axis] = item + v = self._align_series( + tuple(sub_indexer), + value.iloc[:, loc], + multiindex_indexer, + ) + else: + v = np.nan - for loc in ilocs: - item = item_labels[loc] - if item in value: - sub_indexer[info_axis] = item - v = self._align_series( - tuple(sub_indexer), value[item], multiindex_indexer - ) - else: - v = np.nan + self._setitem_single_column(loc, v, pi) - self._setitem_single_column(loc, v, pi) + elif not unique_cols: + raise ValueError( + "Setting with non-unique columns is not allowed." + ) + + else: + for loc in ilocs: + item = item_labels[loc] + if item in value: + sub_indexer[info_axis] = item + v = self._align_series( + tuple(sub_indexer), value[item], multiindex_indexer + ) + else: + v = np.nan + + self._setitem_single_column(loc, v, pi) # we have an equal len ndarray/convertible to our labels # hasattr first, to avoid coercing to ndarray without reason. diff --git a/pandas/tests/indexing/test_iloc.py b/pandas/tests/indexing/test_iloc.py index bfb62835add93..d3d455f83c41a 100644 --- a/pandas/tests/indexing/test_iloc.py +++ b/pandas/tests/indexing/test_iloc.py @@ -369,6 +369,20 @@ def test_iloc_setitem_dups(self): df.iloc[[1, 0], [0, 1]] = df.iloc[[1, 0], [0, 1]].reset_index(drop=True) tm.assert_frame_equal(df, expected) + def test_iloc_setitem_frame_duplicate_columns_multiple_blocks(self): + # Same as the "assign back to self" check in test_iloc_setitem_dups + # but on a DataFrame with multiple blocks + df = pd.DataFrame([[0, 1], [2, 3]], columns=["B", "B"]) + + df.iloc[:, 0] = df.iloc[:, 0].astype("f8") + assert len(df._mgr.blocks) == 2 + expected = df.copy() + + # assign back to self + df.iloc[[0, 1], [0, 1]] = df.iloc[[0, 1], [0, 1]] + + tm.assert_frame_equal(df, expected) + # TODO: GH#27620 this test used to compare iloc against ix; check if this # is redundant with another test comparing iloc against loc def test_iloc_getitem_frame(self): From 4c4db192858658f8f9368a1fd032180946c48dcd Mon Sep 17 00:00:00 2001 From: Marco Gorelli Date: Sun, 13 Sep 2020 21:46:08 +0100 Subject: [PATCH 545/632] BUG: xticks unnecessarily rotated (#34334) --- doc/source/whatsnew/v1.2.0.rst | 1 + pandas/plotting/_matplotlib/core.py | 10 +++++--- pandas/tests/plotting/test_datetimelike.py | 28 +++++++++++++++++++++- pandas/tests/plotting/test_frame.py | 13 ++++++---- pandas/tests/plotting/test_series.py | 5 ++++ 5 files changed, 49 insertions(+), 8 deletions(-) diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index dbc88d0b371e8..bb79b91096867 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -305,6 +305,7 @@ I/O Plotting ^^^^^^^^ +- Bug in :meth:`DataFrame.plot` was rotating xticklabels when ``subplots=True``, even if the x-axis wasn't an irregular time series (:issue:`29460`) - Bug in :meth:`DataFrame.plot` where a marker letter in the ``style`` keyword sometimes causes a ``ValueError`` (:issue:`21003`) Groupby/resample/rolling diff --git a/pandas/plotting/_matplotlib/core.py b/pandas/plotting/_matplotlib/core.py index 8275c0991e464..602b42022f561 100644 --- a/pandas/plotting/_matplotlib/core.py +++ b/pandas/plotting/_matplotlib/core.py @@ -1231,11 +1231,15 @@ def get_label(i): ax.xaxis.set_major_locator(FixedLocator(xticks)) ax.set_xticklabels(xticklabels) + # If the index is an irregular time series, then by default + # we rotate the tick labels. 
The exception is if there are + # subplots which don't share their x-axes, in which we case + # we don't rotate the ticklabels as by default the subplots + # would be too close together. condition = ( not self._use_dynamic_x() - and data.index.is_all_dates - and not self.subplots - or (self.subplots and self.sharex) + and (data.index.is_all_dates and self.use_index) + and (not self.subplots or (self.subplots and self.sharex)) ) index_name = self._get_index_name() diff --git a/pandas/tests/plotting/test_datetimelike.py b/pandas/tests/plotting/test_datetimelike.py index ecf378d4fc04a..78aa1887f5611 100644 --- a/pandas/tests/plotting/test_datetimelike.py +++ b/pandas/tests/plotting/test_datetimelike.py @@ -9,7 +9,7 @@ from pandas._libs.tslibs import BaseOffset, to_offset import pandas.util._test_decorators as td -from pandas import DataFrame, Index, NaT, Series, isna +from pandas import DataFrame, Index, NaT, Series, isna, to_datetime import pandas._testing as tm from pandas.core.indexes.datetimes import DatetimeIndex, bdate_range, date_range from pandas.core.indexes.period import Period, PeriodIndex, period_range @@ -1494,6 +1494,32 @@ def test_matplotlib_scatter_datetime64(self): expected = "2017-12-12" assert label.get_text() == expected + def test_check_xticks_rot(self): + # https://github.com/pandas-dev/pandas/issues/29460 + # regular time series + x = to_datetime(["2020-05-01", "2020-05-02", "2020-05-03"]) + df = DataFrame({"x": x, "y": [1, 2, 3]}) + axes = df.plot(x="x", y="y") + self._check_ticks_props(axes, xrot=0) + + # irregular time series + x = to_datetime(["2020-05-01", "2020-05-02", "2020-05-04"]) + df = DataFrame({"x": x, "y": [1, 2, 3]}) + axes = df.plot(x="x", y="y") + self._check_ticks_props(axes, xrot=30) + + # use timeseries index or not + axes = df.set_index("x").plot(y="y", use_index=True) + self._check_ticks_props(axes, xrot=30) + axes = df.set_index("x").plot(y="y", use_index=False) + self._check_ticks_props(axes, xrot=0) + + # separate subplots + axes = df.plot(x="x", y="y", subplots=True, sharex=True) + self._check_ticks_props(axes, xrot=30) + axes = df.plot(x="x", y="y", subplots=True, sharex=False) + self._check_ticks_props(axes, xrot=0) + def _check_plot_works(f, freq=None, series=None, *args, **kwargs): import matplotlib.pyplot as plt diff --git a/pandas/tests/plotting/test_frame.py b/pandas/tests/plotting/test_frame.py index d2b22c7a4c2e3..ca4c2bdcc2fe1 100644 --- a/pandas/tests/plotting/test_frame.py +++ b/pandas/tests/plotting/test_frame.py @@ -48,7 +48,6 @@ def _assert_xtickslabels_visibility(self, axes, expected): for ax, exp in zip(axes, expected): self._check_visible(ax.get_xticklabels(), visible=exp) - @pytest.mark.xfail(reason="Waiting for PR 34334", strict=True) @pytest.mark.slow def test_plot(self): from pandas.plotting._matplotlib.compat import mpl_ge_3_1_0 @@ -66,6 +65,7 @@ def test_plot(self): with tm.assert_produces_warning(UserWarning): axes = _check_plot_works(df.plot, subplots=True, use_index=False) + self._check_ticks_props(axes, xrot=0) self._check_axes_shape(axes, axes_num=4, layout=(4, 1)) df = DataFrame({"x": [1, 2], "y": [3, 4]}) @@ -78,7 +78,8 @@ def test_plot(self): df = DataFrame(np.random.rand(10, 3), index=list(string.ascii_letters[:10])) - _check_plot_works(df.plot, use_index=True) + ax = _check_plot_works(df.plot, use_index=True) + self._check_ticks_props(ax, xrot=0) _check_plot_works(df.plot, sort_columns=False) _check_plot_works(df.plot, yticks=[1, 5, 10]) _check_plot_works(df.plot, xticks=[1, 5, 10]) @@ -110,7 +111,8 
@@ def test_plot(self):
         tuples = zip(string.ascii_letters[:10], range(10))
         df = DataFrame(np.random.rand(10, 3), index=MultiIndex.from_tuples(tuples))
-        _check_plot_works(df.plot, use_index=True)
+        ax = _check_plot_works(df.plot, use_index=True)
+        self._check_ticks_props(ax, xrot=0)

         # unicode
         index = MultiIndex.from_tuples(
@@ -304,12 +306,14 @@ def test_xcompat(self):
         ax = df.plot(x_compat=True)
         lines = ax.get_lines()
         assert not isinstance(lines[0].get_xdata(), PeriodIndex)
+        self._check_ticks_props(ax, xrot=30)

         tm.close()
         pd.plotting.plot_params["xaxis.compat"] = True
         ax = df.plot()
         lines = ax.get_lines()
         assert not isinstance(lines[0].get_xdata(), PeriodIndex)
+        self._check_ticks_props(ax, xrot=30)

         tm.close()
         pd.plotting.plot_params["x_compat"] = False
@@ -325,12 +329,14 @@ def test_xcompat(self):
         ax = df.plot()
         lines = ax.get_lines()
         assert not isinstance(lines[0].get_xdata(), PeriodIndex)
+        self._check_ticks_props(ax, xrot=30)

         tm.close()
         ax = df.plot()
         lines = ax.get_lines()
         assert not isinstance(lines[0].get_xdata(), PeriodIndex)
         assert isinstance(PeriodIndex(lines[0].get_xdata()), PeriodIndex)
+        self._check_ticks_props(ax, xrot=0)

     def test_period_compat(self):
         # GH 9012
@@ -486,7 +492,6 @@ def test_groupby_boxplot_sharex(self):
         expected = [False, False, True, True]
         self._assert_xtickslabels_visibility(axes, expected)

-    @pytest.mark.xfail(reason="Waiting for PR 34334", strict=True)
     @pytest.mark.slow
     def test_subplots_timeseries(self):
         idx = date_range(start="2014-07-01", freq="M", periods=10)
diff --git a/pandas/tests/plotting/test_series.py b/pandas/tests/plotting/test_series.py
index 85c06b2e7b748..d56c882471a9a 100644
--- a/pandas/tests/plotting/test_series.py
+++ b/pandas/tests/plotting/test_series.py
@@ -109,6 +109,7 @@ def test_ts_area_lim(self):
         line = ax.get_lines()[0].get_data(orig=False)[0]
         assert xmin <= line[0]
         assert xmax >= line[-1]
+        self._check_ticks_props(ax, xrot=0)
         tm.close()

         # GH 7471
@@ -118,6 +119,7 @@ def test_ts_area_lim(self):
         line = ax.get_lines()[0].get_data(orig=False)[0]
         assert xmin <= line[0]
         assert xmax >= line[-1]
+        self._check_ticks_props(ax, xrot=30)
         tm.close()

         tz_ts = self.ts.copy()
@@ -128,6 +130,7 @@ def test_ts_area_lim(self):
         line = ax.get_lines()[0].get_data(orig=False)[0]
         assert xmin <= line[0]
         assert xmax >= line[-1]
+        self._check_ticks_props(ax, xrot=0)
         tm.close()

         _, ax = self.plt.subplots()
@@ -136,6 +139,7 @@ def test_ts_area_lim(self):
         line = ax.get_lines()[0].get_data(orig=False)[0]
         assert xmin <= line[0]
         assert xmax >= line[-1]
+        self._check_ticks_props(ax, xrot=0)

     def test_label(self):
         s = Series([1, 2])
@@ -284,6 +288,7 @@ def test_irregular_datetime(self):
         xp = DatetimeConverter.convert(datetime(1999, 1, 1), "", ax)
         ax.set_xlim("1/1/1999", "1/1/2001")
         assert xp == ax.get_xlim()[0]
+        self._check_ticks_props(ax, xrot=30)

     def test_unsorted_index_xlim(self):
         ser = Series(

From fc61aa97b6d1f824d79deef9c3191d99323f97b5 Mon Sep 17 00:00:00 2001
From: patrick <61934744+phofl@users.noreply.github.com>
Date: Mon, 14 Sep 2020 00:18:21 +0200
Subject: [PATCH 546/632] [TST]: Groupby raised ValueError for ffill with duplicate column names (#36326)

---
 pandas/tests/groupby/test_function.py | 45 --------------
 pandas/tests/groupby/test_groupby.py  | 20 -------
 pandas/tests/groupby/test_missing.py  | 84 +++++++++++++++++++++++++++
 3 files changed, 84 insertions(+), 65 deletions(-)
 create mode 100644 pandas/tests/groupby/test_missing.py

diff --git a/pandas/tests/groupby/test_function.py b/pandas/tests/groupby/test_function.py
index 
42945be923fa0..ab736b55b5743 100644 --- a/pandas/tests/groupby/test_function.py +++ b/pandas/tests/groupby/test_function.py @@ -495,51 +495,6 @@ def test_idxmin_idxmax_returns_int_types(func, values): tm.assert_frame_equal(result, expected) -def test_fill_consistency(): - - # GH9221 - # pass thru keyword arguments to the generated wrapper - # are set if the passed kw is None (only) - df = DataFrame( - index=pd.MultiIndex.from_product( - [["value1", "value2"], date_range("2014-01-01", "2014-01-06")] - ), - columns=Index(["1", "2"], name="id"), - ) - df["1"] = [ - np.nan, - 1, - np.nan, - np.nan, - 11, - np.nan, - np.nan, - 2, - np.nan, - np.nan, - 22, - np.nan, - ] - df["2"] = [ - np.nan, - 3, - np.nan, - np.nan, - 33, - np.nan, - np.nan, - 4, - np.nan, - np.nan, - 44, - np.nan, - ] - - expected = df.groupby(level=0, axis=0).fillna(method="ffill") - result = df.T.groupby(level=0, axis=1).fillna(method="ffill").T - tm.assert_frame_equal(result, expected) - - def test_groupby_cumprod(): # GH 4095 df = pd.DataFrame({"key": ["b"] * 10, "value": 2}) diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py index 69397228dd941..313b0ea2434f9 100644 --- a/pandas/tests/groupby/test_groupby.py +++ b/pandas/tests/groupby/test_groupby.py @@ -1961,13 +1961,6 @@ def test_shift_bfill_ffill_tz(tz_naive_fixture, op, expected): tm.assert_frame_equal(result, expected) -def test_ffill_missing_arguments(): - # GH 14955 - df = pd.DataFrame({"a": [1, 2], "b": [1, 1]}) - with pytest.raises(ValueError, match="Must specify a fill"): - df.groupby("b").fillna() - - def test_groupby_only_none_group(): # see GH21624 # this was crashing with "ValueError: Length of passed values is 1, index implies 0" @@ -2133,16 +2126,3 @@ def test_groupby_column_index_name_lost(func): df_grouped = df.groupby([1]) result = getattr(df_grouped, func)().columns tm.assert_index_equal(result, expected) - - -@pytest.mark.parametrize("func", ["ffill", "bfill"]) -def test_groupby_column_index_name_lost_fill_funcs(func): - # GH: 29764 groupby loses index sometimes - df = pd.DataFrame( - [[1, 1.0, -1.0], [1, np.nan, np.nan], [1, 2.0, -2.0]], - columns=pd.Index(["type", "a", "b"], name="idx"), - ) - df_grouped = df.groupby(["type"])[["a", "b"]] - result = getattr(df_grouped, func)().columns - expected = pd.Index(["a", "b"], name="idx") - tm.assert_index_equal(result, expected) diff --git a/pandas/tests/groupby/test_missing.py b/pandas/tests/groupby/test_missing.py new file mode 100644 index 0000000000000..116aed9935694 --- /dev/null +++ b/pandas/tests/groupby/test_missing.py @@ -0,0 +1,84 @@ +import numpy as np +import pytest + +import pandas as pd +from pandas import DataFrame, Index, date_range +import pandas._testing as tm + + +@pytest.mark.parametrize("func", ["ffill", "bfill"]) +def test_groupby_column_index_name_lost_fill_funcs(func): + # GH: 29764 groupby loses index sometimes + df = pd.DataFrame( + [[1, 1.0, -1.0], [1, np.nan, np.nan], [1, 2.0, -2.0]], + columns=pd.Index(["type", "a", "b"], name="idx"), + ) + df_grouped = df.groupby(["type"])[["a", "b"]] + result = getattr(df_grouped, func)().columns + expected = pd.Index(["a", "b"], name="idx") + tm.assert_index_equal(result, expected) + + +@pytest.mark.parametrize("func", ["ffill", "bfill"]) +def test_groupby_fill_duplicate_column_names(func): + # GH: 25610 ValueError with duplicate column names + df1 = pd.DataFrame({"field1": [1, 3, 4], "field2": [1, 3, 4]}) + df2 = pd.DataFrame({"field1": [1, np.nan, 4]}) + df_grouped = pd.concat([df1, df2], 
axis=1).groupby(by=["field2"]) + expected = pd.DataFrame( + [[1, 1.0], [3, np.nan], [4, 4.0]], columns=["field1", "field1"] + ) + result = getattr(df_grouped, func)() + tm.assert_frame_equal(result, expected) + + +def test_ffill_missing_arguments(): + # GH 14955 + df = pd.DataFrame({"a": [1, 2], "b": [1, 1]}) + with pytest.raises(ValueError, match="Must specify a fill"): + df.groupby("b").fillna() + + +def test_fill_consistency(): + + # GH9221 + # pass thru keyword arguments to the generated wrapper + # are set if the passed kw is None (only) + df = DataFrame( + index=pd.MultiIndex.from_product( + [["value1", "value2"], date_range("2014-01-01", "2014-01-06")] + ), + columns=Index(["1", "2"], name="id"), + ) + df["1"] = [ + np.nan, + 1, + np.nan, + np.nan, + 11, + np.nan, + np.nan, + 2, + np.nan, + np.nan, + 22, + np.nan, + ] + df["2"] = [ + np.nan, + 3, + np.nan, + np.nan, + 33, + np.nan, + np.nan, + 4, + np.nan, + np.nan, + 44, + np.nan, + ] + + expected = df.groupby(level=0, axis=0).fillna(method="ffill") + result = df.T.groupby(level=0, axis=1).fillna(method="ffill").T + tm.assert_frame_equal(result, expected) From 37b26947db333834b46439395b4a18d7c6535002 Mon Sep 17 00:00:00 2001 From: smartvinnetou <61093810+smartvinnetou@users.noreply.github.com> Date: Sun, 13 Sep 2020 23:20:55 +0100 Subject: [PATCH 547/632] CLN: replaced Appender with doc (#33633) --- pandas/core/generic.py | 159 ++++++++++++++++++++--------------------- 1 file changed, 77 insertions(+), 82 deletions(-) diff --git a/pandas/core/generic.py b/pandas/core/generic.py index d78fa42cd1056..5336d0828881b 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -54,12 +54,7 @@ from pandas.compat._optional import import_optional_dependency from pandas.compat.numpy import function as nv from pandas.errors import AbstractMethodError, InvalidIndexError -from pandas.util._decorators import ( - Appender, - Substitution, - doc, - rewrite_axis_style_signature, -) +from pandas.util._decorators import Appender, doc, rewrite_axis_style_signature from pandas.util._validators import ( validate_bool_kwarg, validate_fillna_kwargs, @@ -2973,7 +2968,7 @@ class (index) object 'bird' 'bird' 'mammal' 'mammal' else: return xarray.Dataset.from_dataframe(self) - @Substitution(returns=fmt.return_docstring) + @doc(returns=fmt.return_docstring) def to_latex( self, buf=None, @@ -3002,9 +2997,9 @@ def to_latex( r""" Render object to a LaTeX tabular, longtable, or nested table/tabular. - Requires ``\usepackage{booktabs}``. The output can be copy/pasted + Requires ``\usepackage{{booktabs}}``. The output can be copy/pasted into a main LaTeX document or read from an external file - with ``\input{table.tex}``. + with ``\input{{table.tex}}``. .. versionchanged:: 1.0.0 Added caption and label arguments. @@ -3024,13 +3019,13 @@ def to_latex( Write row names (index). na_rep : str, default 'NaN' Missing data representation. - formatters : list of functions or dict of {str: function}, optional + formatters : list of functions or dict of {{str: function}}, optional Formatter functions to apply to columns' elements by position or name. The result of each function must be a unicode string. List must be of length equal to the number of columns. float_format : one-parameter function or str, optional, default None Formatter for floating point numbers. For example - ``float_format="%%.2f"`` and ``float_format="{:0.2f}".format`` will + ``float_format="%.2f"`` and ``float_format="{{:0.2f}}".format`` will both result in 0.1234 being formatted as 0.12. 
sparsify : bool, optional Set to False for a DataFrame with a hierarchical index to print @@ -3048,7 +3043,7 @@ def to_latex( longtable : bool, optional By default, the value will be read from the pandas config module. Use a longtable environment instead of tabular. Requires - adding a \usepackage{longtable} to your LaTeX preamble. + adding a \usepackage{{longtable}} to your LaTeX preamble. escape : bool, optional By default, the value will be read from the pandas config module. When set to False prevents from escaping latex special @@ -3066,24 +3061,24 @@ def to_latex( The default will be read from the config module. multirow : bool, default False Use \multirow to enhance MultiIndex rows. Requires adding a - \usepackage{multirow} to your LaTeX preamble. Will print + \usepackage{{multirow}} to your LaTeX preamble. Will print centered labels (instead of top-aligned) across the contained rows, separating groups via clines. The default will be read from the pandas config module. caption : str, optional - The LaTeX caption to be placed inside ``\caption{}`` in the output. + The LaTeX caption to be placed inside ``\caption{{}}`` in the output. .. versionadded:: 1.0.0 label : str, optional - The LaTeX label to be placed inside ``\label{}`` in the output. - This is used with ``\ref{}`` in the main ``.tex`` file. + The LaTeX label to be placed inside ``\label{{}}`` in the output. + This is used with ``\ref{{}}`` in the main ``.tex`` file. .. versionadded:: 1.0.0 position : str, optional The LaTeX positional argument for tables, to be placed after - ``\begin{}`` in the output. - %(returns)s + ``\begin{{}}`` in the output. + {returns} See Also -------- DataFrame.to_string : Render a DataFrame to a console-friendly @@ -3092,18 +3087,18 @@ def to_latex( Examples -------- - >>> df = pd.DataFrame({'name': ['Raphael', 'Donatello'], - ... 'mask': ['red', 'purple'], - ... 'weapon': ['sai', 'bo staff']}) + >>> df = pd.DataFrame(dict(name=['Raphael', 'Donatello'], + ... mask=['red', 'purple'], + ... weapon=['sai', 'bo staff'])) >>> print(df.to_latex(index=False)) # doctest: +NORMALIZE_WHITESPACE - \begin{tabular}{lll} + \begin{{tabular}}{{lll}} \toprule name & mask & weapon \\ \midrule Raphael & red & sai \\ Donatello & purple & bo staff \\ \bottomrule - \end{tabular} + \end{{tabular}} """ # Get defaults from the pandas config if self.ndim == 1: @@ -6791,6 +6786,7 @@ def interpolate( `scipy.interpolate.BPoly.from_derivatives` which replaces 'piecewise_polynomial' interpolation method in scipy 0.18. + axis : {{0 or 'index', 1 or 'columns', None}}, default None Axis to interpolate along. limit : int, optional @@ -6827,7 +6823,7 @@ def interpolate( downcast : optional, 'infer' or None, defaults to None Downcast dtypes if possible. - **kwargs + ``**kwargs`` : optional Keyword arguments to pass on to the interpolating function. Returns @@ -7243,11 +7239,11 @@ def isna(self: FrameOrSeries) -> FrameOrSeries: -------- Show which entries in a DataFrame are NA. - >>> df = pd.DataFrame({{'age': [5, 6, np.NaN], - ... 'born': [pd.NaT, pd.Timestamp('1939-05-27'), + >>> df = pd.DataFrame(dict(age=[5, 6, np.NaN], + ... born=[pd.NaT, pd.Timestamp('1939-05-27'), ... pd.Timestamp('1940-04-25')], - ... 'name': ['Alfred', 'Batman', ''], - ... 'toy': [None, 'Batmobile', 'Joker']}}) + ... name=['Alfred', 'Batman', ''], + ... toy=[None, 'Batmobile', 'Joker'])) >>> df age born name toy 0 5.0 NaT Alfred None @@ -7310,11 +7306,11 @@ def notna(self: FrameOrSeries) -> FrameOrSeries: -------- Show which entries in a DataFrame are not NA. 
- >>> df = pd.DataFrame({{'age': [5, 6, np.NaN], - ... 'born': [pd.NaT, pd.Timestamp('1939-05-27'), + >>> df = pd.DataFrame(dict(age=[5, 6, np.NaN], + ... born=[pd.NaT, pd.Timestamp('1939-05-27'), ... pd.Timestamp('1940-04-25')], - ... 'name': ['Alfred', 'Batman', ''], - ... 'toy': [None, 'Batmobile', 'Joker']}}) + ... name=['Alfred', 'Batman', ''], + ... toy=[None, 'Batmobile', 'Joker'])) >>> df age born name toy 0 5.0 NaT Alfred None @@ -10252,10 +10248,10 @@ def pct_change( Percentage change in French franc, Deutsche Mark, and Italian lira from 1980-01-01 to 1980-03-01. - >>> df = pd.DataFrame({ - ... 'FR': [4.0405, 4.0963, 4.3149], - ... 'GR': [1.7246, 1.7482, 1.8519], - ... 'IT': [804.74, 810.01, 860.13]}, + >>> df = pd.DataFrame(dict( + ... FR=[4.0405, 4.0963, 4.3149], + ... GR=[1.7246, 1.7482, 1.8519], + ... IT=[804.74, 810.01, 860.13]), ... index=['1980-01-01', '1980-02-01', '1980-03-01']) >>> df FR GR IT @@ -10272,10 +10268,10 @@ def pct_change( Percentage of change in GOOG and APPL stock volume. Shows computing the percentage change between columns. - >>> df = pd.DataFrame({ - ... '2016': [1769950, 30586265], - ... '2015': [1500923, 40912316], - ... '2014': [1371819, 41403351]}, + >>> df = pd.DataFrame(dict([ + ... ('2016', [1769950, 30586265]), + ... ('2015', [1500923, 40912316]), + ... ('2014', [1371819, 41403351])]), ... index=['GOOG', 'APPL']) >>> df 2016 2015 2014 @@ -10691,43 +10687,43 @@ def _doc_parms(cls): _num_doc = """ -%(desc)s +{desc} Parameters ---------- -axis : %(axis_descr)s +axis : {axis_descr} Axis for the function to be applied on. skipna : bool, default True Exclude NA/null values when computing the result. level : int or level name, default None If the axis is a MultiIndex (hierarchical), count along a - particular level, collapsing into a %(name1)s. + particular level, collapsing into a {name1}. numeric_only : bool, default None Include only float, int, boolean columns. If None, will attempt to use everything, then use only numeric data. Not implemented for Series. -%(min_count)s\ +{min_count}\ **kwargs Additional keyword arguments to be passed to the function. Returns ------- -%(name1)s or %(name2)s (if level specified)\ -%(see_also)s\ -%(examples)s +{name1} or {name2} (if level specified)\ +{see_also}\ +{examples} """ _num_ddof_doc = """ -%(desc)s +{desc} Parameters ---------- -axis : %(axis_descr)s +axis : {axis_descr} skipna : bool, default True Exclude NA/null values. If an entire row/column is NA, the result will be NA. level : int or level name, default None If the axis is a MultiIndex (hierarchical), count along a - particular level, collapsing into a %(name1)s. + particular level, collapsing into a {name1}. ddof : int, default 1 Delta Degrees of Freedom. The divisor used in calculations is N - ddof, where N represents the number of elements. @@ -10737,7 +10733,7 @@ def _doc_parms(cls): Returns ------- -%(name1)s or %(name2)s (if level specified) +{name1} or {name2} (if level specified) Notes ----- @@ -10745,11 +10741,11 @@ def _doc_parms(cls): default `ddof=1`)\n""" _bool_doc = """ -%(desc)s +{desc} Parameters ---------- -axis : {0 or 'index', 1 or 'columns', None}, default 0 +axis : {{0 or 'index', 1 or 'columns', None}}, default 0 Indicate which axis or axes should be reduced. * 0 / 'index' : reduce the index, return a Series whose index is the @@ -10763,24 +10759,24 @@ def _doc_parms(cls): then use only boolean data. Not implemented for Series. skipna : bool, default True Exclude NA/null values. 
If the entire row/column is NA and skipna is - True, then the result will be %(empty_value)s, as for an empty row/column. + True, then the result will be {empty_value}, as for an empty row/column. If skipna is False, then NA are treated as True, because these are not equal to zero. level : int or level name, default None If the axis is a MultiIndex (hierarchical), count along a - particular level, collapsing into a %(name1)s. + particular level, collapsing into a {name1}. **kwargs : any, default None Additional keywords have no effect but might be accepted for compatibility with NumPy. Returns ------- -%(name1)s or %(name2)s - If level is specified, then, %(name2)s is returned; otherwise, %(name1)s +{name1} or {name2} + If level is specified, then, {name2} is returned; otherwise, {name1} is returned. -%(see_also)s -%(examples)s""" +{see_also} +{examples}""" _all_desc = """\ Return whether all elements are True, potentially over an axis. @@ -10843,14 +10839,14 @@ def _doc_parms(cls): """ _cnum_doc = """ -Return cumulative %(desc)s over a DataFrame or Series axis. +Return cumulative {desc} over a DataFrame or Series axis. Returns a DataFrame or Series of the same size containing the cumulative -%(desc)s. +{desc}. Parameters ---------- -axis : {0 or 'index', 1 or 'columns'}, default 0 +axis : {{0 or 'index', 1 or 'columns'}}, default 0 The index or the name of the axis. 0 is equivalent to None or 'index'. skipna : bool, default True Exclude NA/null values. If an entire row/column is NA, the result @@ -10861,21 +10857,21 @@ def _doc_parms(cls): Returns ------- -%(name1)s or %(name2)s - Return cumulative %(desc)s of %(name1)s or %(name2)s. +{name1} or {name2} + Return cumulative {desc} of {name1} or {name2}. See Also -------- -core.window.Expanding.%(accum_func_name)s : Similar functionality +core.window.Expanding.{accum_func_name} : Similar functionality but ignores ``NaN`` values. -%(name2)s.%(accum_func_name)s : Return the %(desc)s over - %(name2)s axis. -%(name2)s.cummax : Return cumulative maximum over %(name2)s axis. -%(name2)s.cummin : Return cumulative minimum over %(name2)s axis. -%(name2)s.cumsum : Return cumulative sum over %(name2)s axis. -%(name2)s.cumprod : Return cumulative product over %(name2)s axis. +{name2}.{accum_func_name} : Return the {desc} over + {name2} axis. +{name2}.cummax : Return cumulative maximum over {name2} axis. +{name2}.cummin : Return cumulative minimum over {name2} axis. +{name2}.cumsum : Return cumulative sum over {name2} axis. +{name2}.cumprod : Return cumulative product over {name2} axis. 
-%(examples)s""" +{examples}""" _cummin_examples = """\ Examples @@ -11350,7 +11346,8 @@ def _make_min_count_stat_function( see_also: str = "", examples: str = "", ) -> Callable: - @Substitution( + @doc( + _num_doc, desc=desc, name1=name1, name2=name2, @@ -11359,7 +11356,6 @@ def _make_min_count_stat_function( see_also=see_also, examples=examples, ) - @Appender(_num_doc) def stat_func( self, axis=None, @@ -11406,7 +11402,8 @@ def _make_stat_function( see_also: str = "", examples: str = "", ) -> Callable: - @Substitution( + @doc( + _num_doc, desc=desc, name1=name1, name2=name2, @@ -11415,7 +11412,6 @@ def _make_stat_function( see_also=see_also, examples=examples, ) - @Appender(_num_doc) def stat_func( self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs ): @@ -11439,8 +11435,7 @@ def stat_func( def _make_stat_function_ddof( cls, name: str, name1: str, name2: str, axis_descr: str, desc: str, func: Callable ) -> Callable: - @Substitution(desc=desc, name1=name1, name2=name2, axis_descr=axis_descr) - @Appender(_num_ddof_doc) + @doc(_num_ddof_doc, desc=desc, name1=name1, name2=name2, axis_descr=axis_descr) def stat_func( self, axis=None, skipna=None, level=None, ddof=1, numeric_only=None, **kwargs ): @@ -11471,7 +11466,8 @@ def _make_cum_function( accum_func_name: str, examples: str, ) -> Callable: - @Substitution( + @doc( + _cnum_doc, desc=desc, name1=name1, name2=name2, @@ -11479,7 +11475,6 @@ def _make_cum_function( accum_func_name=accum_func_name, examples=examples, ) - @Appender(_cnum_doc) def cum_func(self, axis=None, skipna=True, *args, **kwargs): skipna = nv.validate_cum_func_with_skipna(skipna, args, kwargs, name) if axis is None: @@ -11517,7 +11512,8 @@ def _make_logical_function( examples: str, empty_value: bool, ) -> Callable: - @Substitution( + @doc( + _bool_doc, desc=desc, name1=name1, name2=name2, @@ -11526,7 +11522,6 @@ def _make_logical_function( examples=examples, empty_value=empty_value, ) - @Appender(_bool_doc) def logical_func(self, axis=0, bool_only=None, skipna=True, level=None, **kwargs): nv.validate_logical_func(tuple(), kwargs, fname=name) if level is not None: From 5ad15f8762cbf782c20e5ad25715053d4d1f2c6d Mon Sep 17 00:00:00 2001 From: rxxg Date: Mon, 14 Sep 2020 00:28:12 +0200 Subject: [PATCH 548/632] Ensure resource closure in all exceptional circumstances during construction (#35566) (#35587) --- doc/source/whatsnew/v1.2.0.rst | 1 + pandas/io/sas/sas7bdat.py | 8 ++++++-- pandas/io/sas/sas_xport.py | 6 +++++- pandas/io/sas/sasreader.py | 10 +++++----- pandas/tests/io/sas/data/corrupt.sas7bdat | Bin 0 -> 292 bytes pandas/tests/io/sas/test_sas7bdat.py | 8 ++++++++ 6 files changed, 25 insertions(+), 8 deletions(-) create mode 100644 pandas/tests/io/sas/data/corrupt.sas7bdat diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index bb79b91096867..bbee0062cf0ce 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -294,6 +294,7 @@ MultiIndex I/O ^^^ +- :func:`read_sas` no longer leaks resources on failure (:issue:`35566`) - Bug in :meth:`to_csv` caused a ``ValueError`` when it was called with a filename in combination with ``mode`` containing a ``b`` (:issue:`35058`) - In :meth:`read_csv` `float_precision='round_trip'` now handles `decimal` and `thousands` parameters (:issue:`35365`) - :meth:`to_pickle` and :meth:`read_pickle` were closing user-provided file objects (:issue:`35679`) diff --git a/pandas/io/sas/sas7bdat.py b/pandas/io/sas/sas7bdat.py index 76dac39d1889f..f2ee642d8fd42 100644 --- 
a/pandas/io/sas/sas7bdat.py
+++ b/pandas/io/sas/sas7bdat.py
@@ -142,8 +142,12 @@ def __init__(
             self._path_or_buf = open(self._path_or_buf, "rb")
             self.handle = self._path_or_buf

-        self._get_properties()
-        self._parse_metadata()
+        try:
+            self._get_properties()
+            self._parse_metadata()
+        except Exception:
+            self.close()
+            raise

     def column_data_lengths(self):
         """Return a numpy int64 array of the column data lengths"""
diff --git a/pandas/io/sas/sas_xport.py b/pandas/io/sas/sas_xport.py
index 1a4ba544f5d59..9727ec930119b 100644
--- a/pandas/io/sas/sas_xport.py
+++ b/pandas/io/sas/sas_xport.py
@@ -264,7 +264,11 @@ def __init__(
         # should already be opened in binary mode in Python 3.
         self.filepath_or_buffer = filepath_or_buffer

-        self._read_header()
+        try:
+            self._read_header()
+        except Exception:
+            self.close()
+            raise

     def close(self):
         self.filepath_or_buffer.close()
diff --git a/pandas/io/sas/sasreader.py b/pandas/io/sas/sasreader.py
index ae9457a8e3147..31d1a6ad471ea 100644
--- a/pandas/io/sas/sasreader.py
+++ b/pandas/io/sas/sasreader.py
@@ -136,8 +136,8 @@ def read_sas(
     if iterator or chunksize:
         return reader

-    data = reader.read()
-
-    if ioargs.should_close:
-        reader.close()
-    return data
+    try:
+        return reader.read()
+    finally:
+        if ioargs.should_close:
+            reader.close()
diff --git a/pandas/tests/io/sas/data/corrupt.sas7bdat b/pandas/tests/io/sas/data/corrupt.sas7bdat
new file mode 100644
index 0000000000000000000000000000000000000000..2941ffe3ecdf5c72773d9727c689b90dced8ebdf
GIT binary patch
literal 292
zcmZQzK!8K98WT2)2%g_NiGzXjxM7ckynvvR5`(cZBa;yeTp2SXinuYOvN3}l1A_oF
zBTPxKW3Ymor;n?%e^5|pK!^e^08@{Pc5w`G1nC9I$L;E@kE^@m2&2Jz6!tj4Xce&S
gj10_R0SIBKXJBGr=xY{XW)dG96lQ3KBu5Gp0N4g1L;wH)

literal 0
HcmV?d00001

diff --git a/pandas/tests/io/sas/test_sas7bdat.py b/pandas/tests/io/sas/test_sas7bdat.py
index 8c14f9de9f61c..9de6ca75fd4d9 100644
--- a/pandas/tests/io/sas/test_sas7bdat.py
+++ b/pandas/tests/io/sas/test_sas7bdat.py
@@ -217,6 +217,14 @@ def test_zero_variables(datapath):
         pd.read_sas(fname)


+def test_corrupt_read(datapath):
+    # We don't really care about the exact failure, the important thing is
+    # that the resource should be cleaned up afterwards (BUG #35566)
+    fname = datapath("io", "sas", "data", "corrupt.sas7bdat")
+    with pytest.raises(AttributeError):
+        pd.read_sas(fname)
+
+
 def round_datetime_to_ms(ts):
     if isinstance(ts, datetime):
         return ts.replace(microsecond=int(round(ts.microsecond, -3) / 1000) * 1000)

From a3c4dc8b087476088d181be6a29dc980098fb4eb Mon Sep 17 00:00:00 2001
From: Irv Lustig
Date: Sun, 13 Sep 2020 18:54:41 -0400
Subject: [PATCH 549/632] Change default of float_precision for read_csv and
 read_table to "high" (#36228)

---
 doc/source/whatsnew/v1.2.0.rst               | 13 ++++++++++
 pandas/_libs/parsers.pyx                     |  7 +++--
 pandas/io/parsers.py                         |  7 ++---
 pandas/tests/io/parser/test_c_parser_only.py | 27 +++++++++++++++++---
 4 files changed, 46 insertions(+), 8 deletions(-)

diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index bbee0062cf0ce..b2e724ad868ce 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -96,6 +96,19 @@ For example:
     buffer = io.BytesIO()
     data.to_csv(buffer, mode="w+b", encoding="utf-8", compression="gzip")

+.. 
_whatsnew_read_csv_table_precision_default: + +Change in default floating precision for ``read_csv`` and ``read_table`` +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +For the C parsing engine, the methods :meth:`read_csv` and :meth:`read_table` previously defaulted to a parser that +could read floating point numbers slightly incorrectly with respect to the last bit in precision. +The option ``floating_precision="high"`` has always been available to avoid this issue. +Beginning with this version, the default is now to use the more accurate parser by making +``floating_precision=None`` correspond to the high precision parser, and the new option +``floating_precision="legacy"`` to use the legacy parser. The change to using the higher precision +parser by default should have no impact on performance. (:issue:`17154`) + .. _whatsnew_120.enhancements.other: Other enhancements diff --git a/pandas/_libs/parsers.pyx b/pandas/_libs/parsers.pyx index 811e28b830921..b87e46f9b6648 100644 --- a/pandas/_libs/parsers.pyx +++ b/pandas/_libs/parsers.pyx @@ -476,10 +476,13 @@ cdef class TextReader: if float_precision == "round_trip": # see gh-15140 self.parser.double_converter = round_trip - elif float_precision == "high": + elif float_precision == "legacy": + self.parser.double_converter = xstrtod + elif float_precision == "high" or float_precision is None: self.parser.double_converter = precise_xstrtod else: - self.parser.double_converter = xstrtod + raise ValueError(f'Unrecognized float_precision option: ' + f'{float_precision}') if isinstance(dtype, dict): dtype = {k: pandas_dtype(dtype[k]) diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py index b963d5be69b5f..2780b1a7f86c9 100644 --- a/pandas/io/parsers.py +++ b/pandas/io/parsers.py @@ -338,9 +338,9 @@ option can improve performance because there is no longer any I/O overhead. float_precision : str, optional Specifies which converter the C engine should use for floating-point - values. The options are `None` for the ordinary converter, - `high` for the high-precision converter, and `round_trip` for the - round-trip converter. + values. The options are `None` or `high` for the ordinary converter, + `legacy` for the original lower precision pandas converter, and + `round_trip` for the round-trip converter. Returns ------- @@ -2284,6 +2284,7 @@ def TextParser(*args, **kwds): values. The options are None for the ordinary converter, 'high' for the high-precision converter, and 'round_trip' for the round-trip converter. + .. 
versionchanged:: 1.2 """ kwds["engine"] = "python" return TextFileReader(*args, **kwds) diff --git a/pandas/tests/io/parser/test_c_parser_only.py b/pandas/tests/io/parser/test_c_parser_only.py index 50d5fb3e49c2a..7c58afe867440 100644 --- a/pandas/tests/io/parser/test_c_parser_only.py +++ b/pandas/tests/io/parser/test_c_parser_only.py @@ -160,7 +160,9 @@ def test_precise_conversion(c_parser_only): # 25 decimal digits of precision text = f"a\n{num:.25}" - normal_val = float(parser.read_csv(StringIO(text))["a"][0]) + normal_val = float( + parser.read_csv(StringIO(text), float_precision="legacy")["a"][0] + ) precise_val = float( parser.read_csv(StringIO(text), float_precision="high")["a"][0] ) @@ -608,7 +610,7 @@ def test_unix_style_breaks(c_parser_only): tm.assert_frame_equal(result, expected) -@pytest.mark.parametrize("float_precision", [None, "high", "round_trip"]) +@pytest.mark.parametrize("float_precision", [None, "legacy", "high", "round_trip"]) @pytest.mark.parametrize( "data,thousands,decimal", [ @@ -646,7 +648,7 @@ def test_1000_sep_with_decimal( tm.assert_frame_equal(result, expected) -@pytest.mark.parametrize("float_precision", [None, "high", "round_trip"]) +@pytest.mark.parametrize("float_precision", [None, "legacy", "high", "round_trip"]) @pytest.mark.parametrize( "value,expected", [ @@ -702,3 +704,22 @@ def test_1000_sep_decimal_float_precision( ) val = df.iloc[0, 0] assert val == expected + + +def test_float_precision_options(c_parser_only): + # GH 17154, 36228 + parser = c_parser_only + s = "foo\n243.164\n" + df = parser.read_csv(StringIO(s)) + df2 = parser.read_csv(StringIO(s), float_precision="high") + + tm.assert_frame_equal(df, df2) + + df3 = parser.read_csv(StringIO(s), float_precision="legacy") + + assert not df.iloc[0, 0] == df3.iloc[0, 0] + + msg = "Unrecognized float_precision option: junk" + + with pytest.raises(ValueError, match=msg): + parser.read_csv(StringIO(s), float_precision="junk") From 88bc2e4c503b0c9fa18020f0f893aac8da86549a Mon Sep 17 00:00:00 2001 From: Asish Mahapatra Date: Sun, 13 Sep 2020 18:57:44 -0400 Subject: [PATCH 550/632] BUG: read_excel for ods files raising UnboundLocalError in certain cases (#36175) --- doc/source/whatsnew/v1.2.0.rst | 1 + pandas/io/excel/_odfreader.py | 26 +++++++++++++----------- pandas/tests/io/data/excel/gh-35802.ods | Bin 0 -> 12692 bytes pandas/tests/io/data/excel/gh-36122.ods | Bin 0 -> 8974 bytes pandas/tests/io/excel/test_readers.py | 17 ++++++++++++++++ 5 files changed, 32 insertions(+), 12 deletions(-) create mode 100755 pandas/tests/io/data/excel/gh-35802.ods create mode 100755 pandas/tests/io/data/excel/gh-36122.ods diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index b2e724ad868ce..8b18b56929acd 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -315,6 +315,7 @@ I/O - :meth:`to_csv` did not support zip compression for binary file object not having a filename (:issue: `35058`) - :meth:`to_csv` and :meth:`read_csv` did not honor `compression` and `encoding` for path-like objects that are internally converted to file-like objects (:issue:`35677`, :issue:`26124`, and :issue:`32392`) - :meth:`to_picke` and :meth:`read_pickle` did not support compression for file-objects (:issue:`26237`, :issue:`29054`, and :issue:`29570`) +- Bug in :meth:`read_excel` with `engine="odf"` caused ``UnboundLocalError`` in some cases where cells had nested child nodes (:issue:`36122`, and :issue:`35802`) Plotting ^^^^^^^^ diff --git a/pandas/io/excel/_odfreader.py 
b/pandas/io/excel/_odfreader.py index ffb599cdfaaf8..4f9f8a29c0010 100644 --- a/pandas/io/excel/_odfreader.py +++ b/pandas/io/excel/_odfreader.py @@ -197,22 +197,24 @@ def _get_cell_string_value(self, cell) -> str: Find and decode OpenDocument text:s tags that represent a run length encoded sequence of space characters. """ - from odf.element import Element, Text + from odf.element import Element from odf.namespaces import TEXTNS - from odf.text import P, S + from odf.text import S - text_p = P().qname text_s = S().qname - p = cell.childNodes[0] - value = [] - if p.qname == text_p: - for k, fragment in enumerate(p.childNodes): - if isinstance(fragment, Text): - value.append(fragment.data) - elif isinstance(fragment, Element): - if fragment.qname == text_s: - spaces = int(fragment.attributes.get((TEXTNS, "c"), 1)) + + for fragment in cell.childNodes: + if isinstance(fragment, Element): + if fragment.qname == text_s: + spaces = int(fragment.attributes.get((TEXTNS, "c"), 1)) value.append(" " * spaces) + else: + # recursive impl needed in case of nested fragments + # with multiple spaces + # https://github.com/pandas-dev/pandas/pull/36175#discussion_r484639704 + value.append(self._get_cell_string_value(fragment)) + else: + value.append(str(fragment)) return "".join(value) diff --git a/pandas/tests/io/data/excel/gh-35802.ods b/pandas/tests/io/data/excel/gh-35802.ods new file mode 100755 index 0000000000000000000000000000000000000000..f3ad061f1d995488bf029f926dc3eba60fdded2f GIT binary patch literal 12692 zcmdseWmp|c)-DoULU4C?cMnc*cXx-4ySozzPH=a3cZZ+>g1ft4nEB4+%(-Xg`R?Dl z_S3zqclUa$YgKiv)vMkuD+&4m83+gx2q@G5MJ~{iBa9XZ2L-YWd9BG4d!2h z{#N>IV{L3^;%NU5GzUgHfQ^l%zMehZ|C^SvrHvlI=>Jvg?M@y4dtI1+bBDGzwvM*% zs{iF4|4z>Vpa*bt_+1|)B;-G={4aC770CXkK0N~iBTJ(<1KHTq893V8znj;^$`TYv zRuba9SAam?cW?5aO@{q;_OFJvwYM>`H*)yX4gJyW{{Z`+-QP9jy^C!1tc@)HivjTd zrbaU>Jrg4bIzcmlm7cA`|0w^R6=2_#t@NzTjEx)swDyL^WAS6w{`3d|r`&-?;|u(B zU|{y-!%gOyoh(7BV00HLh^y^U@yc?!#Rr+Qo8=-*Yf-hS$9mX<;njs*ip&NSB*S8+ zb^BDDEwoK}%bZIK^;CE%IOpd6d6|^W&eoJ1I*EZaY_OISHQD4YDVN+bt%O}a*zLq% z`)i{hp>QJw&91jRFgeCIpo~@I0EbyIuDAF+;VYa)S=G>b_3~}O_4u*A{E<*!h64cp z*v-J27x`oasAX zBTCRsV?2>LXyI~w-F=~*c*WSLsC5#u^^(7Fb7+$r72iC)-jP-cZ$2kp(?V1j@PAH2s;WS}92kX;!lb!>h{-nVYW&Lna{ zc~oYce_}WQXhXtrNK0;;9D%%vF!Cy+;H50`)=CWu&-Q6==Ew5tK6WqRE?$=74hWiVtNIdhWtK?kbFS-bbi)0iEqq;EyeLdmU9UXsiLtP!P-ss5qx zpdIGq<)tbesXjnO0O^P5X9s$8I!ZrVkjb-ri!??-3{ttcsa&xd?#xF`#^IovWC;X* z4t$qL0}=b6OGqR3oPZs1D6|D;CFUO5W-uum&x!-dv^E{23*Tv(1g~_mHbJepdz*40 z6%>wcI!Fg2zXm~=LzK0VGL+0VclyYUbpp0q`P+f9Tke%yJ6rIDqq2L7Mt_E8*Llew zK#U(c5T< z=(h*V0pMzBTUz2fA1weO(S zW31pHXH8cVo*B<|(UsBFGSzX7==8e4Lu2I%I9iVUh{-znym`4ot*(#u^kgy%*RadY znG(($06&_sIXDYvzUSqn=x+0eoAu1qh2^sE>yg$L1EBRDU6(piM^a)&Bg^cxH-o}k zHLOwPbycw@E?6))l?~^0k;l4J#myWQHG0u-PR=!S z9blr)C2hNysS7kG~LH@ubo$g`B6DY^Z9ffmzj79Hp10 zF@Yz)7^oqCO%BIdzRsCebX^Ues;00i8fv`G3yiP(xFFj4)ojsl1#PC%Yoey8R=dLF z$XXL{iZ{BEePyi(u(%w)+(_=8qpi$r+qUXojdQcNbf!U_;bzU0*q}PZX??@nj{h$LWZ6iucf|5d{%ln zVQ#m9%3VFXJsS7E1)3_Y)C%Wg^ul&MMAfsJ4Usu-($ec93nWPBhPs)Zd3J*R_Dfwx zm7pBlO!+!KL2)RmJizbCQ-Dop%!`C=@zv{jdvQ$0gwIYF z7;3p@j88(sAA|1Q_?~e&>TebDHEr!U#aX0AV7M$ljUeEiXTVKJezk?wA{auH0?!~c-(49#^?oLpar6_E0Kco zs=5v0bB)f733SSpu2M0AcJiAbTW$cYX?+~={=(Zql|ZY3@TFNzP6n<5g1{y>FsfnC zG|1O>)}EZ4NJgM1pCdAiYfzXAjl;c*U=!8J(_d(!i$*@BR-eny+J(1zuTl7$K@pC0 z%#`d&D%(bd-xs2f9Bjo|95e?XMsuW6Kd7%+H4xqpQBrJCxoZ4^Sip%va;BDK^9y}IVvN2E z#_L$S=UW+|4aXjUl_JR1bekKEiF0Yvm=*lBONWN=%cmVJ$ux-6rJ<4LjG!6JVSglu z8aI(xy!|Hyy7Grj^}SG|z0gQ4HfI3!54M~ndip;8pMugY!bUMWY&*!8;F_%Tu>4OZ 
ztCQXdiO&Vw=atRvvbl^~RU&44$On-eflHu8u#4Wa#xFz;0jF*B)@nYB{A!^kHM1#t zQ819nFd?C``>~~xJACvjmHZwn1Bu=OeC@dMO9ET;-*!!hTYo0>&MUfR8tsui96_G`-rwInx1j&&gx5XJ{OnTH$ zY=Z5l9*qS47U;^)QFGT1ybH>~2P}T%d{u>(9FNkPnNnXjf@v3jDvX<-jmARH6(9Jy zRrpA&H-`r9UbT{@%d7ijF8AJ1!_W;%bZuF5eiO+lWQj5zgGg;t226`l^okKzUZ&eU zMSwG>iiP=mnp!TSajZnLQATsP4)$`k+A$O+yn57WWq`u;chw)iR_B~pN;4_l`^U}O zZ8>%!PcWvv1!N6?Yy#l!dcTZhR_fJt*4x_s1wL&?22UtuN<|bOKRMop-+ZY)~exh&w}vsu?QI2^ZpU+#f5Dm7@~1cUF$? zL5fuF`Fv1;-UM|m?WURWNR!argon5LQ_^|lT3%~n=t@6t*x|mXW3*4Hh=oz(R>@cu zrY<$BoxPVfGco}VeZ=~+ThUrfdm7|C&``w!=S0WT^6q{QpAK=SwpjU8XVnRBo1@{G zUj))&K2&KKb5Lze&+E%aS>#eNoQ|l;kK733eVRF4wG!)M{>OMNhEI!=bvrz#n=F$U zy*ilrZIswGbp-kDKQfqlUW4IKwAvAW}|==ytjFgNLtabl4!sZvHe4ic)5o|nU%$)^4yhcwc31- z1{;e9G@!h_S%v-FVxVen4BJAj9ut=zvClblt(!}uhZ)T6fo9s{C}Y^L{G%DM&xNd? zhF!iQ2*o;W>c&^z48SJBm&9L9H_*K?=V=NL#FTU{Jpph*?K)yD-5hEPF8d{WB<(G}V;xW=fm#CSeH|3trp z-?@T7(hcHlkp_uMT3+44--9m<7KMz0FRw@&r@x+9Mm|*FWyb-`T_=zV?4D?Yi_><2 zLcLg57T?Ct9B*OjL~AXOv@sDvZki=92RUW6nrY2KqCIXiWN4c(leuZnn!5H7O?KU0(0Y75&MF&GmKtpK+<69^d4txzehb*zgvI ztdVQ=WUoy9!@<_(iRhAn?eEPSNr7%wBrG)iv-ZxLn|il*@)T0z{Ml{X0xjj9lyXvj za$}6RsS-_wO)DJufnS9ZQ))?FN+ME&Qz-&o?Lu{t!=M^)ZzleyD?CEYOa*M30 zy|V)6pA@N6UPG5<_o9TTjcI@;rZXsSreaNJMEiLN|9!KEUL{8bS4iA9t>Xqas5?+p z;v52j6E4~?<2@M`}ReHC0#@~$!?dI#y&Z) z)#Q{~^sFk@guR?C>r@y|WmB#W^Rtk+=4Azdnc_&^{)g4Sn#?56wr2f^&O2-0&p&VjKF2&xR z=Ho41P%RIT6D^Z}KKO?=A-v~|5o}kv=TV_0!Si(+w$or&n-XL}gxT`eO*ePijwPaFB0BS(V5L>xCI-GycQ|8&Sq}50ZC9lHy=zHzJ{#f9 zID@F0csRxN*1!N>l=32xRmyG>7Plva4=5ZqUf$wcv8}N(KSomLZnIZ`_~-=AiaRrV3prNm|dvV_S9O-A+)H1ZR&{{dIdAyW}wq(E1 zU&GlnocMfZ;ur(3Ve&wKl&r3yy>L`hSsO@c^YLdQnsUmF*7CHV2K@3-0O(S;t1M~NM3`|DEX)-lGDqJ@I0r+7X1#nXLAJikeQXnI_yvnufyyyR zJ%7^F)ywjA!4lW>ww&=M>A)>cl!(JwV@;KP)#P;t9Mh1>!VB}&#vypjh7tp=&Pm!^ z+3fFADlPen+SZ>?pP}QSuj(AZ45ssAUx80!Ujvo4m==wl@I9Cw%+kOtl zjX~iA9?@b!-2NiqT>c_Eoc{SgANiGEDxyB?eI}2g2`C|wGmU^4$&Oh!K5i%p63mH7 z#e}JpiWKgVf&(*D*gW8kg2Eu9z;`E7xy6vu4GEUWZmYoP4D6OzS;_it+X&L6kn3}d zOabQ{h8&ZgU9xFANiPMwXy43<7~Fq{72eQsk?IaPTxg1IkMsq2`GW$cwI2n(31I|4 z8V(b#Ty)?EQ2=^G@uT*-5+zRK6xwbX0f!A-zw4oKS^)?xUqtz5KX93YJ7tXi5W9&| z2z2mbgTZ4(0+S=kg$OMxb_10Xk8d@SOIXX@c)<3SrEG#Bk0foYyQ{X}^VN67zK(OK z_-<)}ML2v7N`v`EPtPgn1LxnLgi0?GVUy-a@Og!ym8D3K6unRv3fx@WPeAq*`|{Q$ zJSrcLrBxWBhj{N{FP6DXZ-&}Kn75Q))`SppkO?97y3GRXVT-A{bx71OC0gv!Gld$9 zn}G6}$KLX^AdRytvNeUvQg41T8?{wsVjx&iVBoCI6YvVv1g>Q4Mp|i%hmF+nQ--e@ zeI|I9C$dDASYESHwy;r%8nsTPo4qgbfh_{iZK`8*eyHMleZ8l5ThjW1$s@$ZJ?*Iz zsJK4#v34PvWDf3>eb8vvfsC8-0yc^WQRs##FX>x(#yNx;zc#zq2By`Pm-!;CUdqh2 ziRIDF%k$L~>$SS7*fx+@9e}R8nH6=q7zB0wn$vV}+jJ0A;Is9oNIIbu5qLf->&N)x zO?4}#@A|D9m>9F<=Tn1y(}6SlM9XFQ>7`*#^}-?tq6%kPVUd9oC-+-U9?|vYEu5ri zy)7jBu}BlFP>Ws-i2IrG)g&TmHJMCAVKkWMIaZITunT=|fe2@NMt6hE%s%B05?l0e zhquZoIiB#F65x`x;yfZwXrF!j> zW7$$pPt6%syxsks~7A-4Gf`FX`VX^w7f=|Rh^iMNcB2AYj60#lZB#e;-d{dCZ z5NbKw5eTPZ=@gG~$rhmVaS1H1jd9_0OAyv>pqE>b>s{w7_m6^gV%I8F+C64N;4TEu z=7^MTWN%LM(-3_WCaz~X4+cf|uX!InmBz8Nrq%SIlg@i1A|Uo0QFqlumx~nJL?#v5 zp>d)2N!CoUK^s&J)l7{GkMkPbDCFEK91Jbc91jdhmneU0{h{t)UYpemR%8+1*;Ppc ztj0z8qP~T8QPCeiX1WdI3G_U`wOm)3D#KWM6@ZeSJ0c)cGQR!5r8Uz|tz4}7DDTN! 
zS)`LlsZ{D&v`T#xr7_?f#4ULlFZ6Ct_jjc1BO7le9dpX)_bw>!vr{9jA7 z&y&>9^#oFe`x!xLjV;_QK{6sN>$-}rR^pb~^;CTS!NH{=!bIp6p0qs<;rja_#~8&AK$esF`$T6?k)U64u| zEF{K=0#p9LU`-cKC#lziiRGOO;)}>OygdhQ?w)U`g$bjORh}%}S3M{2EfP!bo~Yx* z&s3QLSb-c|oMqoZl!Un&>D})L?viBu>xHI%-QrCEiSsZxhF<4IAYVPI1zuV)hDXCd zQ4<6M801jF>_`TClZB>J_q?;wB?Mr2pUe=UJQ{y8Kl-UJ)YdMDjf{CXvL&Ix2rJKm zdG-c9qfw>r2Ae&*0mcUnYYf2>+pjCN$*4ZetK09W@p=m-Icjj>ZVb-2-+8~M^PWhx zlar8ufH-i0|0liwcL`sZw{)I?jWytZRtDNsHEfnyk-gR`=$UmFa1HyUxtO&#vL%(X zG^T+ixsGVE@`ysINT7+gCB2{FDPr>thm_{a=<0hN9^oefgPXmY=oTzt1m(3?KZM^4 z)B7{-K+)8s{(5eiUl0Ga{g7&rkhw?HZ+ni7$;nT~O9TBIVKqXE5 zwImK1UxiZp99n{^h4ZT#M$4ER^>$t}lzm>Y67sQA%l(P{xG-wlBhhrNY?rohA{UZ) zcTi&z8d{PZDv406t)$*<+1G{LV)O)6O)V|qpaWGB9J#iT7q>?52nTX8G07q^41WiF zGC`>ou%4q9x`stXHN!r3WEAW5Am0k&Pljwt;X8f0j~m&X;QW%zT@CGg(H|5w$w$gI z8Gd}*Id&<5hb4V1N%q_aK~APDb@g22$=MD*XP#6XNx3Kzxrqy;z=ML8DJWj27^cyp zH-M-q24ljHX9odS7ZM4-@BRw9`}AciLwM|wL5enh-^@PwZZ%gE`B8ZR!{(TpxJv7LY*!cM(0n!I|~Dlivos%h9=- z6Ha!N!!FVdCF2)mzjNo1F_`{3kP1mFwC;U>&~6zvC&fd*QLW@b~4(-P>e(BN$5i& zg(Pe0u2Ft z!7CZQ3tBCiTP`{?PmQ^8=)5$B!1%BEJHRhrQ0cqBY&cYgR(T?M%!39|@fHW=qB&%# zMePpvJuP06VDf!zOhmYlnDVQfOj6Dc+HXX`=(j?HNh9G8KpDcX)nX=LQQ#@y4oy^kPV?@Xy6}0~| z2Ut{J;kVY6Q)y=`oR9rZT{R`!G)G;RW5?*vAwj5UUpZIG5eG0gy3I@VboqY;!XGIo zwFZ2OV)YTpb3Hl3K>cb^TEndvJiLFz)2H^JRuNgbIj}`;4$UJ}#QNI>_g8cB#C- zy;}dymh}a%m#3w0WW@!t;ApFJaeGSYm>$<1jG~{S;c{ z*Yja;U2@*r&`C04<#1XBgwx6DU z=x^gD2{U)Ff9j=U7j3?Fi@zarH2HM+<~M~8Xq zWU!js$zM8MG+i1Y(qmoc74+SM=&aSVLV*JT5xy<9|8s%(?>xx+imQVW0AOZq^3Nq9 zbt~s=PJ|aPZ{cX2Q!}L{Hx-#f+}d`4N*PG|ISqe8DZx6iKyAkh`0Q#qRawbojr+-i z09c$!(2(IyvTw@-)khs_g3XY)N6qu%m=>Sje0ajk$Dfm?+Frw+9zpfRIDUS#AF{L6v=;us6d|KS3tH1dWt#CP*#Yl=o;kR52!eQrpTiMB|3^UKG)5YZ5*sYloI_ z{o|4FkPyu}+oab|TLnH`wxrbpxJ06j!C5GIiriRja8o~d3SFCQJ*gk^Af67*~ADRmafOC0P6#zESTiJ!0cP zvv^+X=CcbguIHCT&#~|sond*AUQ6(wgIAV>rZCya$7*#hWTE_UEw9NLFFz4OUPP<* zffIy%L3D*&o&5*6E_}|f2U53vA={Naz3}hPjaG7D1o%zGy1?9a4hR~UNSxrBp=3jp z#@~W)*dcB>#phskGw_c(hvu6o_U+SzqE-g%+{6cUmbvYnvT88;7C@+D>5y$k$RzGJ zr(RtF#95@FGuacmLOjxEjRw8nVy0BRGhS{&&)gLa!P3VvqGb<}6`Uru5~l02e&SPJcSEn8^NiXv^1CKuCOZSD*Wl zJsW|Mn^g>%18HSHLh5GF5yZpa+y|!dRM8A_0cga130Wgqd=fut{Hv&Il9*VQ_L%iJwF>0#a*d(Jx^0*N;*pXru7 z)Ueal@}^kOo^e)uO?A||920N&i8J^*9414Cc@ix(Z+>gL5`J`Wmsrr`6OoawEVltW zyyAxLgsZr!w8?>})8nL(38k zK%MX09!;JXIx8U#mQHXPDT62vo?Fj!Cwif}JF>)RYC+%e^hHoHZ#!IisIh99{W_Gz z;l5#2*l0GZo2P+dNo2})zp3J(Igzt5> zW_B*k=GV&-`Q=hc-5ve zxe%@gKUz%cAUa@+Ruk4!wAt(e3z&?zd$@T3kK@4f(rX5($Y8iew=em&6z^VTQ0ynr zAw(}KF#DPMXG}lV5^RoAU>?5I&Z|MZ`ik8BX;&uKkSu-b70M+)okAsu-XO9MMZN9j zFTreE7viVz%CBJWPQsfkulx`U2*{KC4=3>l7pR}4rcdVW%=`9E*HScfw9>cMGqZG{ z1N>2>wY4@0m6aBSgT{O(7Ql&%3CX{GzrJn1Am4ZhK#$7&^=~vKZP_o1!q6Xp;o(s~ zBI6;!!F+;8MnXVBM8L+v!a~C!#>2(OC8a|lWI`q3AVGyD!9u0NK_Vl>W5!3}AVuRQ z!{DVrB_YHoC#NK&;i8~p!lL5Gp%*5i6Chv_qhS-I;#FYc7Uty<;1d$(mDc5w)#p_( z6=fojWF-*eW0w?QQ~gY*EI_Xy#I7j8ryCc~g|E6zfUaYhkz2I6qrbC$m{t*PgZw~H202G_5P^p zTdW;g?iihI9^Y)A+3qg;Hdve4Szj_-_kE&+X&I-HX-3+l~2= zt%aeDg~_9}>62d*+rQ?H*QXA**Uq=5uMSt&_f9tt&vwpkk1sBdt{yH9H_lE^FV1(L zFLxd;4=*k+&u$*C?w_6?FJE3>-Ur<4tIoKD(c4HX6&DgvbXhu{Qb`5?`jEBO zndJU*Nk5d_phN0l%$@D*ayEb0roe|$)3yrAc$+J>7BXR9YMD>RrO3S9LPl8E^7Ak|N8huR#O=+sFsX%IA%2FuxPz(oIL<(4dAAyv9%nU89>MMZbyv^!r!Q|*A z^m0vF!-W(x*}3E{s^z)Qq|epyyW-a3nFCmHwQDq{??Ii0GIKa4~ zfSTLYL*=WYk{P324l5t8$;zC+Qu|e*F9VOKYy8ibS00J_<%>-Z&kD`*63KSE@?vMv ziYbnTt+dL#vh?S-^?PZrLmm4L??V0StK)R<%>4uje}NLK$Xe~0+*dk6LYd;j;}e%| z%C#6Jw8^m-D&S=`GmFhr9sPP%+F@=f9C?UJ>WQ$LFNVvaR!?+f!JlbXP`DUbhBIrp zfMZRQc%OAFdA*&7!dUQ{%FQLM?osITF8py{Y-k9hd!TBF(kdf7&0nb8BV_3l)W7}! zyn3#rj(o(bU3oo~`l`LW^w8vezBv$$$LIBQ_y29-wcrjEm;`a#+0g3bZ3F{}3rh=? 
z3+np)=SUsp?J6l@MFARdX%V`AktE+(TxAJT*4^|7fwyUd7xiH))y32xFu55%xSe$a zU2#nHPyWAd90@^+C=tvST{q5eS5te{tfJD(;1ROz7Gl^QBq;g!6T0dNGFze_(k~sD6rx{?a8u3v4tUG$%fJ)6s33I>CZEtJ!G2|kw7KwMKmA=VzJbD z{c1-Fg(z1{2-w{q^Vbj)xym9x@DjjE911%|W<>@z*9Oo0@EM<*)6Vt$ydb4BPvFCt zElu_j?$fFU5ya;spD=i)teHLL2Oz;d9)3d0sIO(8t(LIC-82~qNL0!5V7}}ixVeP7 z^(?;D;znRG-JqJXDoR6L_70dZlRx{|?z`wK!{oZ50kg{+6+~(DFAK z^{;w=&R@LexqnL<#=m8~|El=snD*ZrxcX+iKPNl>(ELv^@9!!>N&j;M{0GV(X8Q9) i@;+(*mJq7{FonxXg1u#GfPg-|eQe+6A9$MgqyGa7Jc#%J literal 0 HcmV?d00001 diff --git a/pandas/tests/io/data/excel/gh-36122.ods b/pandas/tests/io/data/excel/gh-36122.ods new file mode 100755 index 0000000000000000000000000000000000000000..3dfdaf976da4589b48754606c8136196e68132c4 GIT binary patch literal 8974 zcmdUVby$>Z_wFE|ASu#FmvnbXhvd*8HFVdIBLdRW-Cfe%Fd*Hapn!CDOGy7*@3_v+}#v?jb+>h=73b*LJ!a%zq8}u4WCgF*h*=J2E;LnG8h_ z+4wM{`|k7m84phiF~P$-JnwI?NdLg$uK~|=l!P(Y5*Don%qiST|GZo(!8RXOoxBTr z)*D=z-=@NDNJH5#WmdCE$J@l%kUPygHCanXoJ4qN;gg$A+vs9L%cB?P$H0YbHBy!J z+%@SNzd|!v+ZS$oDdg_zFa$)RP!aQ!6?c4|;Uz>9b)e&RMugiLi9qlyZ$U=oV^FOU zG@zCw@~u!PqHMpfqtCNOd6_P@xNkt7w#?n79)0MTPZq!a0bSV0x`fO1J3w;+hD7WOZ za*mpX+7<)Z>8tS)L&=DxE?UA2KOu<_whI!HQ)m*6kv>R<8{CysvWd3Ytj~b!XQ98tQ5gWN4 z-KfMFwZ%WVLp+N53=#T@ZY;g_MZdwTl~3^cukD;acknd4-ieJ19NwEhrgc9-J}A*E zy@9=7>-RZV1GRT+UH-3Y{rK)t0Xw=`8H4Xvdsllie3}#cw#CzL5l;L;u|$ACbOqZo zNwSU0wDm*r=q?X>ZVbw&v0`ttdf2YF<8x4z-3VgVTiC{gC z0Ma3Lu6!I!A)9dioX(0Hv+}o>C)B%;3(!d&!A@hZzs#fQWPra)@MbjHGNK{svay){ zAU+HhD<_4v0eZG8o;RRd#)8Qnt|P=rb5%146z$9xW_s-0n8&WQb>Z9uog8LlK0GdK z-ELOMK8W1nIl)ISj@&}SJjm${K_4e&~hdi!j}ZbM_RYQyA37xLoDXI^B@;$^W~EbkBGu8jnnIGL8vR9+7>DR#8H* zDep2YrWqQ7jozvjXa)ltEYL95wJ-<_Z%b5`;$E0;$|uuJTV|Z|m5)k)$NwEy*xME7 z0DiiXBr^rGget***xW&nY{L|LCC90QHB$@S@_A0gf*FlH&YS-8FW0mZ(cfw6)fKRW z>A?ZX3q%;kp3L*=m7$3Z)+e`dB>rAvHg!>89IOz_@G?KfKvq;@7la5*RcVpT6hNbm z)Hlm(wr$MY^Q&#fuhJPo&)+j(3MOYrHCR!7_S5ec7TrtY&HlImtMGl3qDCW0wq%nw znMY^PL=I><*dJW!ZG_OO4^sQad+aQgd$M5A^yqHUYOOMbOzrT6MX#Px-qglf7h}<1OdVs4? 
zWkV@~wVGz|*hg?`78C@J8LiNIWqP!#ob7Df-c~)9EW%)|Fm9EIh3eSWt)T9ZxS|4# zyiP)S-V)GAj`;J`fRk^6G|CCnQO_Epja*!8-y+7yBaKx!fnDOuR5e&0%-gCePgDqAxjCV`{TWeR!Hgq!`bJe9+4;TA127s;i;5EOR}S+-)`%SeYH>> z-+t2ku2bGG3MD3#VJg?F$efex)%dWL^6X4Dgef3lt(qoV4DvmJfls%gw9UgkM}_LW z4z#ZaD}H3Kta8g%CT{1@jW285Rw6DRZPFAgkm;&3e*`5fjX5Ed*&VIE<3VXjuf~}7 zhPyYiP>cdjrcV?f4cdqK4_vmM2OF2u{4a$u&o=|LNQ1r@ogZHd0xyGAlKrZ#5h)ej zb%=>}hdHNGtdW;?qLS|#8)efh%tEgS3h1Fj&Vx)Abs zA2w9f#Th}3BvW4Xkcu^e%|mv64WuQU30GUDw=^pD3i%9U|P z1=7@HB2ST79yOiQEJ;d#PRm){kdbZ@mFshPJVJrNJaTd~tbaaps*x4g8LVI?!;z_n z7W-+uj}=nigBQo4DDeu)sw2@}fRTcf!5$r0{`U}8bv@YTh#=M%n&OFQ>cBNv)tK|SAQ2#7 z`)v%-7R#;m^AI+d$L-Sv;i6?boyKo4yCRC_D1^#3vNGmWiFD{wLn|RD7QHxrKk{^rei^m%GrGwc6N8FkJD2oyk+FAG--1{DjnX$M!Jd`*_|)SKnQ+ zQ$9Axpn+7l1UY}7QG1lrHeWaLOyla5)H=vV+@D z6M{$S=QM|#`QKKnPR)ZqS?8{9<-i}=IqSwYN}eo`QmJ>rP16p)pdwyaCB9aD@%D3a z;w=iy3z}>{p51qVzVM-sL9leqTl#_N-ybTu$KYnSCIBpnfCUV1NjfPO<`GPXUIjLD zKvU-o=OYFAW?)}hOc2K%O16}{C=OyLDQLYlFmSFE=qRb$T|0rPeZQ+yTsKuxCvODh z$U4zWA`*0Jz|4l@48cMm+SPT6|c3{_AD|^-=f!IUk z8!On~_)U-z>uEsxy1sU^As*Meitv=aB1@lJ^P5ghhb7r7T6uN}$0Y^aF+1Gy@$pmO0nDp8jzc}j(M)T;TF&ewOm4&K=2bcco~9*oZ8D02cpk0^ zQ9`?c_3TXe@-;-w4yjTN{_9GR5ZWR|H5xph`A2ozsN3wo3s!T7>)HoWpiYr!soW^+ zD|rup%op_PX(juUQ_rbKJHe1RkiW56w+P$X7}f}*+?8C-f^Si)0;^vlmVX7NCpdSz zJwJbb(|}G?J07pID&va*DI0|~c~_Qh`ZeSlxL9=i*Ez9KN=fJd5dg5z{F%6p zXlvtmAAa8*6rmb2;Au|umNRv`7;{@@N}j#R;u2ZvM4~C~fT`3?EV6itZjv?_A>dZo z{(}YB9I;sIv9*PS4<1ePg5^lx4?8Lr+F)`FvfQ<-ZN`fL{TdX-gK285&>OKmbz zULrI2c5r|`HLI&qf%hf(;B0+_$sQG+vREZ4j^!A2MP(}&Sl-@pxfQ|CmGyirGoRV_ zHEK(g*r-VXHhFO0>0%{5hCVvudFsAPrteFsS?OeVdJYpK$+@kk0bMuvAYvV_Jef9$ z@g5=`y7{$%4?E6T1sWDB`P|Z7vPxo&Dxp$O>UN{?&e)5z9AsGEi7x1q(9X^ix~8xk$j0jLMDBfVHS}U^bkH9tl-A4 z5;b_C8y(artDRh*G4~-W(tlI99zS}Bmz@==j_g`FNGJT5!>KQ$hlcC>s(IZwe{h@xD{Metvw{GS|jE*C9PiIcCF;_Qf;`SHTx+-3+(qtFtc7UD^V z*f7IQ&u6h=#{)#v>Kr_SwFWu3_y|54l&)6LnYbB9^oje`dHllaGJR9Qg1RBjaI997 zyd}dyU{!*Kz#EjQbaFj|ip|iP8SDKv(C~Jawv&`+q{`%_u*E}KDe>xfq4{2-1EWk4 z!^W}6ATD~AS(DdQ(57&tf%$B<5xHUe=&VHa4y^RpY1Uw#a{8XNs+NXnkMQoYr=?iv zKyx)kdH3o#jw`~s>9XF->)BJ!rIO86SC?RT)Rz%CS$OCH-{ELV8q&B|c~<BgY~+|s3pwbB7E4hZ*A9g&gRu+;E!Ju(NnFX-^)DyAhQ}ffm1CchYMyD6KdK6^ za#vbq&=L(Hwe@_PV@mq|#I=d60 zhJS&`M1exN)xn!xkDlb~@rEt9p{8o_zyLFCe%zWirUH*&3r z7!>UpU7S?ZIvo6_j~``A>Xyepje6A8-b#Sl;t{$@CC@5Ts`n|=`x+t;r!=~2dp@Oz zCp!ZxUf9`Jl~mV_<3)BcLrU;Mn58T&#d2Udcg+dNU-QB3^H=W_%W(~5zw4-&)pZBx zj%*VrmaZA>mObv$7S6}7^J%)Y9U1bk^2wQ<3|?W^eoMO}`>r46c<&RT8+f^-p@#xn*+YjX@QL$9nEb_{}ZZf>VT(s(ckQs z^?px1i1eR!#o+bj#L=2!PRN12d3yRFsaGqU!f>Ok%t+K`X$lHQpO@O8p#O@uNtZBn zHkN6=RK&Rwj+Ewz?!DWToxIm2uA*S8!yXk^4cGAWaFI;PP(GTCm&daQ>KuqKH%X7g z6xK&iB1iJm(2@4YB2^kwd`Waxn{Z*~<-zG;&dP>gQ?atI7W*}yV>O&WqA1??eWeL< zgSy$<0~OM30?3w@g?v)o;!tfk@XNNpo-A)1Td4PN+9vYWp5IciGrX?~?KnN*i%)jx z^RQ46HZuTMHdu$~@Jt!INRzsSBq_7+pQpU0-HZ`d2PEpRSP>UG-mvus zdj&8nJ4dvB6k-UHxGgg+Ou`#TZRRfH{$UTS>cH3@xJeC zh`*1|2XR;;@i8jLz}gpm*<;w^p!Ea!eV9k@cqdU1^XcJnS~+)k67Q7N4goCdqwJDa zG24B_JqG6US09_L6rSLr?>WW`tRTxq=di7&8hSiWjKXEO+RwUjq;W6lM?htW~e-0~1#=o;RaNkwjH z${KyOtt3YBettmn!gPWsxwOUA3$Es}K+c&QyR&?gvWIKGgU0L-jr<{<&iY5G*^Q(g z@4;OE#wZM%`9Z~iMFbNYo>%kI2p;CyB2Nl6D8r&$iwpg_+E<_Z`E^-*vKe9>-Hp{$ zQ?$RC)PG4)t*56f-#JxqOO-foQlQ_IyCDIjWm_Tyt{@Op_iHV>Zn_6j;BLPO(u!uF zci>W+Bp(dN1LjijGN(?yBq!2rH+mAFEdTs6`eHP+-+dgXYn_j;ci^F+7#TX(gAS3t zLXuu6{FBa!I9Uctp`=um^M>JwqkyL8A2I1)e0gyxK;R_We!25>m0_PHhBIdpvdj}f zEPPNMMA$W>&O-B@9q5;76^jC*&aed2WwAx(f_V!~%s z_|Iq<>Dbtq*m>Dl`8oJ`xdr(I#QCK(BzU-G1bJjc1;m9!#6(|8NlL0o2nzH zloe$)On`dUnmUF$7H$?AFYUEu?e!FG^pzoIY97|QHl~L9md@r5u3#%;4{Lo-7h7{k zupPwL-8;}TFx<;h&DQ}G_S!ty*FE0H&iAeNJ0JhBu(05;=$NR;gqY}r#Kge3tl*U5 
z#MI1)%<{PW`mFe%_i6FDnaM?2@kO~AWo2dWODalB%kwK5tBbR%%ZkctTbi32p=}+F zJtK|19~+04TSk}q+v+CYu3KfAKMI@z}}^J#H*>S%3pX?J^V=V<-(a`*Ur_x$E?XXET> z`|{iR)$!)R@yYSk^~LGo_0`GE&CUHh-`@6X6B6D{@{qKcu!`%{?iU#aWgw>4r^kWM zRovnHo_lA625J%H1Mv;vwN^;Yl%KFK1qZy&d8gC%fCam>7+uKfJ9y5TxI7hht{ z3;bmMx-&Z~hx}P+%-pjx&jQak>NJ%0Dk*s1*_}Ku4Age1amX$_PsNez9*)^MxEDdj zQD3LEDAaOV%t|}&QmZRnN)@?U@HeVm6SPh|Ow(iFfS^@gnA(gCW1ae&t~0qXLf^SE ztCa2En7_D4%Kq|g2uCNe#`cRYR`l?IX;Z}ouR*Cc<9r2n+qMiTWtRZEd>yh5ZA-0qH-4X+&#pT6HMfAP@EXZKp-6bckBFrEyFTwO*Ve2jo zD~Xn~=^#OuIOFl_s%5R#RDCXw9SQM127E?F#UMgQ20C0C(8+77MjFuYdHA0RJgoSd z`dU4eglfA2M0}O1>Qyp<-Q@p5pFmBKP%J5==D^4yf;?n&&G5N0ZC z^)J_tyLt0v+?r?LWt~e=gThdrqCxlM0M@E<@c^ywnh&T>5^&ZYVA`I`s#PBCF_ zPLbtK7@@y&jEXWa4=@3L7Nzgk=coOUsQ-!jc?SS+ujBqSmAmS{i_m{>|0favxR;@S zTG(CnpG^Ntl>Rf>y-57i2JWi=B>PpQ{xi?thX;E%jsN8NyJ-DqroZ<{;5SUair9bV zxqo~8G^gM2+>h)(g7Ne^`F# zxW6jA_aVbiQ@CUKukW}M#eZz#AIM+z!g~eyr-j|U>+k-X@s}R_Pt>p3qx(a`PlMcr zG(VNaf8u@(dhT;eKP?OGZ<(fl0)IWy{C$ArcW(062OJoL@=qhadrkk=ODoF2KfJ%~$=%2J&gn!G-Cz9=7`lA` literal 0 HcmV?d00001 diff --git a/pandas/tests/io/excel/test_readers.py b/pandas/tests/io/excel/test_readers.py index 431a50477fccc..33467be42dfd9 100644 --- a/pandas/tests/io/excel/test_readers.py +++ b/pandas/tests/io/excel/test_readers.py @@ -499,6 +499,23 @@ def test_reader_spaces(self, read_ext): ) tm.assert_frame_equal(actual, expected) + # gh-36122, gh-35802 + @pytest.mark.parametrize( + "basename,expected", + [ + ("gh-35802", DataFrame({"COLUMN": ["Test (1)"]})), + ("gh-36122", DataFrame(columns=["got 2nd sa"])), + ], + ) + def test_read_excel_ods_nested_xml(self, read_ext, basename, expected): + # see gh-35802 + engine = pd.read_excel.keywords["engine"] + if engine != "odf": + pytest.skip(f"Skipped for engine: {engine}") + + actual = pd.read_excel(basename + read_ext) + tm.assert_frame_equal(actual, expected) + def test_reading_all_sheets(self, read_ext): # Test reading all sheet names by setting sheet_name to None, # Ensure a dict is returned. From e67220dcf98894bfe617efc3a3c879a0de7438bf Mon Sep 17 00:00:00 2001 From: alexhtn Date: Mon, 14 Sep 2020 02:07:20 +0300 Subject: [PATCH 551/632] DOC: add type BinaryIO to path param #35505 (#35568) --- pandas/io/excel/_base.py | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/pandas/io/excel/_base.py b/pandas/io/excel/_base.py index 65e95fd321772..46327daac2e43 100644 --- a/pandas/io/excel/_base.py +++ b/pandas/io/excel/_base.py @@ -551,7 +551,7 @@ class ExcelWriter(metaclass=abc.ABCMeta): Parameters ---------- - path : str + path : str or typing.BinaryIO Path to xls or xlsx or ods file. engine : str (optional) Engine to use for writing. If None, defaults to @@ -606,6 +606,21 @@ class ExcelWriter(metaclass=abc.ABCMeta): >>> with ExcelWriter('path_to_file.xlsx', mode='a') as writer: ... df.to_excel(writer, sheet_name='Sheet3') + + You can store Excel file in RAM: + + >>> import io + >>> buffer = io.BytesIO() + >>> with pd.ExcelWriter(buffer) as writer: + ... df.to_excel(writer) + + You can pack Excel file into zip archive: + + >>> import zipfile + >>> with zipfile.ZipFile('path_to_file.zip', 'w') as zf: + ... with zf.open('filename.xlsx', 'w') as buffer: + ... with pd.ExcelWriter(buffer) as writer: + ... df.to_excel(writer) """ # Defining an ExcelWriter implementation (see abstract methods for more...) 
From 1f49b76d5b07099c93af1780db73a0bfd74dbcc1 Mon Sep 17 00:00:00 2001
From: Andrew Wieteska <48889395+arw2019@users.noreply.github.com>
Date: Sun, 13 Sep 2020 19:10:56 -0400
Subject: [PATCH 552/632] DOC: update DataFrame.to_feather docstring (#35408)

---
 pandas/core/frame.py        |  6 +++---
 pandas/io/feather_format.py | 11 +++++++++--
 2 files changed, 12 insertions(+), 5 deletions(-)

diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 27f9d594aabc6..56dc5e54e1d59 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -2202,14 +2202,14 @@ def to_stata(
         writer.write_file()
 
     @deprecate_kwarg(old_arg_name="fname", new_arg_name="path")
-    def to_feather(self, path, **kwargs) -> None:
+    def to_feather(self, path: FilePathOrBuffer[AnyStr], **kwargs) -> None:
         """
         Write a DataFrame to the binary Feather format.
 
         Parameters
         ----------
-        path : str
-            String file path.
+        path : str or file-like object
+            If a string, it will be used as the file path.
         **kwargs :
             Additional keywords passed to :func:`pyarrow.feather.write_feather`.
             Starting with pyarrow 0.17, this includes the `compression`,
diff --git a/pandas/io/feather_format.py b/pandas/io/feather_format.py
index a98eebe1c6a2a..ed3cd3cefe96e 100644
--- a/pandas/io/feather_format.py
+++ b/pandas/io/feather_format.py
@@ -1,6 +1,8 @@
 """ feather-format compat """
 
-from pandas._typing import StorageOptions
+from typing import AnyStr
+
+from pandas._typing import FilePathOrBuffer, StorageOptions
 from pandas.compat._optional import import_optional_dependency
 
 from pandas import DataFrame, Int64Index, RangeIndex
@@ -8,7 +10,12 @@
 from pandas.io.common import get_filepath_or_buffer
 
 
-def to_feather(df: DataFrame, path, storage_options: StorageOptions = None, **kwargs):
+def to_feather(
+    df: DataFrame,
+    path: FilePathOrBuffer[AnyStr],
+    storage_options: StorageOptions = None,
+    **kwargs,
+):
     """
     Write a DataFrame to the binary Feather format.
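A minimal sketch of what the widened ``path`` annotation permits, namely
handing ``to_feather`` an already-open binary file object instead of a path
string. It assumes pyarrow is installed, and the file name
``example.feather`` is hypothetical:

    import pandas as pd

    df = pd.DataFrame({"x": [1, 2, 3]})

    # "example.feather" is an illustrative name, not one used by the patch.
    with open("example.feather", "wb") as handle:
        df.to_feather(handle)  # a file-like object, not a path string

    assert pd.read_feather("example.feather").equals(df)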
From 1b2f1f47b90fe585b8a01705482b1193aa281393 Mon Sep 17 00:00:00 2001 From: patrick <61934744+phofl@users.noreply.github.com> Date: Mon, 14 Sep 2020 01:25:29 +0200 Subject: [PATCH 553/632] Concatenating rows with Int64 datatype coerces to object --- pandas/tests/reshape/test_concat.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/pandas/tests/reshape/test_concat.py b/pandas/tests/reshape/test_concat.py index 90705f827af25..7d6611722d8b5 100644 --- a/pandas/tests/reshape/test_concat.py +++ b/pandas/tests/reshape/test_concat.py @@ -2918,3 +2918,12 @@ def test_concat_frame_axis0_extension_dtypes(): result = pd.concat([df2, df1], ignore_index=True) expected = pd.DataFrame({"a": [4, 5, 6, 1, 2, 3]}, dtype="Int64") tm.assert_frame_equal(result, expected) + + +def test_concat_preserves_extension_int64_dtype(): + # GH 24768 + df_a = pd.DataFrame({"a": [-1]}, dtype="Int64") + df_b = pd.DataFrame({"b": [1]}, dtype="Int64") + result = pd.concat([df_a, df_b], ignore_index=True) + expected = pd.DataFrame({"a": [-1, None], "b": [None, 1]}, dtype="Int64") + tm.assert_frame_equal(result, expected) From 8df0218a4bfae50a7191ff0869e63c28884fb733 Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Sun, 13 Sep 2020 23:28:54 -0700 Subject: [PATCH 554/632] REF: use check_setitem_lengths in DTA.__setitem__ (#36339) --- pandas/core/arrays/datetimelike.py | 24 +++----------- pandas/core/indexers.py | 42 +++++++++++++++--------- pandas/tests/arrays/test_datetimelike.py | 10 ++++++ 3 files changed, 42 insertions(+), 34 deletions(-) diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index 6f0e2a6a598fc..377996344dbbc 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -1,6 +1,6 @@ from datetime import datetime, timedelta import operator -from typing import Any, Callable, Optional, Sequence, Tuple, Type, TypeVar, Union, cast +from typing import Any, Callable, Optional, Sequence, Tuple, Type, TypeVar, Union import warnings import numpy as np @@ -58,7 +58,7 @@ from pandas.core.arrays.base import ExtensionOpsMixin import pandas.core.common as com from pandas.core.construction import array, extract_array -from pandas.core.indexers import check_array_indexer +from pandas.core.indexers import check_array_indexer, check_setitem_lengths from pandas.core.ops.common import unpack_zerodim_and_defer from pandas.core.ops.invalid import invalid_comparison, make_invalid_op @@ -605,23 +605,9 @@ def __setitem__( # to a period in from_sequence). For DatetimeArray, it's Timestamp... # I don't know if mypy can do that, possibly with Generics. # https://mypy.readthedocs.io/en/latest/generics.html - if is_list_like(value): - is_slice = isinstance(key, slice) - - if lib.is_scalar(key): - raise ValueError("setting an array element with a sequence.") - - if not is_slice: - key = cast(Sequence, key) - if len(key) != len(value) and not com.is_bool_indexer(key): - msg = ( - f"shape mismatch: value array of length '{len(key)}' " - "does not match indexing result of length " - f"'{len(value)}'." 
-                    )
-                    raise ValueError(msg)
-                elif not len(key):
-                    return
+        no_op = check_setitem_lengths(key, value, self)
+        if no_op:
+            return
 
         value = self._validate_setitem_value(value)
         key = check_array_indexer(self, key)
         self._ndarray[key] = value
diff --git a/pandas/core/indexers.py b/pandas/core/indexers.py
index d9aa02db3e42a..6c88ae1e03cda 100644
--- a/pandas/core/indexers.py
+++ b/pandas/core/indexers.py
@@ -114,7 +114,7 @@ def is_empty_indexer(indexer, arr_value: np.ndarray) -> bool:
 # Indexer Validation
 
 
-def check_setitem_lengths(indexer, value, values) -> None:
+def check_setitem_lengths(indexer, value, values) -> bool:
     """
     Validate that value and indexer are the same length.
 
@@ -133,34 +133,46 @@ def check_setitem_lengths(indexer, value, values) -> None:
 
     Returns
     -------
-    None
+    bool
+        Whether this is an empty listlike setting which is a no-op.
 
     Raises
     ------
     ValueError
         When the indexer is an ndarray or list and the lengths don't match.
     """
-    # boolean with truth values == len of the value is ok too
+    no_op = False
+
     if isinstance(indexer, (np.ndarray, list)):
-        if is_list_like(value) and len(indexer) != len(value):
-            if not (
-                isinstance(indexer, np.ndarray)
-                and indexer.dtype == np.bool_
-                and len(indexer[indexer]) == len(value)
-            ):
-                raise ValueError(
-                    "cannot set using a list-like indexer "
-                    "with a different length than the value"
-                )
+        # We can ignore other listlikes because they are either
+        #  a) not necessarily 1-D indexers, e.g. tuple
+        #  b) boolean indexers e.g. BoolArray
+        if is_list_like(value):
+            if len(indexer) != len(value):
+                # boolean with truth values == len of the value is ok too
+                if not (
+                    isinstance(indexer, np.ndarray)
+                    and indexer.dtype == np.bool_
+                    and len(indexer[indexer]) == len(value)
+                ):
+                    raise ValueError(
+                        "cannot set using a list-like indexer "
+                        "with a different length than the value"
+                    )
+            if not len(indexer):
+                no_op = True
 
     elif isinstance(indexer, slice):
-        # slice
-        if is_list_like(value) and len(values):
+        if is_list_like(value):
             if len(value) != length_of_indexer(indexer, values):
                 raise ValueError(
                     "cannot set using a slice indexer with a "
                     "different length than the value"
                 )
+            if not len(value):
+                no_op = True
+
+    return no_op
 
 
 def validate_indices(indices: np.ndarray, n: int) -> None:
diff --git a/pandas/tests/arrays/test_datetimelike.py b/pandas/tests/arrays/test_datetimelike.py
index 0ae6b5bde5297..83b98525d3e8a 100644
--- a/pandas/tests/arrays/test_datetimelike.py
+++ b/pandas/tests/arrays/test_datetimelike.py
@@ -338,6 +338,16 @@ def test_setitem_raises(self):
         with pytest.raises(TypeError, match="'value' should be a.* 'object'"):
             arr[0] = object()
 
+        msg = "cannot set using a list-like indexer with a different length"
+        with pytest.raises(ValueError, match=msg):
+            # GH#36339
+            arr[[]] = [arr[1]]
+
+        msg = "cannot set using a slice indexer with a different length than"
+        with pytest.raises(ValueError, match=msg):
+            # GH#36339
+            arr[1:1] = arr[:3]
+
     @pytest.mark.parametrize("box", [list, np.array, pd.Index, pd.Series])
     def test_setitem_numeric_raises(self, arr1d, box):
        # We dont cast e.g.
int64 to our own dtype for setitem From fd20f7d34b8601a99b450a615241c233f034fc88 Mon Sep 17 00:00:00 2001 From: Kumar Shivam <53289673+kshivi99@users.noreply.github.com> Date: Tue, 15 Sep 2020 01:55:59 +0530 Subject: [PATCH 555/632] DOC: Added docstring for storage_options for read_csv GH36361 (#36364) * Update parsers.py DOC: Added docstring for storage_options GH36361 * Update parsers.py DOC: Added docstring for storage_options GH36361 * Update parsers.py removed trailing whitespace --- pandas/io/parsers.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py index 2780b1a7f86c9..0f1cce273a146 100644 --- a/pandas/io/parsers.py +++ b/pandas/io/parsers.py @@ -341,6 +341,15 @@ values. The options are `None` or `high` for the ordinary converter, `legacy` for the original lower precision pandas converter, and `round_trip` for the round-trip converter. +storage_options : dict, optional + Extra options that make sense for a particular storage connection, e.g. + host, port, username, password, etc., if using a URL that will + be parsed by ``fsspec``, e.g., starting "s3://", "gcs://". An error + will be raised if providing this argument with a local path or + a file-like buffer. See the fsspec and backend storage implementation + docs for the set of allowed keys and values. + + .. versionadded:: 1.2.0 Returns ------- From 918948734f80afc401a112cdb27afa9463121d25 Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Mon, 14 Sep 2020 19:00:28 -0700 Subject: [PATCH 556/632] REF: _unbox_scalar, _unbox_listlike for Categorical (#36362) --- pandas/core/arrays/categorical.py | 29 +++++++++++++++++------------ pandas/core/arrays/datetimelike.py | 2 +- pandas/core/indexes/category.py | 6 ++---- 3 files changed, 20 insertions(+), 17 deletions(-) diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index 45623f182144b..b6cd0e325f8a6 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -93,7 +93,7 @@ def func(self, other): if is_scalar(other): if other in self.categories: - i = self.categories.get_loc(other) + i = self._unbox_scalar(other) ret = op(self._codes, i) if opname not in {"__eq__", "__ge__", "__gt__"}: @@ -1184,8 +1184,7 @@ def _validate_searchsorted_value(self, value): # searchsorted is very performance sensitive. By converting codes # to same dtype as self.codes, we get much faster performance. if is_scalar(value): - codes = self.categories.get_loc(value) - codes = self.codes.dtype.type(codes) + codes = self._unbox_scalar(value) else: locs = [self.categories.get_loc(x) for x in value] codes = np.array(locs, dtype=self.codes.dtype) @@ -1212,7 +1211,7 @@ def _validate_fill_value(self, fill_value): if isna(fill_value): fill_value = -1 elif fill_value in self.categories: - fill_value = self.categories.get_loc(fill_value) + fill_value = self._unbox_scalar(fill_value) else: raise ValueError( f"'fill_value={fill_value}' is not present " @@ -1680,7 +1679,7 @@ def fillna(self, value=None, method=None, limit=None): if isna(value): codes[mask] = -1 else: - codes[mask] = self.categories.get_loc(value) + codes[mask] = self._unbox_scalar(value) else: raise TypeError( @@ -1734,6 +1733,17 @@ def _validate_listlike(self, target: ArrayLike) -> np.ndarray: return codes + def _unbox_scalar(self, key) -> int: + # searchsorted is very performance sensitive. By converting codes + # to same dtype as self.codes, we get much faster performance. 
+ code = self.categories.get_loc(key) + code = self._codes.dtype.type(code) + return code + + def _unbox_listlike(self, value): + unboxed = self.categories.get_indexer(value) + return unboxed.astype(self._ndarray.dtype, copy=False) + # ------------------------------------------------------------------ def take_nd(self, indexer, allow_fill: bool = False, fill_value=None): @@ -1925,11 +1935,7 @@ def _validate_setitem_value(self, value): "category, set the categories first" ) - lindexer = self.categories.get_indexer(rvalue) - if isinstance(lindexer, np.ndarray) and lindexer.dtype.kind == "i": - lindexer = lindexer.astype(self._ndarray.dtype) - - return lindexer + return self._unbox_listlike(rvalue) def _validate_setitem_key(self, key): if lib.is_integer(key): @@ -2155,8 +2161,7 @@ def unique(self): return cat.set_categories(cat.categories.take(take_codes)) def _values_for_factorize(self): - codes = self.codes.astype("int64") - return codes, -1 + return self._ndarray, -1 @classmethod def _from_factorized(cls, uniques, original): diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index 377996344dbbc..14f713530868a 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -697,7 +697,7 @@ def copy(self: DatetimeLikeArrayT) -> DatetimeLikeArrayT: return new_obj def _values_for_factorize(self): - return self.asi8, iNaT + return self._ndarray, iNaT @classmethod def _from_factorized(cls, values, original): diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py index 85ef3e58576e3..829cf767c448f 100644 --- a/pandas/core/indexes/category.py +++ b/pandas/core/indexes/category.py @@ -512,10 +512,8 @@ def _reindex_non_unique(self, target): # -------------------------------------------------------------------- # Indexing Methods - def _maybe_cast_indexer(self, key): - code = self.categories.get_loc(key) - code = self.codes.dtype.type(code) - return code + def _maybe_cast_indexer(self, key) -> int: + return self._data._unbox_scalar(key) @Appender(_index_shared_docs["get_indexer"] % _index_doc_kwargs) def get_indexer(self, target, method=None, limit=None, tolerance=None): From 947c8f2c5d3ce99ac3d08dd8c433b98473e7b0da Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Mon, 14 Sep 2020 19:01:36 -0700 Subject: [PATCH 557/632] REF: _assert_can_do_op -> _validate_scalar (#36367) --- pandas/core/indexes/base.py | 8 +++++--- pandas/core/indexes/category.py | 5 +++-- 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 3d177e08bb0f5..15944565cb254 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -2250,7 +2250,7 @@ def fillna(self, value=None, downcast=None): DataFrame.fillna : Fill NaN values of a DataFrame. Series.fillna : Fill NaN Values of a Series. """ - self._assert_can_do_op(value) + value = self._validate_scalar(value) if self.hasnans: result = self.putmask(self._isnan, value) if downcast is None: @@ -4053,12 +4053,14 @@ def _validate_fill_value(self, value): """ return value - def _assert_can_do_op(self, value): + def _validate_scalar(self, value): """ - Check value is valid for scalar op. + Check that this is a scalar value that we can use for setitem-like + operations without changing dtype. 
""" if not is_scalar(value): raise TypeError(f"'value' must be a scalar, passed: {type(value).__name__}") + return value @property def _has_complex_internals(self) -> bool: diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py index 829cf767c448f..9e4714060e23e 100644 --- a/pandas/core/indexes/category.py +++ b/pandas/core/indexes/category.py @@ -380,8 +380,9 @@ def _isnan(self): @doc(Index.fillna) def fillna(self, value, downcast=None): - self._assert_can_do_op(value) - return CategoricalIndex(self._data.fillna(value), name=self.name) + value = self._validate_scalar(value) + cat = self._data.fillna(value) + return type(self)._simple_new(cat, name=self.name) @cache_readonly def _engine(self): From 74c352cc19ba6b15684c9adb71c5fdbdbc92e0bb Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Mon, 14 Sep 2020 19:02:27 -0700 Subject: [PATCH 558/632] REF: share code for __setitem__ (#36366) --- pandas/core/arrays/_mixins.py | 12 ++++++++++++ pandas/core/arrays/categorical.py | 14 -------------- pandas/core/arrays/datetimelike.py | 4 +--- pandas/core/arrays/numpy_.py | 8 -------- 4 files changed, 13 insertions(+), 25 deletions(-) diff --git a/pandas/core/arrays/_mixins.py b/pandas/core/arrays/_mixins.py index e9d8671b69c78..284dd31ffcb59 100644 --- a/pandas/core/arrays/_mixins.py +++ b/pandas/core/arrays/_mixins.py @@ -9,6 +9,7 @@ from pandas.core.algorithms import take, unique from pandas.core.array_algos.transforms import shift from pandas.core.arrays.base import ExtensionArray +from pandas.core.indexers import check_array_indexer _T = TypeVar("_T", bound="NDArrayBackedExtensionArray") @@ -156,3 +157,14 @@ def _validate_shift_value(self, fill_value): # TODO: after deprecation in datetimelikearraymixin is enforced, # we can remove this and ust validate_fill_value directly return self._validate_fill_value(fill_value) + + def __setitem__(self, key, value): + key = self._validate_setitem_key(key) + value = self._validate_setitem_value(value) + self._ndarray[key] = value + + def _validate_setitem_key(self, key): + return check_array_indexer(self, key) + + def _validate_setitem_value(self, value): + return value diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index b6cd0e325f8a6..25073282ec0f6 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -1894,20 +1894,6 @@ def __getitem__(self, key): return result return self._from_backing_data(result) - def __setitem__(self, key, value): - """ - Item assignment. 
- - Raises - ------ - ValueError - If (one or more) Value is not in categories or if a assigned - `Categorical` does not have the same categories - """ - key = self._validate_setitem_key(key) - value = self._validate_setitem_value(value) - self._ndarray[key] = value - def _validate_setitem_value(self, value): value = extract_array(value, extract_numpy=True) diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index 14f713530868a..e8b1c12687584 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -609,9 +609,7 @@ def __setitem__( if no_op: return - value = self._validate_setitem_value(value) - key = check_array_indexer(self, key) - self._ndarray[key] = value + super().__setitem__(key, value) self._maybe_clear_freq() def _maybe_clear_freq(self): diff --git a/pandas/core/arrays/numpy_.py b/pandas/core/arrays/numpy_.py index d3fa87d5ea7ff..61ffa28d31ba0 100644 --- a/pandas/core/arrays/numpy_.py +++ b/pandas/core/arrays/numpy_.py @@ -259,11 +259,6 @@ def __getitem__(self, item): result = type(self)(result) return result - def __setitem__(self, key, value) -> None: - key = self._validate_setitem_key(key) - value = self._validate_setitem_value(value) - self._ndarray[key] = value - def _validate_setitem_value(self, value): value = extract_array(value, extract_numpy=True) @@ -271,9 +266,6 @@ def _validate_setitem_value(self, value): value = np.asarray(value, dtype=self._ndarray.dtype) return value - def _validate_setitem_key(self, key): - return check_array_indexer(self, key) - def isna(self) -> np.ndarray: return isna(self._ndarray) From ce7b4c0784fe08938349c72789a34f82a2ddcac4 Mon Sep 17 00:00:00 2001 From: Daniel Saxton <2658661+dsaxton@users.noreply.github.com> Date: Mon, 14 Sep 2020 21:04:46 -0500 Subject: [PATCH 559/632] CI: Add stale PR action (#36336) --- .github/workflows/stale-pr.yml | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) create mode 100644 .github/workflows/stale-pr.yml diff --git a/.github/workflows/stale-pr.yml b/.github/workflows/stale-pr.yml new file mode 100644 index 0000000000000..0cbe4b7dd4582 --- /dev/null +++ b/.github/workflows/stale-pr.yml @@ -0,0 +1,21 @@ +name: "Stale PRs" +on: + schedule: + # * is a special character in YAML so you have to quote this string + - cron: "0 */6 * * *" + +jobs: + stale: + runs-on: ubuntu-latest + steps: + - uses: actions/stale@v3 + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} + stale-pr-message: "This pull request is stale because it has been open for thirty days with no activity." 
+ skip-stale-pr-message: false + stale-pr-label: "Stale" + exempt-pr-labels: "Needs Review,Blocked" + days-before-stale: 30 + days-before-close: -1 + remove-stale-when-updated: true + debug-only: true From 3833dc5a52f7f54e7d21238b45edc4f002afa336 Mon Sep 17 00:00:00 2001 From: Fangchen Li Date: Mon, 14 Sep 2020 21:06:06 -0500 Subject: [PATCH 560/632] BUG: add py39 compat check for ast.slice #32766 (#36080) --- pandas/core/computation/expr.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/pandas/core/computation/expr.py b/pandas/core/computation/expr.py index f5897277d83bf..09fc53716dda9 100644 --- a/pandas/core/computation/expr.py +++ b/pandas/core/computation/expr.py @@ -10,6 +10,8 @@ import numpy as np +from pandas.compat import PY39 + import pandas.core.common as com from pandas.core.computation.ops import ( ARITH_OPS_SYMS, @@ -186,7 +188,6 @@ def _filter_nodes(superclass, all_nodes=_all_nodes): _stmt_nodes = _filter_nodes(ast.stmt) _expr_nodes = _filter_nodes(ast.expr) _expr_context_nodes = _filter_nodes(ast.expr_context) -_slice_nodes = _filter_nodes(ast.slice) _boolop_nodes = _filter_nodes(ast.boolop) _operator_nodes = _filter_nodes(ast.operator) _unary_op_nodes = _filter_nodes(ast.unaryop) @@ -197,6 +198,9 @@ def _filter_nodes(superclass, all_nodes=_all_nodes): _keyword_nodes = _filter_nodes(ast.keyword) _alias_nodes = _filter_nodes(ast.alias) +if not PY39: + _slice_nodes = _filter_nodes(ast.slice) + # nodes that we don't support directly but are needed for parsing _hacked_nodes = frozenset(["Assign", "Module", "Expr"]) From 6929e262dd22ac35baabf87a5236d451255fb66d Mon Sep 17 00:00:00 2001 From: Fangchen Li Date: Mon, 14 Sep 2020 21:11:47 -0500 Subject: [PATCH 561/632] Move sort index to generic (#36177) --- pandas/core/frame.py | 68 +++++++----------------------------------- pandas/core/generic.py | 47 +++++++++++++++++++++++++++++ pandas/core/series.py | 64 +++++++-------------------------------- pandas/core/sorting.py | 67 +++++++++++++++++++++++++++++++++++++++-- 4 files changed, 134 insertions(+), 112 deletions(-) diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 56dc5e54e1d59..bc0e55195fb3e 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -143,7 +143,6 @@ ) from pandas.core.reshape.melt import melt from pandas.core.series import Series -from pandas.core.sorting import ensure_key_mapped from pandas.io.common import get_filepath_or_buffer from pandas.io.formats import console, format as fmt @@ -5448,62 +5447,17 @@ def sort_index( C 3 d 4 """ - # TODO: this can be combined with Series.sort_index impl as - # almost identical - - inplace = validate_bool_kwarg(inplace, "inplace") - - axis = self._get_axis_number(axis) - labels = self._get_axis(axis) - labels = ensure_key_mapped(labels, key, levels=level) - - # make sure that the axis is lexsorted to start - # if not we need to reconstruct to get the correct indexer - labels = labels._sort_levels_monotonic() - if level is not None: - new_axis, indexer = labels.sortlevel( - level, ascending=ascending, sort_remaining=sort_remaining - ) - - elif isinstance(labels, MultiIndex): - from pandas.core.sorting import lexsort_indexer - - indexer = lexsort_indexer( - labels._get_codes_for_sorting(), - orders=ascending, - na_position=na_position, - ) - else: - from pandas.core.sorting import nargsort - - # Check monotonic-ness before sort an index - # GH11080 - if (ascending and labels.is_monotonic_increasing) or ( - not ascending and labels.is_monotonic_decreasing - ): - if inplace: - return - 
else: - return self.copy() - - indexer = nargsort( - labels, kind=kind, ascending=ascending, na_position=na_position - ) - - baxis = self._get_block_manager_axis(axis) - new_data = self._mgr.take(indexer, axis=baxis, verify=False) - - # reconstruct axis if needed - new_data.axes[baxis] = new_data.axes[baxis]._sort_levels_monotonic() - - if ignore_index: - new_data.axes[1] = ibase.default_index(len(indexer)) - - result = self._constructor(new_data) - if inplace: - return self._update_inplace(result) - else: - return result.__finalize__(self, method="sort_index") + return super().sort_index( + axis, + level, + ascending, + inplace, + kind, + na_position, + sort_remaining, + ignore_index, + key, + ) def value_counts( self, diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 5336d0828881b..0a8dd578bf461 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -40,6 +40,7 @@ CompressionOptions, FilePathOrBuffer, FrameOrSeries, + IndexKeyFunc, IndexLabel, JSONSerializable, Label, @@ -92,6 +93,7 @@ import pandas.core.common as com from pandas.core.construction import create_series_with_explicit_dtype from pandas.core.flags import Flags +from pandas.core.indexes import base as ibase from pandas.core.indexes.api import Index, MultiIndex, RangeIndex, ensure_index from pandas.core.indexes.datetimes import DatetimeIndex from pandas.core.indexes.period import Period, PeriodIndex @@ -100,6 +102,7 @@ from pandas.core.missing import find_valid_index from pandas.core.ops import align_method_FRAME from pandas.core.shared_docs import _shared_docs +from pandas.core.sorting import get_indexer_indexer from pandas.core.window import Expanding, ExponentialMovingWindow, Rolling, Window from pandas.io.formats import format as fmt @@ -4409,6 +4412,50 @@ def sort_values( """ raise AbstractMethodError(self) + def sort_index( + self, + axis=0, + level=None, + ascending: bool_t = True, + inplace: bool_t = False, + kind: str = "quicksort", + na_position: str = "last", + sort_remaining: bool_t = True, + ignore_index: bool_t = False, + key: IndexKeyFunc = None, + ): + + inplace = validate_bool_kwarg(inplace, "inplace") + axis = self._get_axis_number(axis) + target = self._get_axis(axis) + + indexer = get_indexer_indexer( + target, level, ascending, kind, na_position, sort_remaining, key + ) + + if indexer is None: + if inplace: + return + else: + return self.copy() + + baxis = self._get_block_manager_axis(axis) + new_data = self._mgr.take(indexer, axis=baxis, verify=False) + + # reconstruct axis if needed + new_data.axes[baxis] = new_data.axes[baxis]._sort_levels_monotonic() + + if ignore_index: + axis = 1 if isinstance(self, ABCDataFrame) else 0 + new_data.axes[axis] = ibase.default_index(len(indexer)) + + result = self._constructor(new_data) + + if inplace: + return self._update_inplace(result) + else: + return result.__finalize__(self, method="sort_index") + @doc( klass=_shared_doc_kwargs["klass"], axes=_shared_doc_kwargs["axes"], diff --git a/pandas/core/series.py b/pandas/core/series.py index 69376d8bf80d1..48fae9a0a91cd 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -3463,59 +3463,17 @@ def sort_index( dtype: int64 """ - # TODO: this can be combined with DataFrame.sort_index impl as - # almost identical - inplace = validate_bool_kwarg(inplace, "inplace") - # Validate the axis parameter - self._get_axis_number(axis) - index = ensure_key_mapped(self.index, key, levels=level) - - if level is not None: - new_index, indexer = index.sortlevel( - level, ascending=ascending, 
sort_remaining=sort_remaining - ) - - elif isinstance(index, MultiIndex): - from pandas.core.sorting import lexsort_indexer - - labels = index._sort_levels_monotonic() - - indexer = lexsort_indexer( - labels._get_codes_for_sorting(), - orders=ascending, - na_position=na_position, - ) - else: - from pandas.core.sorting import nargsort - - # Check monotonic-ness before sort an index - # GH11080 - if (ascending and index.is_monotonic_increasing) or ( - not ascending and index.is_monotonic_decreasing - ): - if inplace: - return - else: - return self.copy() - - indexer = nargsort( - index, kind=kind, ascending=ascending, na_position=na_position - ) - - indexer = ensure_platform_int(indexer) - new_index = self.index.take(indexer) - new_index = new_index._sort_levels_monotonic() - - new_values = self._values.take(indexer) - result = self._constructor(new_values, index=new_index) - - if ignore_index: - result.index = ibase.default_index(len(result)) - - if inplace: - self._update_inplace(result) - else: - return result.__finalize__(self, method="sort_index") + return super().sort_index( + axis, + level, + ascending, + inplace, + kind, + na_position, + sort_remaining, + ignore_index, + key, + ) def argsort(self, axis=0, kind="quicksort", order=None) -> "Series": """ diff --git a/pandas/core/sorting.py b/pandas/core/sorting.py index d03b2f29521b7..dd6aadf570baa 100644 --- a/pandas/core/sorting.py +++ b/pandas/core/sorting.py @@ -1,11 +1,21 @@ """ miscellaneous sorting / groupby utilities """ from collections import defaultdict -from typing import TYPE_CHECKING, Callable, DefaultDict, Iterable, List, Optional, Tuple +from typing import ( + TYPE_CHECKING, + Callable, + DefaultDict, + Iterable, + List, + Optional, + Tuple, + Union, +) import numpy as np from pandas._libs import algos, hashtable, lib from pandas._libs.hashtable import unique_label_indices +from pandas._typing import IndexKeyFunc from pandas.core.dtypes.common import ( ensure_int64, @@ -20,11 +30,64 @@ from pandas.core.construction import extract_array if TYPE_CHECKING: - from pandas.core.indexes.base import Index # noqa:F401 + from pandas.core.indexes.base import Index _INT64_MAX = np.iinfo(np.int64).max +def get_indexer_indexer( + target: "Index", + level: Union[str, int, List[str], List[int]], + ascending: bool, + kind: str, + na_position: str, + sort_remaining: bool, + key: IndexKeyFunc, +) -> Optional[np.array]: + """ + Helper method that return the indexer according to input parameters for + the sort_index method of DataFrame and Series. + + Parameters + ---------- + target : Index + level : int or level name or list of ints or list of level names + ascending : bool or list of bools, default True + kind : {'quicksort', 'mergesort', 'heapsort'}, default 'quicksort' + na_position : {'first', 'last'}, default 'last' + sort_remaining : bool, default True + key : callable, optional + + Returns + ------- + Optional[ndarray] + The indexer for the new index. 
+    """
+
+    target = ensure_key_mapped(target, key, levels=level)
+    target = target._sort_levels_monotonic()
+
+    if level is not None:
+        _, indexer = target.sortlevel(
+            level, ascending=ascending, sort_remaining=sort_remaining
+        )
+    elif isinstance(target, ABCMultiIndex):
+        indexer = lexsort_indexer(
+            target._get_codes_for_sorting(), orders=ascending, na_position=na_position,
+        )
+    else:
+        # Check monotonic-ness before sort an index (GH 11080)
+        if (ascending and target.is_monotonic_increasing) or (
+            not ascending and target.is_monotonic_decreasing
+        ):
+            return None
+
+        indexer = nargsort(
+            target, kind=kind, ascending=ascending, na_position=na_position
+        )
+    return indexer
+
+
 def get_group_index(labels, shape, sort: bool, xnull: bool):
     """
     For the particular label_list, gets the offsets into the hypothetical list

From 078f88e71003a3675e3721ec572bcab86c201a5c Mon Sep 17 00:00:00 2001
From: patrick <61934744+phofl@users.noreply.github.com>
Date: Wed, 16 Sep 2020 00:20:47 +0200
Subject: [PATCH 562/632] [BUG]: Implement Kahan summation for rolling().mean()
 to avoid numerical issues (#36348)

---
 doc/source/whatsnew/v1.2.0.rst       |  1 +
 pandas/_libs/window/aggregations.pyx | 85 ++++++++++++++++++----------
 pandas/tests/window/test_rolling.py  | 75 ++++++++++++++++++++++++
 3 files changed, 131 insertions(+), 30 deletions(-)

diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index 8b18b56929acd..f398af6e4dd5e 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -118,6 +118,7 @@ Other enhancements
 - :class:`Index` with object dtype supports division and multiplication (:issue:`34160`)
 - :meth:`DataFrame.explode` and :meth:`Series.explode` now support exploding of sets (:issue:`35614`)
 - `Styler` now allows direct CSS class name addition to individual data cells (:issue:`36159`)
+- :meth:`Rolling.mean()` and :meth:`Rolling.sum()` use Kahan summation to calculate the mean and sum to avoid numerical problems (:issue:`10319`, :issue:`11645`, :issue:`13254`, :issue:`32761`, :issue:`36031`)
 
 ..
_whatsnew_120.api_breaking.python: diff --git a/pandas/_libs/window/aggregations.pyx b/pandas/_libs/window/aggregations.pyx index 3ec4547d223ce..5f60b884c6ada 100644 --- a/pandas/_libs/window/aggregations.pyx +++ b/pandas/_libs/window/aggregations.pyx @@ -161,27 +161,42 @@ cdef inline float64_t calc_sum(int64_t minp, int64_t nobs, float64_t sum_x) nogi return result -cdef inline void add_sum(float64_t val, int64_t *nobs, float64_t *sum_x) nogil: - """ add a value from the sum calc """ +cdef inline void add_sum(float64_t val, int64_t *nobs, float64_t *sum_x, + float64_t *compensation) nogil: + """ add a value from the sum calc using Kahan summation """ + + cdef: + float64_t y, t # Not NaN if notnan(val): nobs[0] = nobs[0] + 1 - sum_x[0] = sum_x[0] + val + y = val - compensation[0] + t = sum_x[0] + y + compensation[0] = t - sum_x[0] - y + sum_x[0] = t -cdef inline void remove_sum(float64_t val, int64_t *nobs, float64_t *sum_x) nogil: - """ remove a value from the sum calc """ +cdef inline void remove_sum(float64_t val, int64_t *nobs, float64_t *sum_x, + float64_t *compensation) nogil: + """ remove a value from the sum calc using Kahan summation """ + + cdef: + float64_t y, t + # Not NaN if notnan(val): nobs[0] = nobs[0] - 1 - sum_x[0] = sum_x[0] - val + y = - val - compensation[0] + t = sum_x[0] + y + compensation[0] = t - sum_x[0] - y + sum_x[0] = t def roll_sum_variable(ndarray[float64_t] values, ndarray[int64_t] start, ndarray[int64_t] end, int64_t minp): cdef: - float64_t sum_x = 0 + float64_t sum_x = 0, compensation_add = 0, compensation_remove = 0 int64_t s, e int64_t nobs = 0, i, j, N = len(values) ndarray[float64_t] output @@ -201,23 +216,23 @@ def roll_sum_variable(ndarray[float64_t] values, ndarray[int64_t] start, # setup for j in range(s, e): - add_sum(values[j], &nobs, &sum_x) + add_sum(values[j], &nobs, &sum_x, &compensation_add) else: # calculate deletes for j in range(start[i - 1], s): - remove_sum(values[j], &nobs, &sum_x) + remove_sum(values[j], &nobs, &sum_x, &compensation_remove) # calculate adds for j in range(end[i - 1], e): - add_sum(values[j], &nobs, &sum_x) + add_sum(values[j], &nobs, &sum_x, &compensation_add) output[i] = calc_sum(minp, nobs, sum_x) if not is_monotonic_bounds: for j in range(s, e): - remove_sum(values[j], &nobs, &sum_x) + remove_sum(values[j], &nobs, &sum_x, &compensation_remove) return output @@ -225,7 +240,7 @@ def roll_sum_variable(ndarray[float64_t] values, ndarray[int64_t] start, def roll_sum_fixed(ndarray[float64_t] values, ndarray[int64_t] start, ndarray[int64_t] end, int64_t minp, int64_t win): cdef: - float64_t val, prev_x, sum_x = 0 + float64_t val, prev_x, sum_x = 0, compensation_add = 0, compensation_remove = 0 int64_t range_endpoint int64_t nobs = 0, i, N = len(values) ndarray[float64_t] output @@ -237,16 +252,16 @@ def roll_sum_fixed(ndarray[float64_t] values, ndarray[int64_t] start, with nogil: for i in range(0, range_endpoint): - add_sum(values[i], &nobs, &sum_x) + add_sum(values[i], &nobs, &sum_x, &compensation_add) output[i] = NaN for i in range(range_endpoint, N): val = values[i] - add_sum(val, &nobs, &sum_x) + add_sum(val, &nobs, &sum_x, &compensation_add) if i > win - 1: prev_x = values[i - win] - remove_sum(prev_x, &nobs, &sum_x) + remove_sum(prev_x, &nobs, &sum_x, &compensation_remove) output[i] = calc_sum(minp, nobs, sum_x) @@ -277,24 +292,34 @@ cdef inline float64_t calc_mean(int64_t minp, Py_ssize_t nobs, cdef inline void add_mean(float64_t val, Py_ssize_t *nobs, float64_t *sum_x, - Py_ssize_t *neg_ct) nogil: - """ add a 
value from the mean calc """ + Py_ssize_t *neg_ct, float64_t *compensation) nogil: + """ add a value from the mean calc using Kahan summation """ + cdef: + float64_t y, t # Not NaN if notnan(val): nobs[0] = nobs[0] + 1 - sum_x[0] = sum_x[0] + val + y = val - compensation[0] + t = sum_x[0] + y + compensation[0] = t - sum_x[0] - y + sum_x[0] = t if signbit(val): neg_ct[0] = neg_ct[0] + 1 cdef inline void remove_mean(float64_t val, Py_ssize_t *nobs, float64_t *sum_x, - Py_ssize_t *neg_ct) nogil: - """ remove a value from the mean calc """ + Py_ssize_t *neg_ct, float64_t *compensation) nogil: + """ remove a value from the mean calc using Kahan summation """ + cdef: + float64_t y, t if notnan(val): nobs[0] = nobs[0] - 1 - sum_x[0] = sum_x[0] - val + y = - val - compensation[0] + t = sum_x[0] + y + compensation[0] = t - sum_x[0] - y + sum_x[0] = t if signbit(val): neg_ct[0] = neg_ct[0] - 1 @@ -302,7 +327,7 @@ cdef inline void remove_mean(float64_t val, Py_ssize_t *nobs, float64_t *sum_x, def roll_mean_fixed(ndarray[float64_t] values, ndarray[int64_t] start, ndarray[int64_t] end, int64_t minp, int64_t win): cdef: - float64_t val, prev_x, sum_x = 0 + float64_t val, prev_x, sum_x = 0, compensation_add = 0, compensation_remove = 0 Py_ssize_t nobs = 0, i, neg_ct = 0, N = len(values) ndarray[float64_t] output @@ -311,16 +336,16 @@ def roll_mean_fixed(ndarray[float64_t] values, ndarray[int64_t] start, with nogil: for i in range(minp - 1): val = values[i] - add_mean(val, &nobs, &sum_x, &neg_ct) + add_mean(val, &nobs, &sum_x, &neg_ct, &compensation_add) output[i] = NaN for i in range(minp - 1, N): val = values[i] - add_mean(val, &nobs, &sum_x, &neg_ct) + add_mean(val, &nobs, &sum_x, &neg_ct, &compensation_add) if i > win - 1: prev_x = values[i - win] - remove_mean(prev_x, &nobs, &sum_x, &neg_ct) + remove_mean(prev_x, &nobs, &sum_x, &neg_ct, &compensation_remove) output[i] = calc_mean(minp, nobs, neg_ct, sum_x) @@ -330,7 +355,7 @@ def roll_mean_fixed(ndarray[float64_t] values, ndarray[int64_t] start, def roll_mean_variable(ndarray[float64_t] values, ndarray[int64_t] start, ndarray[int64_t] end, int64_t minp): cdef: - float64_t val, sum_x = 0 + float64_t val, compensation_add = 0, compensation_remove = 0, sum_x = 0 int64_t s, e Py_ssize_t nobs = 0, i, j, neg_ct = 0, N = len(values) ndarray[float64_t] output @@ -350,26 +375,26 @@ def roll_mean_variable(ndarray[float64_t] values, ndarray[int64_t] start, # setup for j in range(s, e): val = values[j] - add_mean(val, &nobs, &sum_x, &neg_ct) + add_mean(val, &nobs, &sum_x, &neg_ct, &compensation_add) else: # calculate deletes for j in range(start[i - 1], s): val = values[j] - remove_mean(val, &nobs, &sum_x, &neg_ct) + remove_mean(val, &nobs, &sum_x, &neg_ct, &compensation_remove) # calculate adds for j in range(end[i - 1], e): val = values[j] - add_mean(val, &nobs, &sum_x, &neg_ct) + add_mean(val, &nobs, &sum_x, &neg_ct, &compensation_add) output[i] = calc_mean(minp, nobs, neg_ct, sum_x) if not is_monotonic_bounds: for j in range(s, e): val = values[j] - remove_mean(val, &nobs, &sum_x, &neg_ct) + remove_mean(val, &nobs, &sum_x, &neg_ct, &compensation_remove) return output # ---------------------------------------------------------------------- diff --git a/pandas/tests/window/test_rolling.py b/pandas/tests/window/test_rolling.py index 67b20fd2d6daa..88afcec0f7bf4 100644 --- a/pandas/tests/window/test_rolling.py +++ b/pandas/tests/window/test_rolling.py @@ -696,3 +696,78 @@ def scaled_sum(*args): expected = DataFrame(data={"X": [0.0, 0.5, 1.0, 1.5, 2.0]}, 
index=_index) result = df.groupby(**grouping).rolling(1).apply(scaled_sum, raw=raw, args=(2,)) tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("add", [0.0, 2.0]) +def test_rolling_numerical_accuracy_kahan_mean(add): + # GH: 36031 implementing kahan summation + df = pd.DataFrame( + {"A": [3002399751580331.0 + add, -0.0, -0.0]}, + index=[ + pd.Timestamp("19700101 09:00:00"), + pd.Timestamp("19700101 09:00:03"), + pd.Timestamp("19700101 09:00:06"), + ], + ) + result = ( + df.resample("1s").ffill().rolling("3s", closed="left", min_periods=3).mean() + ) + dates = pd.date_range("19700101 09:00:00", periods=7, freq="S") + expected = pd.DataFrame( + { + "A": [ + np.nan, + np.nan, + np.nan, + 3002399751580330.5, + 2001599834386887.25, + 1000799917193443.625, + 0.0, + ] + }, + index=dates, + ) + tm.assert_frame_equal(result, expected) + + +def test_rolling_numerical_accuracy_kahan_sum(): + # GH: 13254 + df = pd.DataFrame([2.186, -1.647, 0.0, 0.0, 0.0, 0.0], columns=["x"]) + result = df["x"].rolling(3).sum() + expected = pd.Series([np.nan, np.nan, 0.539, -1.647, 0.0, 0.0], name="x") + tm.assert_series_equal(result, expected) + + +def test_rolling_numerical_accuracy_jump(): + # GH: 32761 + index = pd.date_range(start="2020-01-01", end="2020-01-02", freq="60s").append( + pd.DatetimeIndex(["2020-01-03"]) + ) + data = np.random.rand(len(index)) + + df = pd.DataFrame({"data": data}, index=index) + result = df.rolling("60s").mean() + tm.assert_frame_equal(result, df[["data"]]) + + +def test_rolling_numerical_accuracy_small_values(): + # GH: 10319 + s = Series( + data=[0.00012456, 0.0003, -0.0, -0.0], + index=date_range("1999-02-03", "1999-02-06"), + ) + result = s.rolling(1).mean() + tm.assert_series_equal(result, s) + + +def test_rolling_numerical_too_large_numbers(): + # GH: 11645 + dates = pd.date_range("2015-01-01", periods=10, freq="D") + ds = pd.Series(data=range(10), index=dates, dtype=np.float64) + ds[2] = -9e33 + result = ds.rolling(5).mean() + expected = pd.Series( + [np.nan, np.nan, np.nan, np.nan, -1.8e33, -1.8e33, -1.8e33, 0.0, 6.0, 7.0], + index=dates, + ) + tm.assert_series_equal(result, expected) From 8ea00fb3c500da243eb995cc4cf913f230bcf6ed Mon Sep 17 00:00:00 2001 From: Erfan Nariman <34067903+erfannariman@users.noreply.github.com> Date: Wed, 16 Sep 2020 00:32:25 +0200 Subject: [PATCH 563/632] DOC: Example for natural sort using key argument (#36356) --- environment.yml | 1 + pandas/core/generic.py | 26 ++++++++++++++++++++++++++ requirements-dev.txt | 1 + 3 files changed, 28 insertions(+) diff --git a/environment.yml b/environment.yml index 4622aac1dc6f8..badb0ba94a670 100644 --- a/environment.yml +++ b/environment.yml @@ -106,6 +106,7 @@ dependencies: - cftime # Needed for downstream xarray.CFTimeIndex test - pyreadstat # pandas.read_spss - tabulate>=0.8.3 # DataFrame.to_markdown + - natsort # DataFrame.sort_values - pip: - git+https://github.com/pandas-dev/pydata-sphinx-theme.git@master - git+https://github.com/numpy/numpydoc diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 0a8dd578bf461..814f307749d50 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -4409,6 +4409,32 @@ def sort_values( 3 NaN 8 4 D 4 D 7 2 e 5 C 4 3 F + + Natural sort with the key argument, + using the `natsort ` package. + + >>> df = pd.DataFrame({ + ... "time": ['0hr', '128hr', '72hr', '48hr', '96hr'], + ... "value": [10, 20, 30, 40, 50] + ... 
}) + >>> df + time value + 0 0hr 10 + 1 128hr 20 + 2 72hr 30 + 3 48hr 40 + 4 96hr 50 + >>> from natsort import index_natsorted + >>> df.sort_values( + ... by="time", + ... key=lambda x: np.argsort(index_natsorted(df["time"])) + ... ) + time value + 0 0hr 10 + 3 48hr 40 + 2 72hr 30 + 4 96hr 50 + 1 128hr 20 """ raise AbstractMethodError(self) diff --git a/requirements-dev.txt b/requirements-dev.txt index cc3775de3a4ba..c53ced35d27fa 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -73,6 +73,7 @@ xarray cftime pyreadstat tabulate>=0.8.3 +natsort git+https://github.com/pandas-dev/pydata-sphinx-theme.git@master git+https://github.com/numpy/numpydoc pyflakes>=2.2.0 \ No newline at end of file From 0bfede2aa129296e4253c0a00ea35f641438b0d4 Mon Sep 17 00:00:00 2001 From: patrick <61934744+phofl@users.noreply.github.com> Date: Wed, 16 Sep 2020 00:33:47 +0200 Subject: [PATCH 564/632] [TST]: Groupby raised error with duplicate column names (#36389) --- pandas/tests/groupby/test_groupby.py | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py index 313b0ea2434f9..1bb40b322cd48 100644 --- a/pandas/tests/groupby/test_groupby.py +++ b/pandas/tests/groupby/test_groupby.py @@ -2126,3 +2126,14 @@ def test_groupby_column_index_name_lost(func): df_grouped = df.groupby([1]) result = getattr(df_grouped, func)().columns tm.assert_index_equal(result, expected) + + +def test_groupby_duplicate_columns(): + # GH: 31735 + df = pd.DataFrame( + {"A": ["f", "e", "g", "h"], "B": ["a", "b", "c", "d"], "C": [1, 2, 3, 4]} + ).astype(object) + df.columns = ["A", "B", "B"] + result = df.groupby([0, 0, 0, 0]).min() + expected = pd.DataFrame([["e", "a", 1]], columns=["A", "B", "B"]) + tm.assert_frame_equal(result, expected) From b53fa14e1cbed18eb02d919ca962b0ec9c26aa24 Mon Sep 17 00:00:00 2001 From: Daniel Saxton <2658661+dsaxton@users.noreply.github.com> Date: Tue, 15 Sep 2020 17:35:40 -0500 Subject: [PATCH 565/632] BUG: Fix MultiIndex column stacking with dupe names (#36371) --- doc/source/whatsnew/v1.1.3.rst | 1 + pandas/core/reshape/reshape.py | 14 +++++--------- pandas/tests/frame/test_reshape.py | 13 +++++++++++++ 3 files changed, 19 insertions(+), 9 deletions(-) diff --git a/doc/source/whatsnew/v1.1.3.rst b/doc/source/whatsnew/v1.1.3.rst index 8e283aec39786..8ead78a17e9c2 100644 --- a/doc/source/whatsnew/v1.1.3.rst +++ b/doc/source/whatsnew/v1.1.3.rst @@ -29,6 +29,7 @@ Bug fixes - Bug in :func:`read_spss` where passing a ``pathlib.Path`` as ``path`` would raise a ``TypeError`` (:issue:`33666`) - Bug in :meth:`Series.str.startswith` and :meth:`Series.str.endswith` with ``category`` dtype not propagating ``na`` parameter (:issue:`36241`) - Bug in :class:`Series` constructor where integer overflow would occur for sufficiently large scalar inputs when an index was provided (:issue:`36291`) +- Bug in :meth:`DataFrame.stack` raising a ``ValueError`` when stacking :class:`MultiIndex` columns based on position when the levels had duplicate names (:issue:`36353`) .. 
--------------------------------------------------------------------------- diff --git a/pandas/core/reshape/reshape.py b/pandas/core/reshape/reshape.py index 6ddf53b6493e3..18ebe14763797 100644 --- a/pandas/core/reshape/reshape.py +++ b/pandas/core/reshape/reshape.py @@ -586,19 +586,15 @@ def _stack_multi_columns(frame, level_num=-1, dropna=True): def _convert_level_number(level_num, columns): """ Logic for converting the level number to something we can safely pass - to swaplevel: + to swaplevel. - We generally want to convert the level number into a level name, except - when columns do not have names, in which case we must leave as a level - number + If `level_num` matches a column name return the name from + position `level_num`, otherwise return `level_num`. """ if level_num in columns.names: return columns.names[level_num] - else: - if columns.names[level_num] is None: - return level_num - else: - return columns.names[level_num] + + return level_num this = frame.copy() diff --git a/pandas/tests/frame/test_reshape.py b/pandas/tests/frame/test_reshape.py index d80ebaa09b6a8..b10fdbb707404 100644 --- a/pandas/tests/frame/test_reshape.py +++ b/pandas/tests/frame/test_reshape.py @@ -1302,3 +1302,16 @@ def test_unstacking_multi_index_df(): ), ) tm.assert_frame_equal(result, expected) + + +def test_stack_positional_level_duplicate_column_names(): + # https://github.com/pandas-dev/pandas/issues/36353 + columns = pd.MultiIndex.from_product([("x", "y"), ("y", "z")], names=["a", "a"]) + df = pd.DataFrame([[1, 1, 1, 1]], columns=columns) + result = df.stack(0) + + new_columns = pd.Index(["y", "z"], name="a") + new_index = pd.MultiIndex.from_tuples([(0, "x"), (0, "y")], names=[None, "a"]) + expected = pd.DataFrame([[1, 1], [1, 1]], index=new_index, columns=new_columns) + + tm.assert_frame_equal(result, expected) From 51558a08e5e9aa55f536f65350383a3e52b3ae08 Mon Sep 17 00:00:00 2001 From: Zach Brookler <39153813+zbrookle@users.noreply.github.com> Date: Tue, 15 Sep 2020 18:40:55 -0400 Subject: [PATCH 566/632] DOC: Add dataframe_sql to eco system page (#36370) --- doc/source/ecosystem.rst | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/doc/source/ecosystem.rst b/doc/source/ecosystem.rst index de231e43918f8..624c0551de607 100644 --- a/doc/source/ecosystem.rst +++ b/doc/source/ecosystem.rst @@ -303,6 +303,13 @@ HTTP API, and also provides several convenient methods for parsing and analyzing fredapi makes use of pandas and returns data in a Series or DataFrame. This module requires a FRED API key that you can obtain for free on the FRED website. +`dataframe_sql `__ +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +``dataframe_sql`` is a Python package that translates SQL syntax directly into +operations on pandas DataFrames. This is useful when migrating from a database to +using pandas or for users more comfortable with SQL looking for a way to interface +with pandas. + .. 
_ecosystem.domain: From 0d4a1c15c64970bf1f73325f683f13de3b91730d Mon Sep 17 00:00:00 2001 From: Daniel Saxton <2658661+dsaxton@users.noreply.github.com> Date: Tue, 15 Sep 2020 18:08:22 -0500 Subject: [PATCH 567/632] CLN: Clean test_arithmetic.py (#36390) --- pandas/tests/series/test_arithmetic.py | 99 +++++++++++++++----------- 1 file changed, 56 insertions(+), 43 deletions(-) diff --git a/pandas/tests/series/test_arithmetic.py b/pandas/tests/series/test_arithmetic.py index c937e357b9dbc..a420a1f7d6bca 100644 --- a/pandas/tests/series/test_arithmetic.py +++ b/pandas/tests/series/test_arithmetic.py @@ -271,7 +271,6 @@ def test_comparison_flex_basic(self): tm.assert_series_equal(left.gt(right), left > right) tm.assert_series_equal(left.ge(right), left >= right) - # axis for axis in [0, None, "index"]: tm.assert_series_equal(left.eq(right, axis=axis), left == right) tm.assert_series_equal(left.ne(right, axis=axis), left != right) @@ -280,7 +279,6 @@ def test_comparison_flex_basic(self): tm.assert_series_equal(left.gt(right, axis=axis), left > right) tm.assert_series_equal(left.ge(right, axis=axis), left >= right) - # msg = "No axis named 1 for object type" for op in ["eq", "ne", "le", "le", "gt", "ge"]: with pytest.raises(ValueError, match=msg): @@ -553,32 +551,30 @@ def test_comparison_tuples(self): expected = Series([True, False]) tm.assert_series_equal(result, expected) - def test_comparison_operators_with_nas(self): + def test_comparison_operators_with_nas(self, all_compare_operators): + op = all_compare_operators ser = Series(bdate_range("1/1/2000", periods=10), dtype=object) ser[::2] = np.nan - # test that comparisons work - ops = ["lt", "le", "gt", "ge", "eq", "ne"] - for op in ops: - val = ser[5] + f = getattr(operator, op) - f = getattr(operator, op) - result = f(ser, val) + # test that comparisons work + val = ser[5] - expected = f(ser.dropna(), val).reindex(ser.index) + result = f(ser, val) + expected = f(ser.dropna(), val).reindex(ser.index) - if op == "ne": - expected = expected.fillna(True).astype(bool) - else: - expected = expected.fillna(False).astype(bool) + if op == "__ne__": + expected = expected.fillna(True).astype(bool) + else: + expected = expected.fillna(False).astype(bool) - tm.assert_series_equal(result, expected) + tm.assert_series_equal(result, expected) - # FIXME: dont leave commented-out - # fffffffuuuuuuuuuuuu - # result = f(val, s) - # expected = f(val, s.dropna()).reindex(s.index) - # tm.assert_series_equal(result, expected) + # FIXME: dont leave commented-out + # result = f(val, ser) + # expected = f(val, ser.dropna()).reindex(ser.index) + # tm.assert_series_equal(result, expected) def test_ne(self): ts = Series([3, 4, 5, 6, 7], [3, 4, 5, 6, 7], dtype=float) @@ -586,35 +582,52 @@ def test_ne(self): assert tm.equalContents(ts.index != 5, expected) assert tm.equalContents(~(ts.index == 5), expected) - def test_comp_ops_df_compat(self): + @pytest.mark.parametrize( + "left, right", + [ + ( + pd.Series([1, 2, 3], index=list("ABC"), name="x"), + pd.Series([2, 2, 2], index=list("ABD"), name="x"), + ), + ( + pd.Series([1, 2, 3], index=list("ABC"), name="x"), + pd.Series([2, 2, 2, 2], index=list("ABCD"), name="x"), + ), + ], + ) + def test_comp_ops_df_compat(self, left, right): # GH 1134 - s1 = pd.Series([1, 2, 3], index=list("ABC"), name="x") - s2 = pd.Series([2, 2, 2], index=list("ABD"), name="x") - - s3 = pd.Series([1, 2, 3], index=list("ABC"), name="x") - s4 = pd.Series([2, 2, 2, 2], index=list("ABCD"), name="x") - - for left, right in [(s1, s2), (s2, s1), (s3, 
s4), (s4, s3)]: - - msg = "Can only compare identically-labeled Series objects" - with pytest.raises(ValueError, match=msg): - left == right + msg = "Can only compare identically-labeled Series objects" + with pytest.raises(ValueError, match=msg): + left == right + with pytest.raises(ValueError, match=msg): + right == left - with pytest.raises(ValueError, match=msg): - left != right + with pytest.raises(ValueError, match=msg): + left != right + with pytest.raises(ValueError, match=msg): + right != left - with pytest.raises(ValueError, match=msg): - left < right + with pytest.raises(ValueError, match=msg): + left < right + with pytest.raises(ValueError, match=msg): + right < left - msg = "Can only compare identically-labeled DataFrame objects" - with pytest.raises(ValueError, match=msg): - left.to_frame() == right.to_frame() + msg = "Can only compare identically-labeled DataFrame objects" + with pytest.raises(ValueError, match=msg): + left.to_frame() == right.to_frame() + with pytest.raises(ValueError, match=msg): + right.to_frame() == left.to_frame() - with pytest.raises(ValueError, match=msg): - left.to_frame() != right.to_frame() + with pytest.raises(ValueError, match=msg): + left.to_frame() != right.to_frame() + with pytest.raises(ValueError, match=msg): + right.to_frame() != left.to_frame() - with pytest.raises(ValueError, match=msg): - left.to_frame() < right.to_frame() + with pytest.raises(ValueError, match=msg): + left.to_frame() < right.to_frame() + with pytest.raises(ValueError, match=msg): + right.to_frame() < left.to_frame() def test_compare_series_interval_keyword(self): # GH#25338 From 11d5fc9e89ad8e238ece42890482bae875d9de98 Mon Sep 17 00:00:00 2001 From: Fangchen Li Date: Wed, 16 Sep 2020 10:06:56 -0500 Subject: [PATCH 568/632] BLD/CI: fix py39 ci #36296 (#36393) --- .travis.yml | 3 --- asv_bench/asv.conf.json | 2 +- ci/build39.sh | 3 +-- ci/deps/azure-37-32bit.yaml | 2 +- ci/deps/azure-37-locale.yaml | 2 +- ci/deps/azure-37-locale_slow.yaml | 2 +- ci/deps/azure-37-minimum_versions.yaml | 2 +- ci/deps/azure-37-slow.yaml | 2 +- ci/deps/azure-38-locale.yaml | 2 +- ci/deps/azure-38-numpydev.yaml | 2 +- ci/deps/azure-macos-37.yaml | 2 +- ci/deps/azure-windows-37.yaml | 2 +- ci/deps/azure-windows-38.yaml | 2 +- ci/deps/travis-37-arm64.yaml | 2 +- ci/deps/travis-37-cov.yaml | 2 +- ci/deps/travis-37-locale.yaml | 2 +- ci/deps/travis-37.yaml | 2 +- ci/deps/travis-38.yaml | 2 +- doc/source/getting_started/install.rst | 2 +- doc/source/whatsnew/v1.1.3.rst | 15 +++++++++++++++ environment.yml | 2 +- pandas/_libs/writers.pyx | 8 ++------ pyproject.toml | 2 +- requirements-dev.txt | 2 +- setup.py | 3 ++- 25 files changed, 40 insertions(+), 32 deletions(-) diff --git a/.travis.yml b/.travis.yml index 2e98cf47aea3e..93c8238bb5059 100644 --- a/.travis.yml +++ b/.travis.yml @@ -62,9 +62,6 @@ matrix: - arch: arm64 env: - JOB="3.7, arm64" PYTEST_WORKERS=8 ENV_FILE="ci/deps/travis-37-arm64.yaml" PATTERN="(not slow and not network and not clipboard)" - - dist: bionic - env: - - JOB="3.9-dev" PATTERN="(not slow and not network and not clipboard)" before_install: diff --git a/asv_bench/asv.conf.json b/asv_bench/asv.conf.json index 1863a17e3d5f7..e8e82edabbfa3 100644 --- a/asv_bench/asv.conf.json +++ b/asv_bench/asv.conf.json @@ -39,7 +39,7 @@ // followed by the pip installed packages). 
"matrix": { "numpy": [], - "Cython": ["0.29.16"], + "Cython": ["0.29.21"], "matplotlib": [], "sqlalchemy": [], "scipy": [], diff --git a/ci/build39.sh b/ci/build39.sh index b9c76635df99b..f2ef11d5a71f4 100755 --- a/ci/build39.sh +++ b/ci/build39.sh @@ -3,8 +3,7 @@ sudo apt-get install build-essential gcc xvfb pip install --no-deps -U pip wheel setuptools -pip install numpy python-dateutil pytz pytest pytest-xdist hypothesis -pip install cython --pre # https://github.com/cython/cython/issues/3395 +pip install cython numpy python-dateutil pytz pytest pytest-xdist hypothesis python setup.py build_ext -inplace python -m pip install --no-build-isolation -e . diff --git a/ci/deps/azure-37-32bit.yaml b/ci/deps/azure-37-32bit.yaml index 8e0cd73a9536d..3cdd98485f281 100644 --- a/ci/deps/azure-37-32bit.yaml +++ b/ci/deps/azure-37-32bit.yaml @@ -21,6 +21,6 @@ dependencies: # see comment above - pip - pip: - - cython>=0.29.16 + - cython>=0.29.21 - numpy>=1.16.5 - pytest>=5.0.1 diff --git a/ci/deps/azure-37-locale.yaml b/ci/deps/azure-37-locale.yaml index cc996f4077cd9..64480258fe65e 100644 --- a/ci/deps/azure-37-locale.yaml +++ b/ci/deps/azure-37-locale.yaml @@ -6,7 +6,7 @@ dependencies: - python=3.7.* # tools - - cython>=0.29.16 + - cython>=0.29.21 - pytest>=5.0.1 - pytest-xdist>=1.21 - pytest-asyncio diff --git a/ci/deps/azure-37-locale_slow.yaml b/ci/deps/azure-37-locale_slow.yaml index fbb1ea671d696..7f658fe62d268 100644 --- a/ci/deps/azure-37-locale_slow.yaml +++ b/ci/deps/azure-37-locale_slow.yaml @@ -6,7 +6,7 @@ dependencies: - python=3.7.* # tools - - cython>=0.29.16 + - cython>=0.29.21 - pytest>=5.0.1 - pytest-xdist>=1.21 - hypothesis>=3.58.0 diff --git a/ci/deps/azure-37-minimum_versions.yaml b/ci/deps/azure-37-minimum_versions.yaml index 31f82f3304db3..afd5b07cc6654 100644 --- a/ci/deps/azure-37-minimum_versions.yaml +++ b/ci/deps/azure-37-minimum_versions.yaml @@ -5,7 +5,7 @@ dependencies: - python=3.7.1 # tools - - cython=0.29.16 + - cython=0.29.21 - pytest=5.0.1 - pytest-xdist>=1.21 - hypothesis>=3.58.0 diff --git a/ci/deps/azure-37-slow.yaml b/ci/deps/azure-37-slow.yaml index d17a8a2b0ed9b..13a0d442bcae7 100644 --- a/ci/deps/azure-37-slow.yaml +++ b/ci/deps/azure-37-slow.yaml @@ -6,7 +6,7 @@ dependencies: - python=3.7.* # tools - - cython>=0.29.16 + - cython>=0.29.21 - pytest>=5.0.1 - pytest-xdist>=1.21 - hypothesis>=3.58.0 diff --git a/ci/deps/azure-38-locale.yaml b/ci/deps/azure-38-locale.yaml index bb40127b672d3..8ce58e07a8542 100644 --- a/ci/deps/azure-38-locale.yaml +++ b/ci/deps/azure-38-locale.yaml @@ -5,7 +5,7 @@ dependencies: - python=3.8.* # tools - - cython>=0.29.16 + - cython>=0.29.21 - pytest>=5.0.1 - pytest-xdist>=1.21 - pytest-asyncio>=0.12.0 diff --git a/ci/deps/azure-38-numpydev.yaml b/ci/deps/azure-38-numpydev.yaml index 37592086d49e3..274be0361c2e5 100644 --- a/ci/deps/azure-38-numpydev.yaml +++ b/ci/deps/azure-38-numpydev.yaml @@ -14,7 +14,7 @@ dependencies: - pytz - pip - pip: - - cython==0.29.16 # GH#34014 + - cython==0.29.21 # GH#34014 - "git+git://github.com/dateutil/dateutil.git" - "--extra-index-url https://pypi.anaconda.org/scipy-wheels-nightly/simple" - "--pre" diff --git a/ci/deps/azure-macos-37.yaml b/ci/deps/azure-macos-37.yaml index a5a69b9a59576..31e0ffca81424 100644 --- a/ci/deps/azure-macos-37.yaml +++ b/ci/deps/azure-macos-37.yaml @@ -31,6 +31,6 @@ dependencies: - xlwt - pip - pip: - - cython>=0.29.16 + - cython>=0.29.21 - pyreadstat - pyxlsb diff --git a/ci/deps/azure-windows-37.yaml b/ci/deps/azure-windows-37.yaml index 
1d15ca41c0f8e..16b4bd72683b4 100644 --- a/ci/deps/azure-windows-37.yaml +++ b/ci/deps/azure-windows-37.yaml @@ -6,7 +6,7 @@ dependencies: - python=3.7.* # tools - - cython>=0.29.16 + - cython>=0.29.21 - pytest>=5.0.1 - pytest-xdist>=1.21 - hypothesis>=3.58.0 diff --git a/ci/deps/azure-windows-38.yaml b/ci/deps/azure-windows-38.yaml index 23bede5eb26f1..449bbd05991bf 100644 --- a/ci/deps/azure-windows-38.yaml +++ b/ci/deps/azure-windows-38.yaml @@ -6,7 +6,7 @@ dependencies: - python=3.8.* # tools - - cython>=0.29.16 + - cython>=0.29.21 - pytest>=5.0.1 - pytest-xdist>=1.21 - hypothesis>=3.58.0 diff --git a/ci/deps/travis-37-arm64.yaml b/ci/deps/travis-37-arm64.yaml index ea29cbef1272b..d04b1ca0bdcfc 100644 --- a/ci/deps/travis-37-arm64.yaml +++ b/ci/deps/travis-37-arm64.yaml @@ -6,7 +6,7 @@ dependencies: - python=3.7.* # tools - - cython>=0.29.13 + - cython>=0.29.21 - pytest>=5.0.1 - pytest-xdist>=1.21 - hypothesis>=3.58.0 diff --git a/ci/deps/travis-37-cov.yaml b/ci/deps/travis-37-cov.yaml index 33ee6dfffb1a3..d031dc1cc062f 100644 --- a/ci/deps/travis-37-cov.yaml +++ b/ci/deps/travis-37-cov.yaml @@ -6,7 +6,7 @@ dependencies: - python=3.7.* # tools - - cython>=0.29.16 + - cython>=0.29.21 - pytest>=5.0.1 - pytest-xdist>=1.21 - hypothesis>=3.58.0 diff --git a/ci/deps/travis-37-locale.yaml b/ci/deps/travis-37-locale.yaml index 306f74a0101e3..8a0b5b043ceca 100644 --- a/ci/deps/travis-37-locale.yaml +++ b/ci/deps/travis-37-locale.yaml @@ -6,7 +6,7 @@ dependencies: - python=3.7.* # tools - - cython>=0.29.16 + - cython>=0.29.21 - pytest>=5.0.1 - pytest-xdist>=1.21 - hypothesis>=3.58.0 diff --git a/ci/deps/travis-37.yaml b/ci/deps/travis-37.yaml index 26d6c2910a7cc..6a15ce1195ea9 100644 --- a/ci/deps/travis-37.yaml +++ b/ci/deps/travis-37.yaml @@ -6,7 +6,7 @@ dependencies: - python=3.7.* # tools - - cython>=0.29.16 + - cython>=0.29.21 - pytest>=5.0.1 - pytest-xdist>=1.21 - hypothesis>=3.58.0 diff --git a/ci/deps/travis-38.yaml b/ci/deps/travis-38.yaml index b879c0f81dab2..874c8dd96d008 100644 --- a/ci/deps/travis-38.yaml +++ b/ci/deps/travis-38.yaml @@ -6,7 +6,7 @@ dependencies: - python=3.8.* # tools - - cython>=0.29.16 + - cython>=0.29.21 - pytest>=5.0.1 - pytest-xdist>=1.21 - hypothesis>=3.58.0 diff --git a/doc/source/getting_started/install.rst b/doc/source/getting_started/install.rst index fde9f567cc3ec..2196c908ecf37 100644 --- a/doc/source/getting_started/install.rst +++ b/doc/source/getting_started/install.rst @@ -18,7 +18,7 @@ Instructions for installing from source, Python version support ---------------------- -Officially Python 3.7.1 and above, and 3.8. +Officially Python 3.7.1 and above, 3.8, and 3.9. Installing pandas ----------------- diff --git a/doc/source/whatsnew/v1.1.3.rst b/doc/source/whatsnew/v1.1.3.rst index 8ead78a17e9c2..72526140b6eb8 100644 --- a/doc/source/whatsnew/v1.1.3.rst +++ b/doc/source/whatsnew/v1.1.3.rst @@ -10,6 +10,21 @@ including other versions of pandas. .. --------------------------------------------------------------------------- +Enhancements +~~~~~~~~~~~~ + +Added support for new Python version +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Pandas 1.1.3 now supports Python 3.9 (:issue:`36296`). + +Development Changes +^^^^^^^^^^^^^^^^^^^ + +- The minimum version of Cython is now the most recent bug-fix version (0.29.21) (:issue:`36296`). + +.. --------------------------------------------------------------------------- + .. 
_whatsnew_113.regressions: Fixed regressions diff --git a/environment.yml b/environment.yml index badb0ba94a670..36bbd3d307159 100644 --- a/environment.yml +++ b/environment.yml @@ -12,7 +12,7 @@ dependencies: - asv # building - - cython>=0.29.16 + - cython>=0.29.21 # code checks - black=19.10b0 diff --git a/pandas/_libs/writers.pyx b/pandas/_libs/writers.pyx index 40c39aabb7a7a..f6823c3cb0d3f 100644 --- a/pandas/_libs/writers.pyx +++ b/pandas/_libs/writers.pyx @@ -1,11 +1,7 @@ import cython -from cython import Py_ssize_t - -from cpython.bytes cimport PyBytes_GET_SIZE -from cpython.unicode cimport PyUnicode_GET_SIZE - import numpy as np +from cpython cimport PyBytes_GET_SIZE, PyUnicode_GET_LENGTH from numpy cimport ndarray, uint8_t ctypedef fused pandas_string: @@ -144,7 +140,7 @@ cpdef inline Py_ssize_t word_len(object val): Py_ssize_t l = 0 if isinstance(val, str): - l = PyUnicode_GET_SIZE(val) + l = PyUnicode_GET_LENGTH(val) elif isinstance(val, bytes): l = PyBytes_GET_SIZE(val) diff --git a/pyproject.toml b/pyproject.toml index f6f8081b6c464..8161e8ad752da 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ requires = [ "setuptools", "wheel", - "Cython>=0.29.16,<3", # Note: sync with setup.py + "Cython>=0.29.21,<3", # Note: sync with setup.py "numpy==1.16.5; python_version=='3.7' and platform_system!='AIX'", "numpy==1.17.3; python_version>='3.8' and platform_system!='AIX'", "numpy==1.16.5; python_version=='3.7' and platform_system=='AIX'", diff --git a/requirements-dev.txt b/requirements-dev.txt index c53ced35d27fa..fb647c10f72bc 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -5,7 +5,7 @@ numpy>=1.16.5 python-dateutil>=2.7.3 pytz asv -cython>=0.29.16 +cython>=0.29.21 black==19.10b0 cpplint flake8<3.8.0 diff --git a/setup.py b/setup.py index f6f0cd9aabc0e..a8dfeb0974195 100755 --- a/setup.py +++ b/setup.py @@ -34,7 +34,7 @@ def is_platform_mac(): min_numpy_ver = "1.16.5" -min_cython_ver = "0.29.16" # note: sync with pyproject.toml +min_cython_ver = "0.29.21" # note: sync with pyproject.toml try: import Cython @@ -199,6 +199,7 @@ def build_extensions(self): "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", + "Programming Language :: Python :: 3.9", "Programming Language :: Cython", "Topic :: Scientific/Engineering", ] From 1b6879a9d1beca4aa3a4afd810709b36308ced23 Mon Sep 17 00:00:00 2001 From: Matthew Roeschke Date: Wed, 16 Sep 2020 08:08:34 -0700 Subject: [PATCH 569/632] CLN: Numba internal routines (#36376) --- pandas/core/groupby/generic.py | 20 ++------ pandas/core/groupby/groupby.py | 26 +++++------ pandas/core/groupby/numba_.py | 85 ++++++---------------------------- pandas/core/util/numba_.py | 42 +++++------------ pandas/core/window/numba_.py | 10 ++-- pandas/core/window/rolling.py | 11 +---- 6 files changed, 47 insertions(+), 147 deletions(-) diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index d4e673d2e538c..a931221ef3ce1 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -74,12 +74,11 @@ get_groupby, group_selection_context, ) -from pandas.core.groupby.numba_ import generate_numba_func, split_for_numba from pandas.core.indexes.api import Index, MultiIndex, all_indexes_same import pandas.core.indexes.base as ibase from pandas.core.internals import BlockManager from pandas.core.series import Series -from pandas.core.util.numba_ import NUMBA_FUNC_CACHE, maybe_use_numba +from pandas.core.util.numba_ import 
maybe_use_numba from pandas.plotting import boxplot_frame_groupby @@ -518,29 +517,16 @@ def transform(self, func, *args, engine=None, engine_kwargs=None, **kwargs): result = getattr(self, func)(*args, **kwargs) return self._transform_fast(result) - def _transform_general( - self, func, *args, engine="cython", engine_kwargs=None, **kwargs - ): + def _transform_general(self, func, *args, **kwargs): """ Transform with a non-str `func`. """ - if maybe_use_numba(engine): - numba_func, cache_key = generate_numba_func( - func, engine_kwargs, kwargs, "groupby_transform" - ) - klass = type(self._selected_obj) results = [] for name, group in self: object.__setattr__(group, "name", name) - if maybe_use_numba(engine): - values, index = split_for_numba(group) - res = numba_func(values, index, *args) - if cache_key not in NUMBA_FUNC_CACHE: - NUMBA_FUNC_CACHE[cache_key] = numba_func - else: - res = func(group, *args, **kwargs) + res = func(group, *args, **kwargs) if isinstance(res, (ABCDataFrame, ABCSeries)): res = res._values diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index ceee78bfebe68..9a14323dd8c3a 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -1071,16 +1071,15 @@ def _transform_with_numba(self, data, func, *args, engine_kwargs=None, **kwargs) sorted_labels = algorithms.take_nd(labels, sorted_index, allow_fill=False) sorted_data = data.take(sorted_index, axis=self.axis).to_numpy() starts, ends = lib.generate_slices(sorted_labels, n_groups) - cache_key = (func, "groupby_transform") - if cache_key in NUMBA_FUNC_CACHE: - numba_transform_func = NUMBA_FUNC_CACHE[cache_key] - else: - numba_transform_func = numba_.generate_numba_transform_func( - tuple(args), kwargs, func, engine_kwargs - ) + + numba_transform_func = numba_.generate_numba_transform_func( + tuple(args), kwargs, func, engine_kwargs + ) result = numba_transform_func( sorted_data, sorted_index, starts, ends, len(group_keys), len(data.columns) ) + + cache_key = (func, "groupby_transform") if cache_key not in NUMBA_FUNC_CACHE: NUMBA_FUNC_CACHE[cache_key] = numba_transform_func @@ -1106,16 +1105,15 @@ def _aggregate_with_numba(self, data, func, *args, engine_kwargs=None, **kwargs) sorted_labels = algorithms.take_nd(labels, sorted_index, allow_fill=False) sorted_data = data.take(sorted_index, axis=self.axis).to_numpy() starts, ends = lib.generate_slices(sorted_labels, n_groups) - cache_key = (func, "groupby_agg") - if cache_key in NUMBA_FUNC_CACHE: - numba_agg_func = NUMBA_FUNC_CACHE[cache_key] - else: - numba_agg_func = numba_.generate_numba_agg_func( - tuple(args), kwargs, func, engine_kwargs - ) + + numba_agg_func = numba_.generate_numba_agg_func( + tuple(args), kwargs, func, engine_kwargs + ) result = numba_agg_func( sorted_data, sorted_index, starts, ends, len(group_keys), len(data.columns) ) + + cache_key = (func, "groupby_agg") if cache_key not in NUMBA_FUNC_CACHE: NUMBA_FUNC_CACHE[cache_key] = numba_agg_func diff --git a/pandas/core/groupby/numba_.py b/pandas/core/groupby/numba_.py index a2dfcd7bddd53..76f50f1387196 100644 --- a/pandas/core/groupby/numba_.py +++ b/pandas/core/groupby/numba_.py @@ -4,34 +4,17 @@ import numpy as np -from pandas._typing import FrameOrSeries, Scalar +from pandas._typing import Scalar from pandas.compat._optional import import_optional_dependency from pandas.core.util.numba_ import ( NUMBA_FUNC_CACHE, NumbaUtilError, - check_kwargs_and_nopython, get_jit_arguments, jit_user_function, ) -def split_for_numba(arg: FrameOrSeries) -> 
Tuple[np.ndarray, np.ndarray]: - """ - Split pandas object into its components as numpy arrays for numba functions. - - Parameters - ---------- - arg : Series or DataFrame - - Returns - ------- - (ndarray, ndarray) - values, index - """ - return arg.to_numpy(), arg.index.to_numpy() - - def validate_udf(func: Callable) -> None: """ Validate user defined function for ops when using Numba with groupby ops. @@ -67,46 +50,6 @@ def f(values, index, ...): ) -def generate_numba_func( - func: Callable, - engine_kwargs: Optional[Dict[str, bool]], - kwargs: dict, - cache_key_str: str, -) -> Tuple[Callable, Tuple[Callable, str]]: - """ - Return a JITed function and cache key for the NUMBA_FUNC_CACHE - - This _may_ be specific to groupby (as it's only used there currently). - - Parameters - ---------- - func : function - user defined function - engine_kwargs : dict or None - numba.jit arguments - kwargs : dict - kwargs for func - cache_key_str : str - string representing the second part of the cache key tuple - - Returns - ------- - (JITed function, cache key) - - Raises - ------ - NumbaUtilError - """ - nopython, nogil, parallel = get_jit_arguments(engine_kwargs) - check_kwargs_and_nopython(kwargs, nopython) - validate_udf(func) - cache_key = (func, cache_key_str) - numba_func = NUMBA_FUNC_CACHE.get( - cache_key, jit_user_function(func, nopython, nogil, parallel) - ) - return numba_func, cache_key - - def generate_numba_agg_func( args: Tuple, kwargs: Dict[str, Any], @@ -120,7 +63,7 @@ def generate_numba_agg_func( 2. Return a groupby agg function with the jitted function inline Configurations specified in engine_kwargs apply to both the user's - function _AND_ the rolling apply function. + function _AND_ the groupby evaluation loop. Parameters ---------- @@ -137,16 +80,15 @@ def generate_numba_agg_func( ------- Numba function """ - nopython, nogil, parallel = get_jit_arguments(engine_kwargs) - - check_kwargs_and_nopython(kwargs, nopython) + nopython, nogil, parallel = get_jit_arguments(engine_kwargs, kwargs) validate_udf(func) + cache_key = (func, "groupby_agg") + if cache_key in NUMBA_FUNC_CACHE: + return NUMBA_FUNC_CACHE[cache_key] numba_func = jit_user_function(func, nopython, nogil, parallel) - numba = import_optional_dependency("numba") - if parallel: loop_range = numba.prange else: @@ -175,17 +117,17 @@ def group_agg( def generate_numba_transform_func( args: Tuple, kwargs: Dict[str, Any], - func: Callable[..., Scalar], + func: Callable[..., np.ndarray], engine_kwargs: Optional[Dict[str, bool]], ) -> Callable[[np.ndarray, np.ndarray, np.ndarray, np.ndarray, int, int], np.ndarray]: """ Generate a numba jitted transform function specified by values from engine_kwargs. 1. jit the user's function - 2. Return a groupby agg function with the jitted function inline + 2. Return a groupby transform function with the jitted function inline Configurations specified in engine_kwargs apply to both the user's - function _AND_ the rolling apply function. + function _AND_ the groupby evaluation loop. 
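+ (As with the agg helper above, the compiled kernel is cached in NUMBA_FUNC_CACHE under (func, "groupby_transform"), so repeated calls with the same user function reuse it rather than recompiling.)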
Parameters ---------- @@ -202,16 +144,15 @@ def generate_numba_transform_func( ------- Numba function """ - nopython, nogil, parallel = get_jit_arguments(engine_kwargs) - - check_kwargs_and_nopython(kwargs, nopython) + nopython, nogil, parallel = get_jit_arguments(engine_kwargs, kwargs) validate_udf(func) + cache_key = (func, "groupby_transform") + if cache_key in NUMBA_FUNC_CACHE: + return NUMBA_FUNC_CACHE[cache_key] numba_func = jit_user_function(func, nopython, nogil, parallel) - numba = import_optional_dependency("numba") - if parallel: loop_range = numba.prange else: diff --git a/pandas/core/util/numba_.py b/pandas/core/util/numba_.py index b951cd4f0cc2a..f06dd10d0e497 100644 --- a/pandas/core/util/numba_.py +++ b/pandas/core/util/numba_.py @@ -24,37 +24,8 @@ def set_use_numba(enable: bool = False) -> None: GLOBAL_USE_NUMBA = enable -def check_kwargs_and_nopython( - kwargs: Optional[Dict] = None, nopython: Optional[bool] = None -) -> None: - """ - Validate that **kwargs and nopython=True was passed - https://github.com/numba/numba/issues/2916 - - Parameters - ---------- - kwargs : dict, default None - user passed keyword arguments to pass into the JITed function - nopython : bool, default None - nopython parameter - - Returns - ------- - None - - Raises - ------ - NumbaUtilError - """ - if kwargs and nopython: - raise NumbaUtilError( - "numba does not support kwargs with nopython=True: " - "https://github.com/numba/numba/issues/2916" - ) - - def get_jit_arguments( - engine_kwargs: Optional[Dict[str, bool]] = None + engine_kwargs: Optional[Dict[str, bool]] = None, kwargs: Optional[Dict] = None, ) -> Tuple[bool, bool, bool]: """ Return arguments to pass to numba.JIT, falling back on pandas default JIT settings. @@ -63,16 +34,27 @@ def get_jit_arguments( ---------- engine_kwargs : dict, default None user passed keyword arguments for numba.JIT + kwargs : dict, default None + user passed keyword arguments to pass into the JITed function Returns ------- (bool, bool, bool) nopython, nogil, parallel + + Raises + ------ + NumbaUtilError """ if engine_kwargs is None: engine_kwargs = {} nopython = engine_kwargs.get("nopython", True) + if kwargs and nopython: + raise NumbaUtilError( + "numba does not support kwargs with nopython=True: " + "https://github.com/numba/numba/issues/2916" + ) nogil = engine_kwargs.get("nogil", False) parallel = engine_kwargs.get("parallel", False) return nopython, nogil, parallel diff --git a/pandas/core/window/numba_.py b/pandas/core/window/numba_.py index aec294c3c84c2..c4858b6e5a4ab 100644 --- a/pandas/core/window/numba_.py +++ b/pandas/core/window/numba_.py @@ -6,7 +6,7 @@ from pandas.compat._optional import import_optional_dependency from pandas.core.util.numba_ import ( - check_kwargs_and_nopython, + NUMBA_FUNC_CACHE, get_jit_arguments, jit_user_function, ) @@ -42,14 +42,14 @@ def generate_numba_apply_func( ------- Numba function """ - nopython, nogil, parallel = get_jit_arguments(engine_kwargs) + nopython, nogil, parallel = get_jit_arguments(engine_kwargs, kwargs) - check_kwargs_and_nopython(kwargs, nopython) + cache_key = (func, "rolling_apply") + if cache_key in NUMBA_FUNC_CACHE: + return NUMBA_FUNC_CACHE[cache_key] numba_func = jit_user_function(func, nopython, nogil, parallel) - numba = import_optional_dependency("numba") - if parallel: loop_range = numba.prange else: diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py index 00fdf0813b027..21a7164411fb7 100644 --- a/pandas/core/window/rolling.py +++ 
b/pandas/core/window/rolling.py @@ -1374,14 +1374,7 @@ def apply( if maybe_use_numba(engine): if raw is False: raise ValueError("raw must be `True` when using the numba engine") - cache_key = (func, "rolling_apply") - if cache_key in NUMBA_FUNC_CACHE: - # Return an already compiled version of roll_apply if available - apply_func = NUMBA_FUNC_CACHE[cache_key] - else: - apply_func = generate_numba_apply_func( - args, kwargs, func, engine_kwargs - ) + apply_func = generate_numba_apply_func(args, kwargs, func, engine_kwargs) center = self.center elif engine in ("cython", None): if engine_kwargs is not None: @@ -1403,7 +1396,7 @@ def apply( center=center, floor=0, name=func, - use_numba_cache=engine == "numba", + use_numba_cache=maybe_use_numba(engine), raw=raw, original_func=func, args=args, From 98e4a2be8eddc9e5a4050d493935969685ae32dc Mon Sep 17 00:00:00 2001 From: Simon Hawkins Date: Wed, 16 Sep 2020 16:09:10 +0100 Subject: [PATCH 570/632] DOC: move release note for #36175 (pt1) (#36378) --- doc/source/whatsnew/v1.1.3.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/source/whatsnew/v1.1.3.rst b/doc/source/whatsnew/v1.1.3.rst index 72526140b6eb8..9bb063b2b1590 100644 --- a/doc/source/whatsnew/v1.1.3.rst +++ b/doc/source/whatsnew/v1.1.3.rst @@ -33,6 +33,7 @@ Fixed regressions - Fixed regression in :class:`IntegerArray` unary plus and minus operations raising a ``TypeError`` (:issue:`36063`) - Fixed regression in :meth:`Series.__getitem__` incorrectly raising when the input was a tuple (:issue:`35534`) - Fixed regression in :meth:`Series.__getitem__` incorrectly raising when the input was a frozenset (:issue:`35747`) +- Fixed regression in :meth:`read_excel` with ``engine="odf"`` causing ``UnboundLocalError`` in some cases where cells had nested child nodes (:issue:`36122`, :issue:`35802`) - ..
--------------------------------------------------------------------------- From 15285e76c0a96dae3bf60ac708438783fe242936 Mon Sep 17 00:00:00 2001 From: Simon Hawkins Date: Wed, 16 Sep 2020 16:09:44 +0100 Subject: [PATCH 571/632] DOC: move release note for #36175 (pt2) (#36379) --- doc/source/whatsnew/v1.2.0.rst | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index f398af6e4dd5e..3992e697db7e4 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -313,10 +313,9 @@ I/O - In :meth:`read_csv` `float_precision='round_trip'` now handles `decimal` and `thousands` parameters (:issue:`35365`) - :meth:`to_pickle` and :meth:`read_pickle` were closing user-provided file objects (:issue:`35679`) - :meth:`to_csv` passes compression arguments for `'gzip'` always to `gzip.GzipFile` (:issue:`28103`) -- :meth:`to_csv` did not support zip compression for binary file object not having a filename (:issue: `35058`) +- :meth:`to_csv` did not support zip compression for binary file object not having a filename (:issue:`35058`) - :meth:`to_csv` and :meth:`read_csv` did not honor `compression` and `encoding` for path-like objects that are internally converted to file-like objects (:issue:`35677`, :issue:`26124`, and :issue:`32392`) - :meth:`to_picke` and :meth:`read_pickle` did not support compression for file-objects (:issue:`26237`, :issue:`29054`, and :issue:`29570`) -- Bug in :meth:`read_excel` with `engine="odf"` caused ``UnboundLocalError`` in some cases where cells had nested child nodes (:issue:`36122`, and :issue:`35802`) Plotting ^^^^^^^^ From 876f0400a61d5480c5d007d76e8068a822e272b0 Mon Sep 17 00:00:00 2001 From: Chris Lynch Date: Wed, 16 Sep 2020 13:59:17 -0400 Subject: [PATCH 572/632] remove trailing commas for black update (#36399) Co-authored-by: Lynch --- pandas/tests/arrays/integer/test_arithmetic.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/pandas/tests/arrays/integer/test_arithmetic.py b/pandas/tests/arrays/integer/test_arithmetic.py index f549a7caeab1d..cf382dd5e37e0 100644 --- a/pandas/tests/arrays/integer/test_arithmetic.py +++ b/pandas/tests/arrays/integer/test_arithmetic.py @@ -279,9 +279,7 @@ def test_unary_minus_nullable_int(any_signed_nullable_int_dtype, source, target) tm.assert_extension_array_equal(result, expected) -@pytest.mark.parametrize( - "source", [[1, 2, 3], [1, 2, None], [-1, 0, 1]], -) +@pytest.mark.parametrize("source", [[1, 2, 3], [1, 2, None], [-1, 0, 1]]) def test_unary_plus_nullable_int(any_signed_nullable_int_dtype, source): dtype = any_signed_nullable_int_dtype expected = pd.array(source, dtype=dtype) From 70d618ca8cbf41a5ee295657d6e10e45d7d07454 Mon Sep 17 00:00:00 2001 From: Daniel Saxton <2658661+dsaxton@users.noreply.github.com> Date: Wed, 16 Sep 2020 21:31:08 -0500 Subject: [PATCH 573/632] BUG: Always cast to Categorical in lexsort_indexer (#36385) --- doc/source/whatsnew/v1.1.3.rst | 1 + pandas/core/sorting.py | 9 +-------- .../tests/frame/methods/test_sort_values.py | 20 +++++++++++++++++++ 3 files changed, 22 insertions(+), 8 deletions(-) diff --git a/doc/source/whatsnew/v1.1.3.rst b/doc/source/whatsnew/v1.1.3.rst index 9bb063b2b1590..3f8413bd492ca 100644 --- a/doc/source/whatsnew/v1.1.3.rst +++ b/doc/source/whatsnew/v1.1.3.rst @@ -45,6 +45,7 @@ Bug fixes - Bug in :func:`read_spss` where passing a ``pathlib.Path`` as ``path`` would raise a ``TypeError`` (:issue:`33666`) - Bug in :meth:`Series.str.startswith` and 
:meth:`Series.str.endswith` with ``category`` dtype not propagating ``na`` parameter (:issue:`36241`) - Bug in :class:`Series` constructor where integer overflow would occur for sufficiently large scalar inputs when an index was provided (:issue:`36291`) +- Bug in :meth:`DataFrame.sort_values` raising an ``AttributeError`` when sorting on a key that casts column to categorical dtype (:issue:`36383`) - Bug in :meth:`DataFrame.stack` raising a ``ValueError`` when stacking :class:`MultiIndex` columns based on position when the levels had duplicate names (:issue:`36353`) .. --------------------------------------------------------------------------- diff --git a/pandas/core/sorting.py b/pandas/core/sorting.py index dd6aadf570baa..ec62192464665 100644 --- a/pandas/core/sorting.py +++ b/pandas/core/sorting.py @@ -20,7 +20,6 @@ from pandas.core.dtypes.common import ( ensure_int64, ensure_platform_int, - is_categorical_dtype, is_extension_array_dtype, ) from pandas.core.dtypes.generic import ABCMultiIndex @@ -294,13 +293,7 @@ def lexsort_indexer( keys = [ensure_key_mapped(k, key) for k in keys] for k, order in zip(keys, orders): - # we are already a Categorical - if is_categorical_dtype(k): - cat = k - - # create the Categorical - else: - cat = Categorical(k, ordered=True) + cat = Categorical(k, ordered=True) if na_position not in ["last", "first"]: raise ValueError(f"invalid na_position: {na_position}") diff --git a/pandas/tests/frame/methods/test_sort_values.py b/pandas/tests/frame/methods/test_sort_values.py index c60e7e3b1bdb6..0ca232ec433e7 100644 --- a/pandas/tests/frame/methods/test_sort_values.py +++ b/pandas/tests/frame/methods/test_sort_values.py @@ -691,3 +691,23 @@ def test_sort_values_key_dict_axis(self): result = df.sort_values(1, key=lambda col: -col, axis=1) expected = df.loc[:, ::-1] tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("ordered", [True, False]) + def test_sort_values_key_casts_to_categorical(self, ordered): + # https://github.com/pandas-dev/pandas/issues/36383 + categories = ["c", "b", "a"] + df = pd.DataFrame({"x": [1, 1, 1], "y": ["a", "b", "c"]}) + + def sorter(key): + if key.name == "y": + return pd.Series( + pd.Categorical(key, categories=categories, ordered=ordered) + ) + return key + + result = df.sort_values(by=["x", "y"], key=sorter) + expected = pd.DataFrame( + {"x": [1, 1, 1], "y": ["c", "b", "a"]}, index=pd.Index([2, 1, 0]) + ) + + tm.assert_frame_equal(result, expected) From 1a3a2c18ff1dc4418e006ac73f15c2dfd6041d7e Mon Sep 17 00:00:00 2001 From: Erfan Nariman <34067903+erfannariman@users.noreply.github.com> Date: Thu, 17 Sep 2020 04:39:40 +0200 Subject: [PATCH 574/632] DEPR: DataFrame.lookup (#35224) --- doc/source/user_guide/indexing.rst | 22 ++++++++---- doc/source/whatsnew/v1.2.0.rst | 1 + pandas/core/frame.py | 15 +++++++- pandas/tests/frame/indexing/test_indexing.py | 36 +++++++++++++++----- 4 files changed, 58 insertions(+), 16 deletions(-) diff --git a/doc/source/user_guide/indexing.rst b/doc/source/user_guide/indexing.rst index 74abbc9503db0..b11baad1e3eb5 100644 --- a/doc/source/user_guide/indexing.rst +++ b/doc/source/user_guide/indexing.rst @@ -1480,17 +1480,27 @@ default value. s.get('a') # equivalent to s['a'] s.get('x', default=-1) -The :meth:`~pandas.DataFrame.lookup` method -------------------------------------------- +.. 
_indexing.lookup: + +Looking up values by index/column labels +---------------------------------------- Sometimes you want to extract a set of values given a sequence of row labels -and column labels, and the ``lookup`` method allows for this and returns a -NumPy array. For instance: +and column labels; this can be achieved by ``DataFrame.melt`` combined with filtering the corresponding +rows with ``DataFrame.loc``. For instance: .. ipython:: python - dflookup = pd.DataFrame(np.random.rand(20, 4), columns = ['A', 'B', 'C', 'D']) - dflookup.lookup(list(range(0, 10, 2)), ['B', 'C', 'A', 'B', 'D']) + df = pd.DataFrame({'col': ["A", "A", "B", "B"], + 'A': [80, 23, np.nan, 22], + 'B': [80, 55, 76, 67]}) + df + melt = df.melt('col') + melt = melt.loc[melt['col'] == melt['variable'], 'value'] + melt.reset_index(drop=True) + +Formerly this could be achieved with the dedicated ``DataFrame.lookup`` method +which was deprecated in version 1.2.0. .. _indexing.class: diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index 3992e697db7e4..2c95fd95ffc7b 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -210,6 +210,7 @@ Deprecations - Deprecated parameter ``inplace`` in :meth:`MultiIndex.set_codes` and :meth:`MultiIndex.set_levels` (:issue:`35626`) - Deprecated parameter ``dtype`` in :meth:`~Index.copy` method for all index classes. Use the :meth:`Index.astype` method instead for changing the dtype (:issue:`35853`) - Date parser functions :func:`~pandas.io.date_converters.parse_date_time`, :func:`~pandas.io.date_converters.parse_date_fields`, :func:`~pandas.io.date_converters.parse_all_fields` and :func:`~pandas.io.date_converters.generic_parser` from ``pandas.io.date_converters`` are deprecated and will be removed in a future version; use :func:`to_datetime` instead (:issue:`35741`) +- :meth:`DataFrame.lookup` is deprecated and will be removed in a future version, use :meth:`DataFrame.melt` and :meth:`DataFrame.loc` instead (:issue:`18682`) .. --------------------------------------------------------------------------- diff --git a/pandas/core/frame.py b/pandas/core/frame.py index bc0e55195fb3e..36dfe43bfd708 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -3842,10 +3842,15 @@ def _series(self): def lookup(self, row_labels, col_labels) -> np.ndarray: """ Label-based "fancy indexing" function for DataFrame. - Given equal-length arrays of row and column labels, return an array of the values corresponding to each (row, col) pair. + .. deprecated:: 1.2.0 + DataFrame.lookup is deprecated, + use DataFrame.melt and DataFrame.loc instead. + For an example see :meth:`~pandas.DataFrame.lookup` + in the user guide. + Parameters ---------- row_labels : sequence @@ -3858,6 +3863,14 @@ def lookup(self, row_labels, col_labels) -> np.ndarray: numpy.ndarray The found values. """ + msg = ( + "The 'lookup' method is deprecated and will be " + "removed in a future version. " + "You can use DataFrame.melt and DataFrame.loc " + "as a substitute."
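+ # (The melt/loc replacement is demonstrated in the user guide section added earlier in this patch.)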
+ ) + warnings.warn(msg, FutureWarning, stacklevel=2) + n = len(row_labels) if n != len(col_labels): raise ValueError("Row labels must have same size as column labels") diff --git a/pandas/tests/frame/indexing/test_indexing.py b/pandas/tests/frame/indexing/test_indexing.py index e4549dfb3e68d..b947be705a329 100644 --- a/pandas/tests/frame/indexing/test_indexing.py +++ b/pandas/tests/frame/indexing/test_indexing.py @@ -1340,7 +1340,8 @@ def test_lookup_float(self, float_frame): df = float_frame rows = list(df.index) * len(df.columns) cols = list(df.columns) * len(df.index) - result = df.lookup(rows, cols) + with tm.assert_produces_warning(FutureWarning): + result = df.lookup(rows, cols) expected = np.array([df.loc[r, c] for r, c in zip(rows, cols)]) tm.assert_numpy_array_equal(result, expected) @@ -1349,7 +1350,8 @@ def test_lookup_mixed(self, float_string_frame): df = float_string_frame rows = list(df.index) * len(df.columns) cols = list(df.columns) * len(df.index) - result = df.lookup(rows, cols) + with tm.assert_produces_warning(FutureWarning): + result = df.lookup(rows, cols) expected = np.array( [df.loc[r, c] for r, c in zip(rows, cols)], dtype=np.object_ @@ -1365,7 +1367,8 @@ def test_lookup_bool(self): "mask_c": [False, True, False, True], } ) - df["mask"] = df.lookup(df.index, "mask_" + df["label"]) + with tm.assert_produces_warning(FutureWarning): + df["mask"] = df.lookup(df.index, "mask_" + df["label"]) exp_mask = np.array( [df.loc[r, c] for r, c in zip(df.index, "mask_" + df["label"])] @@ -1376,13 +1379,16 @@ def test_lookup_bool(self): def test_lookup_raises(self, float_frame): with pytest.raises(KeyError, match="'One or more row labels was not found'"): - float_frame.lookup(["xyz"], ["A"]) + with tm.assert_produces_warning(FutureWarning): + float_frame.lookup(["xyz"], ["A"]) with pytest.raises(KeyError, match="'One or more column labels was not found'"): - float_frame.lookup([float_frame.index[0]], ["xyz"]) + with tm.assert_produces_warning(FutureWarning): + float_frame.lookup([float_frame.index[0]], ["xyz"]) with pytest.raises(ValueError, match="same size"): - float_frame.lookup(["a", "b", "c"], ["a"]) + with tm.assert_produces_warning(FutureWarning): + float_frame.lookup(["a", "b", "c"], ["a"]) def test_lookup_requires_unique_axes(self): # GH#33041 raise with a helpful error message @@ -1393,14 +1399,17 @@ def test_lookup_requires_unique_axes(self): # homogeneous-dtype case with pytest.raises(ValueError, match="requires unique index and columns"): - df.lookup(rows, cols) + with tm.assert_produces_warning(FutureWarning): + df.lookup(rows, cols) with pytest.raises(ValueError, match="requires unique index and columns"): - df.T.lookup(cols, rows) + with tm.assert_produces_warning(FutureWarning): + df.T.lookup(cols, rows) # heterogeneous dtype df["B"] = 0 with pytest.raises(ValueError, match="requires unique index and columns"): - df.lookup(rows, cols) + with tm.assert_produces_warning(FutureWarning): + df.lookup(rows, cols) def test_set_value(self, float_frame): for idx in float_frame.index: @@ -2232,3 +2241,12 @@ def test_object_casting_indexing_wraps_datetimelike(): assert blk.dtype == "m8[ns]" # we got the right block val = blk.iget((0, 0)) assert isinstance(val, pd.Timedelta) + + +def test_lookup_deprecated(): + # GH18262 + df = pd.DataFrame( + {"col": ["A", "A", "B", "B"], "A": [80, 23, np.nan, 22], "B": [80, 55, 76, 67]} + ) + with tm.assert_produces_warning(FutureWarning): + df.lookup(df.index, df["col"]) From b162cafeed28ea976c2d27bee3161c8841897fac Mon Sep 17 00:00:00 
2001 From: jbrockmendel Date: Wed, 16 Sep 2020 19:41:13 -0700 Subject: [PATCH 575/632] ENH/BUG: consistently cast strs to datetimelike for searchsorted (#36346) --- doc/source/whatsnew/v1.2.0.rst | 1 + pandas/core/arrays/datetimelike.py | 3 +- pandas/core/indexes/datetimelike.py | 8 ----- pandas/tests/arrays/test_datetimelike.py | 41 ++++++++++++++++++++++++ 4 files changed, 43 insertions(+), 10 deletions(-) diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index 2c95fd95ffc7b..6436b2eceb33f 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -119,6 +119,7 @@ Other enhancements - :meth:`DataFrame.explode` and :meth:`Series.explode` now support exploding of sets (:issue:`35614`) - `Styler` now allows direct CSS class name addition to individual data cells (:issue:`36159`) - :meth:`Rolling.mean()` and :meth:`Rolling.sum()` use Kahan summation to calculate the mean to avoid numerical problems (:issue:`10319`, :issue:`11645`, :issue:`13254`, :issue:`32761`, :issue:`36031`) +- :meth:`DatetimeIndex.searchsorted`, :meth:`TimedeltaIndex.searchsorted`, :meth:`PeriodIndex.searchsorted`, and :meth:`Series.searchsorted` with datetimelike dtypes will now try to cast string arguments (listlike and scalar) to the matching datetimelike type (:issue:`36346`) .. _whatsnew_120.api_breaking.python: diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index e8b1c12687584..15cbdf882eb7b 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -845,8 +845,7 @@ def _validate_searchsorted_value(self, value): if not is_list_like(value): value = self._validate_scalar(value, msg, cast_str=True) else: - # TODO: cast_str? we accept it for scalar - value = self._validate_listlike(value, "searchsorted") + value = self._validate_listlike(value, "searchsorted", cast_str=True) rv = self._unbox(value) return self._rebox_native(rv) diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index 5ba5732c710f7..984ab49cbc517 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -178,14 +178,6 @@ def take(self, indices, axis=0, allow_fill=True, fill_value=None, **kwargs): @doc(IndexOpsMixin.searchsorted, klass="Datetime-like Index") def searchsorted(self, value, side="left", sorter=None): - if isinstance(value, str): - raise TypeError( - "searchsorted requires compatible dtype or scalar, " - f"not {type(value).__name__}" - ) - if isinstance(value, Index): - value = value._data - return self._data.searchsorted(value, side=side, sorter=sorter) _can_hold_na = True diff --git a/pandas/tests/arrays/test_datetimelike.py b/pandas/tests/arrays/test_datetimelike.py index 83b98525d3e8a..f512b168d2795 100644 --- a/pandas/tests/arrays/test_datetimelike.py +++ b/pandas/tests/arrays/test_datetimelike.py @@ -252,6 +252,47 @@ def test_searchsorted(self): else: assert result == 10 + @pytest.mark.parametrize("box", [None, "index", "series"]) + def test_searchsorted_castable_strings(self, arr1d, box): + if isinstance(arr1d, DatetimeArray): + tz = arr1d.tz + if ( + tz is not None + and tz is not pytz.UTC + and not isinstance(tz, pytz._FixedOffset) + ): + # If we have e.g. tzutc(), when we cast to string and parse + # back we get pytz.UTC, and then consider them different timezones + # so incorrectly raise. 
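+ # (Concretely, assuming a tzutc-aware array: str(arr1d[1]) renders + # with a "+00:00" offset, which parses back as pytz.UTC and then + # fails the timezone equality check.)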
+ pytest.xfail(reason="timezone comparisons inconsistent") + + arr = arr1d + if box is None: + pass + elif box == "index": + # Test the equivalent Index.searchsorted method while we're here + arr = self.index_cls(arr) + else: + # Test the equivalent Series.searchsorted method while we're here + arr = pd.Series(arr) + + # scalar + result = arr.searchsorted(str(arr[1])) + assert result == 1 + + result = arr.searchsorted(str(arr[2]), side="right") + assert result == 3 + + result = arr.searchsorted([str(x) for x in arr[1:3]]) + expected = np.array([1, 2], dtype=np.intp) + tm.assert_numpy_array_equal(result, expected) + + with pytest.raises(TypeError): + arr.searchsorted("foo") + + with pytest.raises(TypeError): + arr.searchsorted([str(arr[1]), "baz"]) + def test_getitem_2d(self, arr1d): # 2d slicing on a 1D array expected = type(arr1d)(arr1d._data[:, np.newaxis], dtype=arr1d.dtype) From 3a15e47374762cbc89a6742c4239a069414dbbfc Mon Sep 17 00:00:00 2001 From: Daniel Saxton <2658661+dsaxton@users.noreply.github.com> Date: Thu, 17 Sep 2020 03:11:00 -0500 Subject: [PATCH 576/632] Bump flake8 version in pre-commit-config.yaml (#36412) --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index fcd0ecdc9fcd2..78e28aad5865b 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -5,7 +5,7 @@ repos: - id: black language_version: python3 - repo: https://gitlab.com/pycqa/flake8 - rev: 3.7.7 + rev: 3.8.3 hooks: - id: flake8 language: python_venv From f36437f05228669df3807116cecab4672771e0d7 Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Thu, 17 Sep 2020 09:06:47 -0700 Subject: [PATCH 577/632] REF: re-use validate_listlike for _convert_arr_indexer (#36415) --- pandas/core/indexes/datetimelike.py | 21 ++++++--------------- 1 file changed, 6 insertions(+), 15 deletions(-) diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index 984ab49cbc517..f1b5aa9e98bdb 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -8,7 +8,6 @@ from pandas._libs import NaT, Timedelta, iNaT, join as libjoin, lib from pandas._libs.tslibs import BaseOffset, Resolution, Tick, timezones -from pandas._libs.tslibs.parsing import DateParseError from pandas._typing import Callable, Label from pandas.compat.numpy import function as nv from pandas.errors import AbstractMethodError @@ -31,7 +30,6 @@ from pandas.core.arrays.datetimelike import DatetimeLikeArrayMixin from pandas.core.base import IndexOpsMixin import pandas.core.common as com -from pandas.core.construction import array as pd_array, extract_array import pandas.core.indexes.base as ibase from pandas.core.indexes.base import Index, _index_shared_docs from pandas.core.indexes.extension import ( @@ -586,19 +584,12 @@ def _wrap_joined_index(self, joined: np.ndarray, other): @doc(Index._convert_arr_indexer) def _convert_arr_indexer(self, keyarr): - if lib.infer_dtype(keyarr) == "string": - # Weak reasoning that indexer is a list of strings - # representing datetime or timedelta or period - try: - extension_arr = pd_array(keyarr, self.dtype) - except (ValueError, DateParseError): - # Fail to infer keyarr from self.dtype - return keyarr - - converted_arr = extract_array(extension_arr, extract_numpy=True) - else: - converted_arr = com.asarray_tuplesafe(keyarr) - return converted_arr + try: + return self._data._validate_listlike( + keyarr, "convert_arr_indexer", cast_str=True, allow_object=True + ) + except 
(ValueError, TypeError): + return com.asarray_tuplesafe(keyarr) class DatetimeTimedeltaMixin(DatetimeIndexOpsMixin, Int64Index): From 6e3f698358d497c4225e91eaca96d422054ac47b Mon Sep 17 00:00:00 2001 From: Joris Van den Bossche Date: Thu, 17 Sep 2020 18:07:48 +0200 Subject: [PATCH 578/632] REF: use BlockManager.to_native_types in formatting code (#36417) --- pandas/core/internals/managers.py | 7 +++++++ pandas/io/formats/csvs.py | 10 ++-------- 2 files changed, 9 insertions(+), 8 deletions(-) diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py index 3f446874ffd0e..865412f159ea1 100644 --- a/pandas/core/internals/managers.py +++ b/pandas/core/internals/managers.py @@ -631,6 +631,13 @@ def replace_list( bm._consolidate_inplace() return bm + def to_native_types(self, **kwargs) -> "BlockManager": + """ + Convert values to native types (strings / python objects) that are used + in formatting (repr / csv). + """ + return self.apply("to_native_types", **kwargs) + def is_consolidated(self) -> bool: """ Return True if more than one block with the same dtype diff --git a/pandas/io/formats/csvs.py b/pandas/io/formats/csvs.py index 1bda16d126905..403eead686f89 100644 --- a/pandas/io/formats/csvs.py +++ b/pandas/io/formats/csvs.py @@ -334,18 +334,12 @@ def _save_body(self) -> None: self._save_chunk(start_i, end_i) def _save_chunk(self, start_i: int, end_i: int) -> None: - ncols = self.obj.shape[-1] - data = [None] * ncols - # create the data for a chunk slicer = slice(start_i, end_i) - df = self.obj.iloc[slicer] - mgr = df._mgr - res = mgr.apply("to_native_types", **self._number_format) - for i in range(len(res.items)): - data[i] = res.iget_values(i) + res = df._mgr.to_native_types(**self._number_format) + data = [res.iget_values(i) for i in range(len(res.items))] ix = self.data_index.to_native_types(slicer=slicer, **self._number_format) libwriters.write_csv_rows(data, ix, self.nlevels, self.cols, self.writer) From be6908e577f72d8a2b04d3867faffbb003451b87 Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Thu, 17 Sep 2020 09:08:29 -0700 Subject: [PATCH 579/632] REF: re-use _maybe_promote for _is_convertible_to_index_for_join (#36416) --- pandas/core/indexes/datetimelike.py | 29 +++++------------------------ 1 file changed, 5 insertions(+), 24 deletions(-) diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index f1b5aa9e98bdb..498202bea90dc 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -848,11 +848,11 @@ def join( """ See Index.join """ - if self._is_convertible_to_index_for_join(other): - try: - other = type(self)(other) - except (TypeError, ValueError): - pass + pself, pother = self._maybe_promote(other) + if pself is not self or pother is not other: + return pself.join( + pother, how=how, level=level, return_indexers=return_indexers, sort=sort + ) this, other = self._maybe_utc_convert(other) return Index.join( @@ -881,25 +881,6 @@ def _maybe_utc_convert(self, other): other = other.tz_convert("UTC") return this, other - @classmethod - def _is_convertible_to_index_for_join(cls, other: Index) -> bool: - """ - return a boolean whether I can attempt conversion to a - DatetimeIndex/TimedeltaIndex - """ - if isinstance(other, cls): - return False - elif len(other) > 0 and other.inferred_type not in ( - "floating", - "mixed-integer", - "integer", - "integer-na", - "mixed-integer-float", - "mixed", - ): - return True - return False - # 
-------------------------------------------------------------------- # List-Like Methods From cd26fe2093458e6b20b73071b9dfdde4ae9bdd6a Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Thu, 17 Sep 2020 09:10:34 -0700 Subject: [PATCH 580/632] REF: _validate_foo pattern for IntervalArray (#36414) --- pandas/core/arrays/interval.py | 69 ++++++++++++++++++++++----------- pandas/core/indexes/interval.py | 19 ++------- 2 files changed, 51 insertions(+), 37 deletions(-) diff --git a/pandas/core/arrays/interval.py b/pandas/core/arrays/interval.py index 436a7dd062c4a..028472ad93e52 100644 --- a/pandas/core/arrays/interval.py +++ b/pandas/core/arrays/interval.py @@ -640,17 +640,10 @@ def fillna(self, value=None, method=None, limit=None): if limit is not None: raise TypeError("limit is not supported for IntervalArray.") - if not isinstance(value, Interval): - msg = ( - "'IntervalArray.fillna' only supports filling with a " - f"scalar 'pandas.Interval'. Got a '{type(value).__name__}' instead." - ) - raise TypeError(msg) - - self._check_closed_matches(value, name="value") + value_left, value_right = self._validate_fillna_value(value) - left = self.left.fillna(value=value.left) - right = self.right.fillna(value=value.right) + left = self.left.fillna(value=value_left) + right = self.right.fillna(value=value_right) return self._shallow_copy(left, right) @property @@ -845,18 +838,7 @@ def take(self, indices, allow_fill=False, fill_value=None, axis=None, **kwargs): fill_left = fill_right = fill_value if allow_fill: - if fill_value is None: - fill_left = fill_right = self.left._na_value - elif is_interval(fill_value): - self._check_closed_matches(fill_value, name="fill_value") - fill_left, fill_right = fill_value.left, fill_value.right - elif not is_scalar(fill_value) and notna(fill_value): - msg = ( - "'IntervalArray.fillna' only supports filling with a " - "'scalar pandas.Interval or NA'. " - f"Got a '{type(fill_value).__name__}' instead." - ) - raise ValueError(msg) + fill_left, fill_right = self._validate_fill_value(fill_value) left_take = take( self.left, indices, allow_fill=allow_fill, fill_value=fill_left @@ -867,6 +849,49 @@ def take(self, indices, allow_fill=False, fill_value=None, axis=None, **kwargs): return self._shallow_copy(left_take, right_take) + def _validate_fill_value(self, value): + if is_interval(value): + self._check_closed_matches(value, name="fill_value") + fill_left, fill_right = value.left, value.right + elif not is_scalar(value) and notna(value): + msg = ( + "'IntervalArray.fillna' only supports filling with a " + "'scalar pandas.Interval or NA'. " + f"Got a '{type(value).__name__}' instead." + ) + raise ValueError(msg) + else: + fill_left = fill_right = self.left._na_value + return fill_left, fill_right + + def _validate_fillna_value(self, value): + if not isinstance(value, Interval): + msg = ( + "'IntervalArray.fillna' only supports filling with a " + f"scalar 'pandas.Interval'. Got a '{type(value).__name__}' instead." 
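+ # (Note the contrast with _validate_fill_value above: take's + # fill_value may also be NA, while fillna requires a scalar + # Interval, hence the narrower message here.)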
+ ) + raise TypeError(msg) + + self._check_closed_matches(value, name="value") + return value.left, value.right + + def _validate_insert_value(self, value): + if isinstance(value, Interval): + if value.closed != self.closed: + raise ValueError( + "inserted item must be closed on the same side as the index" + ) + left_insert = value.left + right_insert = value.right + elif is_scalar(value) and isna(value): + # GH#18295 + left_insert = right_insert = value + else: + raise ValueError( + "can only insert Interval objects and NA into an IntervalIndex" + ) + return left_insert, right_insert + def value_counts(self, dropna=True): """ Returns a Series containing counts of each interval. diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index ad0a7ea32a1cc..9ef584f5b7fbc 100644 --- a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -333,7 +333,9 @@ def from_tuples( # -------------------------------------------------------------------- @Appender(Index._shallow_copy.__doc__) - def _shallow_copy(self, values=None, name: Label = lib.no_default): + def _shallow_copy( + self, values: Optional[IntervalArray] = None, name: Label = lib.no_default + ): name = self.name if name is lib.no_default else name cache = self._cache.copy() if values is None else {} if values is None: @@ -927,20 +929,7 @@ def insert(self, loc, item): ------- IntervalIndex """ - if isinstance(item, Interval): - if item.closed != self.closed: - raise ValueError( - "inserted item must be closed on the same side as the index" - ) - left_insert = item.left - right_insert = item.right - elif is_scalar(item) and isna(item): - # GH 18295 - left_insert = right_insert = item - else: - raise ValueError( - "can only insert Interval objects and NA into an IntervalIndex" - ) + left_insert, right_insert = self._data._validate_insert_value(item) new_left = self.left.insert(loc, left_insert) new_right = self.right.insert(loc, right_insert) From 81d24c777b7ea70309bf36f6b4180490d8dff360 Mon Sep 17 00:00:00 2001 From: Chris Barnes Date: Thu, 17 Sep 2020 12:34:10 -0400 Subject: [PATCH 581/632] Update isort version in pre-commit config (#36428) Previously, a mismatch between the isort versions specified by pre-commit and by the files used on CI made it impossible to make a commit which passed both pre-commit and CI lints. 
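For reference, the bumped hook can be exercised locally with pre-commit's stock CLI (a generic invocation, not something this patch adds): pre-commit run isort --all-files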
--- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 78e28aad5865b..309e22e71a523 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -25,7 +25,7 @@ repos: - file args: [--append-config=flake8/cython-template.cfg] - repo: https://github.com/pre-commit/mirrors-isort - rev: v4.3.21 + rev: v5.2.2 hooks: - id: isort language: python_venv From 5eb1adde8a6dc12fc120952133471dcdf93fd9a5 Mon Sep 17 00:00:00 2001 From: Daniel Saxton <2658661+dsaxton@users.noreply.github.com> Date: Thu, 17 Sep 2020 11:36:46 -0500 Subject: [PATCH 582/632] CLN: Clean series/test_arithmetic.py (#36406) --- pandas/tests/series/test_arithmetic.py | 98 +++++++++++--------------- 1 file changed, 42 insertions(+), 56 deletions(-) diff --git a/pandas/tests/series/test_arithmetic.py b/pandas/tests/series/test_arithmetic.py index a420a1f7d6bca..8fad6ee1cca8b 100644 --- a/pandas/tests/series/test_arithmetic.py +++ b/pandas/tests/series/test_arithmetic.py @@ -260,73 +260,59 @@ def test_sub_datetimelike_align(self): class TestSeriesFlexComparison: - def test_comparison_flex_basic(self): + @pytest.mark.parametrize("axis", [0, None, "index"]) + def test_comparison_flex_basic(self, axis, all_compare_operators): + op = all_compare_operators.strip("__") left = pd.Series(np.random.randn(10)) right = pd.Series(np.random.randn(10)) + result = getattr(left, op)(right, axis=axis) + expected = getattr(operator, op)(left, right) + tm.assert_series_equal(result, expected) - tm.assert_series_equal(left.eq(right), left == right) - tm.assert_series_equal(left.ne(right), left != right) - tm.assert_series_equal(left.le(right), left < right) - tm.assert_series_equal(left.lt(right), left <= right) - tm.assert_series_equal(left.gt(right), left > right) - tm.assert_series_equal(left.ge(right), left >= right) - - for axis in [0, None, "index"]: - tm.assert_series_equal(left.eq(right, axis=axis), left == right) - tm.assert_series_equal(left.ne(right, axis=axis), left != right) - tm.assert_series_equal(left.le(right, axis=axis), left < right) - tm.assert_series_equal(left.lt(right, axis=axis), left <= right) - tm.assert_series_equal(left.gt(right, axis=axis), left > right) - tm.assert_series_equal(left.ge(right, axis=axis), left >= right) + def test_comparison_bad_axis(self, all_compare_operators): + op = all_compare_operators.strip("__") + left = pd.Series(np.random.randn(10)) + right = pd.Series(np.random.randn(10)) msg = "No axis named 1 for object type" - for op in ["eq", "ne", "le", "le", "gt", "ge"]: - with pytest.raises(ValueError, match=msg): - getattr(left, op)(right, axis=1) + with pytest.raises(ValueError, match=msg): + getattr(left, op)(right, axis=1) - def test_comparison_flex_alignment(self): + @pytest.mark.parametrize( + "values, op", + [ + ([False, False, True, False], "eq"), + ([True, True, False, True], "ne"), + ([False, False, True, False], "le"), + ([False, False, False, False], "lt"), + ([False, True, True, False], "ge"), + ([False, True, False, False], "gt"), + ], + ) + def test_comparison_flex_alignment(self, values, op): left = Series([1, 3, 2], index=list("abc")) right = Series([2, 2, 2], index=list("bcd")) + result = getattr(left, op)(right) + expected = pd.Series(values, index=list("abcd")) + tm.assert_series_equal(result, expected) - exp = pd.Series([False, False, True, False], index=list("abcd")) - tm.assert_series_equal(left.eq(right), exp) - - exp = pd.Series([True, True, False, True], 
index=list("abcd")) - tm.assert_series_equal(left.ne(right), exp) - - exp = pd.Series([False, False, True, False], index=list("abcd")) - tm.assert_series_equal(left.le(right), exp) - - exp = pd.Series([False, False, False, False], index=list("abcd")) - tm.assert_series_equal(left.lt(right), exp) - - exp = pd.Series([False, True, True, False], index=list("abcd")) - tm.assert_series_equal(left.ge(right), exp) - - exp = pd.Series([False, True, False, False], index=list("abcd")) - tm.assert_series_equal(left.gt(right), exp) - - def test_comparison_flex_alignment_fill(self): + @pytest.mark.parametrize( + "values, op, fill_value", + [ + ([False, False, True, True], "eq", 2), + ([True, True, False, False], "ne", 2), + ([False, False, True, True], "le", 0), + ([False, False, False, True], "lt", 0), + ([True, True, True, False], "ge", 0), + ([True, True, False, False], "gt", 0), + ], + ) + def test_comparison_flex_alignment_fill(self, values, op, fill_value): left = Series([1, 3, 2], index=list("abc")) right = Series([2, 2, 2], index=list("bcd")) - - exp = pd.Series([False, False, True, True], index=list("abcd")) - tm.assert_series_equal(left.eq(right, fill_value=2), exp) - - exp = pd.Series([True, True, False, False], index=list("abcd")) - tm.assert_series_equal(left.ne(right, fill_value=2), exp) - - exp = pd.Series([False, False, True, True], index=list("abcd")) - tm.assert_series_equal(left.le(right, fill_value=0), exp) - - exp = pd.Series([False, False, False, True], index=list("abcd")) - tm.assert_series_equal(left.lt(right, fill_value=0), exp) - - exp = pd.Series([True, True, True, False], index=list("abcd")) - tm.assert_series_equal(left.ge(right, fill_value=0), exp) - - exp = pd.Series([True, True, False, False], index=list("abcd")) - tm.assert_series_equal(left.gt(right, fill_value=0), exp) + result = getattr(left, op)(right, fill_value=fill_value) + expected = pd.Series(values, index=list("abcd")) + tm.assert_series_equal(result, expected) class TestSeriesComparison: From a76f701b530c5851418591375d91bb1fdf146bb4 Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Thu, 17 Sep 2020 09:38:34 -0700 Subject: [PATCH 583/632] BUG: Categorical.sort_values inplace breaking views (#36404) --- pandas/core/arrays/categorical.py | 2 +- pandas/tests/arrays/categorical/test_sorting.py | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index 25073282ec0f6..ba7534082d0d6 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -1541,7 +1541,7 @@ def sort_values( sorted_idx = nargsort(self, ascending=ascending, na_position=na_position) if inplace: - self._codes = self._codes[sorted_idx] + self._codes[:] = self._codes[sorted_idx] else: codes = self._codes[sorted_idx] return self._from_backing_data(codes) diff --git a/pandas/tests/arrays/categorical/test_sorting.py b/pandas/tests/arrays/categorical/test_sorting.py index 2a0ef043bf9a9..9589216557cd5 100644 --- a/pandas/tests/arrays/categorical/test_sorting.py +++ b/pandas/tests/arrays/categorical/test_sorting.py @@ -66,7 +66,9 @@ def test_sort_values(self): # sort (inplace order) cat1 = cat.copy() + orig_codes = cat1._codes cat1.sort_values(inplace=True) + assert cat1._codes is orig_codes exp = np.array(["a", "b", "c", "d"], dtype=object) tm.assert_numpy_array_equal(cat1.__array__(), exp) tm.assert_index_equal(res.categories, cat.categories) From d4947a922812eca78684954f6e8ac2a417d1fb10 Mon Sep 17 00:00:00 2001 From: Maxim Ivanov 
<41443370+ivanovmg@users.noreply.github.com> Date: Thu, 17 Sep 2020 23:39:10 +0700 Subject: [PATCH 584/632] TYP: alias IndexLabel without Optional (#36401) --- pandas/_typing.py | 2 +- pandas/core/generic.py | 2 +- pandas/io/formats/csvs.py | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/pandas/_typing.py b/pandas/_typing.py index 7aef5c02e290f..16d81c0d39cbe 100644 --- a/pandas/_typing.py +++ b/pandas/_typing.py @@ -83,7 +83,7 @@ Axis = Union[str, int] Label = Optional[Hashable] -IndexLabel = Optional[Union[Label, Sequence[Label]]] +IndexLabel = Union[Label, Sequence[Label]] Level = Union[Label, int] Ordered = Optional[bool] JSONSerializable = Optional[Union[PythonScalar, List, Dict]] diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 814f307749d50..cc18b8681200f 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -3154,7 +3154,7 @@ def to_csv( columns: Optional[Sequence[Label]] = None, header: Union[bool_t, List[str]] = True, index: bool_t = True, - index_label: IndexLabel = None, + index_label: Optional[IndexLabel] = None, mode: str = "w", encoding: Optional[str] = None, compression: CompressionOptions = "infer", diff --git a/pandas/io/formats/csvs.py b/pandas/io/formats/csvs.py index 403eead686f89..4250a08f748d7 100644 --- a/pandas/io/formats/csvs.py +++ b/pandas/io/formats/csvs.py @@ -42,7 +42,7 @@ def __init__( cols: Optional[Sequence[Label]] = None, header: Union[bool, Sequence[Hashable]] = True, index: bool = True, - index_label: IndexLabel = None, + index_label: Optional[IndexLabel] = None, mode: str = "w", encoding: Optional[str] = None, errors: str = "strict", @@ -100,7 +100,7 @@ def index_label(self) -> IndexLabel: return self._index_label @index_label.setter - def index_label(self, index_label: IndexLabel) -> None: + def index_label(self, index_label: Optional[IndexLabel]) -> None: if index_label is not False: if index_label is None: index_label = self._get_index_label_from_obj() From 52c81a97fb628a54de915bcdaf5feea3107baf5a Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Thu, 17 Sep 2020 09:40:48 -0700 Subject: [PATCH 585/632] REF: implement putmask for CI/DTI/TDI/PI (#36400) --- pandas/core/arrays/categorical.py | 5 +++++ pandas/core/indexes/base.py | 3 --- pandas/core/indexes/category.py | 11 +++++++++++ pandas/core/indexes/datetimelike.py | 13 ++++++++++++- pandas/tests/indexes/common.py | 7 ++++--- 5 files changed, 32 insertions(+), 7 deletions(-) diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index ba7534082d0d6..1cb428218296f 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -1171,6 +1171,11 @@ def map(self, mapper): # ------------------------------------------------------------- # Validators; ideally these can be de-duplicated + def _validate_where_value(self, value): + if is_scalar(value): + return self._validate_fill_value(value) + return self._validate_listlike(value) + def _validate_insert_value(self, value) -> int: code = self.categories.get_indexer([value]) if (code == -1) and not (is_scalar(value) and isna(value)): diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 15944565cb254..a2f11160b2fdc 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -4232,9 +4232,6 @@ def putmask(self, mask, value): try: converted = self._validate_fill_value(value) np.putmask(values, mask, converted) - if is_period_dtype(self.dtype): - # .values cast to object, so we need to cast back - values = 
type(self)(values)._data return self._shallow_copy(values) except (ValueError, TypeError) as err: if is_object_dtype(self): diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py index 9e4714060e23e..d73b36eff69f3 100644 --- a/pandas/core/indexes/category.py +++ b/pandas/core/indexes/category.py @@ -422,6 +422,17 @@ def where(self, cond, other=None): cat = Categorical(values, dtype=self.dtype) return type(self)._simple_new(cat, name=self.name) + def putmask(self, mask, value): + try: + code_value = self._data._validate_where_value(value) + except (TypeError, ValueError): + return self.astype(object).putmask(mask, value) + + codes = self._data._ndarray.copy() + np.putmask(codes, mask, code_value) + cat = self._data._from_backing_data(codes) + return type(self)._simple_new(cat, name=self.name) + def reindex(self, target, method=None, level=None, limit=None, tolerance=None): """ Create index with target's values (move/add/delete values as necessary) diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index 498202bea90dc..122977eee99fb 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -474,7 +474,18 @@ def where(self, cond, other=None): raise TypeError(f"Where requires matching dtype, not {oth}") from err result = np.where(cond, values, other).astype("i8") - arr = type(self._data)._simple_new(result, dtype=self.dtype) + arr = self._data._from_backing_data(result) + return type(self)._simple_new(arr, name=self.name) + + def putmask(self, mask, value): + try: + value = self._data._validate_where_value(value) + except (TypeError, ValueError): + return self.astype(object).putmask(mask, value) + + result = self._data._ndarray.copy() + np.putmask(result, mask, value) + arr = self._data._from_backing_data(result) return type(self)._simple_new(arr, name=self.name) def _summary(self, name=None) -> str: diff --git a/pandas/tests/indexes/common.py b/pandas/tests/indexes/common.py index 11dc232af8de4..0e9e5c0b32d18 100644 --- a/pandas/tests/indexes/common.py +++ b/pandas/tests/indexes/common.py @@ -846,16 +846,17 @@ def test_map_str(self): def test_putmask_with_wrong_mask(self): # GH18368 index = self.create_index() + fill = index[0] msg = "putmask: mask and data must be the same size" with pytest.raises(ValueError, match=msg): - index.putmask(np.ones(len(index) + 1, np.bool_), 1) + index.putmask(np.ones(len(index) + 1, np.bool_), fill) with pytest.raises(ValueError, match=msg): - index.putmask(np.ones(len(index) - 1, np.bool_), 1) + index.putmask(np.ones(len(index) - 1, np.bool_), fill) with pytest.raises(ValueError, match=msg): - index.putmask("foo", 1) + index.putmask("foo", fill) @pytest.mark.parametrize("copy", [True, False]) @pytest.mark.parametrize("name", [None, "foo"]) From 28068daab2bbc5d10c2c6f84bb8e126e6306d88b Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Thu, 17 Sep 2020 09:46:12 -0700 Subject: [PATCH 586/632] REF: share __getitem__ for Categorical/PandasArray/DTA/TDA/PA (#36391) --- pandas/core/arrays/_mixins.py | 26 ++++++++++++++++++++++++++ pandas/core/arrays/categorical.py | 14 ++++---------- pandas/core/arrays/datetimelike.py | 23 +++++++---------------- pandas/core/arrays/numpy_.py | 14 ++++---------- 4 files changed, 41 insertions(+), 36 deletions(-) diff --git a/pandas/core/arrays/_mixins.py b/pandas/core/arrays/_mixins.py index 284dd31ffcb59..a947ab64f7380 100644 --- a/pandas/core/arrays/_mixins.py +++ b/pandas/core/arrays/_mixins.py @@ -2,6 +2,7 @@ import numpy as np +from 
pandas._libs import lib from pandas.compat.numpy import function as nv from pandas.errors import AbstractMethodError from pandas.util._decorators import cache_readonly, doc @@ -30,6 +31,12 @@ def _from_backing_data(self: _T, arr: np.ndarray) -> _T: """ raise AbstractMethodError(self) + def _box_func(self, x): + """ + Wrap numpy type in our dtype.type if necessary. + """ + return x + # ------------------------------------------------------------------------ def take( @@ -168,3 +175,22 @@ def _validate_setitem_key(self, key): def _validate_setitem_value(self, value): return value + + def __getitem__(self, key): + if lib.is_integer(key): + # fast-path + result = self._ndarray[key] + if self.ndim == 1: + return self._box_func(result) + return self._from_backing_data(result) + + key = self._validate_getitem_key(key) + result = self._ndarray[key] + if lib.is_scalar(result): + return self._box_func(result) + + result = self._from_backing_data(result) + return result + + def _validate_getitem_key(self, key): + return check_array_indexer(self, key) diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index 1cb428218296f..ef69d6565cfeb 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -1887,17 +1887,11 @@ def __getitem__(self, key): """ Return an item. """ - if isinstance(key, (int, np.integer)): - i = self._codes[key] - return self._box_func(i) - - key = check_array_indexer(self, key) - - result = self._codes[key] - if result.ndim > 1: + result = super().__getitem__(key) + if getattr(result, "ndim", 0) > 1: + result = result._ndarray deprecate_ndim_indexing(result) - return result - return self._from_backing_data(result) + return result def _validate_setitem_value(self, value): value = extract_array(value, extract_numpy=True) diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index 15cbdf882eb7b..a5b91afac338d 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -539,23 +539,11 @@ def __getitem__(self, key): This getitem defers to the underlying array, which by-definition can only handle list-likes, slices, and integer scalars """ - - if lib.is_integer(key): - # fast-path - result = self._ndarray[key] - if self.ndim == 1: - return self._box_func(result) - return self._from_backing_data(result) - - key = self._validate_getitem_key(key) - result = self._ndarray[key] + result = super().__getitem__(key) if lib.is_scalar(result): - return self._box_func(result) - - result = self._from_backing_data(result) + return result - freq = self._get_getitem_freq(key) - result._freq = freq + result._freq = self._get_getitem_freq(key) return result def _validate_getitem_key(self, key): @@ -572,7 +560,7 @@ def _validate_getitem_key(self, key): # this for now (would otherwise raise in check_array_indexer) pass else: - key = check_array_indexer(self, key) + key = super()._validate_getitem_key(key) return key def _get_getitem_freq(self, key): @@ -582,7 +570,10 @@ def _get_getitem_freq(self, key): is_period = is_period_dtype(self.dtype) if is_period: freq = self.freq + elif self.ndim != 1: + freq = None else: + key = self._validate_getitem_key(key) # maybe ndarray[bool] -> slice freq = None if isinstance(key, slice): if self.freq is not None and key.step is not None: diff --git a/pandas/core/arrays/numpy_.py b/pandas/core/arrays/numpy_.py index 61ffa28d31ba0..afcae2c5c8b43 100644 --- a/pandas/core/arrays/numpy_.py +++ b/pandas/core/arrays/numpy_.py @@ -19,7 +19,6 @@ from 
pandas.core.arrays._mixins import NDArrayBackedExtensionArray from pandas.core.arrays.base import ExtensionOpsMixin from pandas.core.construction import extract_array -from pandas.core.indexers import check_array_indexer from pandas.core.missing import backfill_1d, pad_1d @@ -248,16 +247,11 @@ def __array_ufunc__(self, ufunc, method: str, *inputs, **kwargs): # ------------------------------------------------------------------------ # Pandas ExtensionArray Interface - def __getitem__(self, item): - if isinstance(item, type(self)): - item = item._ndarray + def _validate_getitem_key(self, key): + if isinstance(key, type(self)): + key = key._ndarray - item = check_array_indexer(self, item) - - result = self._ndarray[item] - if not lib.is_scalar(item): - result = type(self)(result) - return result + return super()._validate_getitem_key(key) def _validate_setitem_value(self, value): value = extract_array(value, extract_numpy=True) From 042515d25b61d0b7ab6fdfe8376b5bfe15be2f73 Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Thu, 17 Sep 2020 09:47:20 -0700 Subject: [PATCH 587/632] CLN: remove unnecessary _convert_index_indexer (#36394) --- pandas/core/indexes/base.py | 17 +---------------- pandas/core/indexes/category.py | 4 ---- pandas/core/indexes/numeric.py | 9 --------- pandas/tests/indexes/test_numeric.py | 8 ++++++-- 4 files changed, 7 insertions(+), 31 deletions(-) diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index a2f11160b2fdc..11490e2e0be29 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -3252,7 +3252,7 @@ def _convert_listlike_indexer(self, keyarr): Return tuple-safe keys. """ if isinstance(keyarr, Index): - keyarr = self._convert_index_indexer(keyarr) + pass else: keyarr = self._convert_arr_indexer(keyarr) @@ -3275,21 +3275,6 @@ def _convert_arr_indexer(self, keyarr): keyarr = com.asarray_tuplesafe(keyarr) return keyarr - def _convert_index_indexer(self, keyarr): - """ - Convert an Index indexer to the appropriate dtype. - - Parameters - ---------- - keyarr : Index (or sub-class) - Indexer to convert. - - Returns - ------- - converted_keyarr : Index (or sub-class) - """ - return keyarr - def _convert_list_indexer(self, keyarr): """ Convert a list-like indexer to the appropriate dtype. diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py index d73b36eff69f3..c798ae0bd4e4d 100644 --- a/pandas/core/indexes/category.py +++ b/pandas/core/indexes/category.py @@ -583,10 +583,6 @@ def _convert_arr_indexer(self, keyarr): return self._shallow_copy(keyarr) - @doc(Index._convert_index_indexer) - def _convert_index_indexer(self, keyarr): - return self._shallow_copy(keyarr) - @doc(Index._maybe_cast_slice_bound) def _maybe_cast_slice_bound(self, label, side, kind): if kind == "loc": diff --git a/pandas/core/indexes/numeric.py b/pandas/core/indexes/numeric.py index f6859cbc4c0a2..026f128bae4be 100644 --- a/pandas/core/indexes/numeric.py +++ b/pandas/core/indexes/numeric.py @@ -308,15 +308,6 @@ def _convert_arr_indexer(self, keyarr): return com.asarray_tuplesafe(keyarr, dtype=dtype) - @doc(Index._convert_index_indexer) - def _convert_index_indexer(self, keyarr): - # Cast the indexer to uint64 if possible so - # that the values returned from indexing are - # also uint64. 
- if keyarr.is_integer(): - return keyarr.astype(np.uint64) - return keyarr - # ---------------------------------------------------------------- @classmethod diff --git a/pandas/tests/indexes/test_numeric.py b/pandas/tests/indexes/test_numeric.py index 1ffdbbc9afd3f..bbd72b2ac5d60 100644 --- a/pandas/tests/indexes/test_numeric.py +++ b/pandas/tests/indexes/test_numeric.py @@ -631,7 +631,11 @@ def test_range_float_union_dtype(): tm.assert_index_equal(result, expected) -def test_uint_index_does_not_convert_to_float64(): +@pytest.mark.parametrize( + "box", + [list, lambda x: np.array(x, dtype=object), lambda x: pd.Index(x, dtype=object)], +) +def test_uint_index_does_not_convert_to_float64(box): # https://github.com/pandas-dev/pandas/issues/28279 # https://github.com/pandas-dev/pandas/issues/28023 series = pd.Series( @@ -646,7 +650,7 @@ def test_uint_index_does_not_convert_to_float64(): ], ) - result = series.loc[[7606741985629028552, 17876870360202815256]] + result = series.loc[box([7606741985629028552, 17876870360202815256])] expected = UInt64Index( [7606741985629028552, 17876870360202815256, 17876870360202815256], From b4d0ae5016133271c1dae3082e3f3dd97684ecb0 Mon Sep 17 00:00:00 2001 From: Terji Petersen Date: Thu, 17 Sep 2020 17:50:59 +0100 Subject: [PATCH 588/632] PERF: StringArray construction (#36325) --- doc/source/whatsnew/v1.2.0.rst | 2 +- pandas/core/arrays/string_.py | 9 +++++++-- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index 6436b2eceb33f..6923b42d3340b 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -221,7 +221,7 @@ Deprecations Performance improvements ~~~~~~~~~~~~~~~~~~~~~~~~ -- Performance improvements when creating Series with dtype `str` or :class:`StringDtype` from array with many string elements (:issue:`36304`, :issue:`36317`) +- Performance improvements when creating Series with dtype `str` or :class:`StringDtype` from array with many string elements (:issue:`36304`, :issue:`36317`, :issue:`36325`) - Performance improvement in :meth:`GroupBy.agg` with the ``numba`` engine (:issue:`35759`) - Performance improvements when creating :meth:`pd.Series.map` from a huge dictionary (:issue:`34717`) - Performance improvement in :meth:`GroupBy.transform` with the ``numba`` engine (:issue:`36240`) diff --git a/pandas/core/arrays/string_.py b/pandas/core/arrays/string_.py index 381968f9724b6..cef35f2b1137c 100644 --- a/pandas/core/arrays/string_.py +++ b/pandas/core/arrays/string_.py @@ -199,13 +199,18 @@ def _from_sequence(cls, scalars, dtype=None, copy=False): assert dtype == "string" result = np.asarray(scalars, dtype="object") - # convert non-na-likes to str, and nan-likes to StringDtype.na_value result = lib.ensure_string_array( result, na_value=StringDtype.na_value, copy=copy ) - return cls(result) + # Manually creating new array avoids the validation step in the __init__, so is + # faster. Refactor need for validation? 
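+        # (Sketch of why skipping __init__ is safe here: ``object.__new__``
+        # allocates the instance without running ``StringArray.__init__``,
+        # so no second validation pass happens; ``lib.ensure_string_array``
+        # above has already produced an object-dtype ndarray whose elements
+        # are str or the NA value.)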
+ new_string_array = object.__new__(cls) + new_string_array._dtype = StringDtype() + new_string_array._ndarray = result + + return new_string_array @classmethod def _from_sequence_of_strings(cls, strings, dtype=None, copy=False): From 5481e6cb2548ebd8d5c4a18b32f9066564ceafb6 Mon Sep 17 00:00:00 2001 From: Zak Kohler Date: Thu, 17 Sep 2020 17:20:06 -0400 Subject: [PATCH 589/632] Fix typo in docstring 'handler' --> 'handle' (#36427) --- pandas/io/excel/_base.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pandas/io/excel/_base.py b/pandas/io/excel/_base.py index 46327daac2e43..45da2d7d28fab 100644 --- a/pandas/io/excel/_base.py +++ b/pandas/io/excel/_base.py @@ -50,7 +50,7 @@ If you want to pass in a path object, pandas accepts any ``os.PathLike``. By file-like object, we refer to objects with a ``read()`` method, - such as a file handler (e.g. via builtin ``open`` function) + such as a file handle (e.g. via builtin ``open`` function) or ``StringIO``. sheet_name : str, int, list, or None, default 0 Strings are used for sheet names. Integers are used in zero-indexed From 6537ad8ec001a566453d571fb62a7544af58ce18 Mon Sep 17 00:00:00 2001 From: Daniel Saxton <2658661+dsaxton@users.noreply.github.com> Date: Thu, 17 Sep 2020 17:03:03 -0500 Subject: [PATCH 590/632] ADMIN: Update stale PR action (#36382) --- .github/workflows/stale-pr.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/stale-pr.yml b/.github/workflows/stale-pr.yml index 0cbe4b7dd4582..a6aece34478d9 100644 --- a/.github/workflows/stale-pr.yml +++ b/.github/workflows/stale-pr.yml @@ -12,9 +12,9 @@ jobs: with: repo-token: ${{ secrets.GITHUB_TOKEN }} stale-pr-message: "This pull request is stale because it has been open for thirty days with no activity." - skip-stale-pr-message: false + skip-stale-pr-message: true stale-pr-label: "Stale" - exempt-pr-labels: "Needs Review,Blocked" + exempt-pr-labels: "Needs Review,Blocked,Needs Discussion" days-before-stale: 30 days-before-close: -1 remove-stale-when-updated: true From a607bd7de51b12bc7b77f9cb54b7514b5759cdef Mon Sep 17 00:00:00 2001 From: Irv Lustig Date: Thu, 17 Sep 2020 18:39:41 -0400 Subject: [PATCH 591/632] Fix documentation for new float_precision on read_csv (#36358) --- pandas/io/parsers.py | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py index 0f1cce273a146..43ffbe6bdd66c 100644 --- a/pandas/io/parsers.py +++ b/pandas/io/parsers.py @@ -338,9 +338,12 @@ option can improve performance because there is no longer any I/O overhead. float_precision : str, optional Specifies which converter the C engine should use for floating-point - values. The options are `None` or `high` for the ordinary converter, - `legacy` for the original lower precision pandas converter, and - `round_trip` for the round-trip converter. + values. The options are ``None`` or 'high' for the ordinary converter, + 'legacy' for the original lower precision pandas converter, and + 'round_trip' for the round-trip converter. + + .. versionchanged:: 1.2 + storage_options : dict, optional Extra options that make sense for a particular storage connection, e.g. host, port, username, password, etc., if using a URL that will @@ -349,7 +352,7 @@ a file-like buffer. See the fsspec and backend storage implementation docs for the set of allowed keys and values. - .. versionadded:: 1.2.0 + .. 
versionadded:: 1.2 Returns ------- @@ -2290,9 +2293,10 @@ def TextParser(*args, **kwds): can be inferred, there often will be a large parsing speed-up. float_precision : str, optional Specifies which converter the C engine should use for floating-point - values. The options are None for the ordinary converter, - 'high' for the high-precision converter, and 'round_trip' for the - round-trip converter. + values. The options are `None` or `high` for the ordinary converter, + `legacy` for the original lower precision pandas converter, and + `round_trip` for the round-trip converter. + .. versionchanged:: 1.2 """ kwds["engine"] = "python" From 970517ef9b4a3e01f31e40633bfe15f6d51d2211 Mon Sep 17 00:00:00 2001 From: Fangchen Li Date: Thu, 17 Sep 2020 18:11:43 -0500 Subject: [PATCH 592/632] BLD/CI fix arm64 build #36397 (#36403) --- .travis.yml | 6 +----- ci/deps/travis-37-arm64.yaml | 1 - ci/setup_env.sh | 3 ++- pandas/conftest.py | 3 +++ pandas/tests/arithmetic/test_datetime64.py | 1 + pandas/tests/frame/test_constructors.py | 1 + pandas/tests/groupby/test_groupby_dropna.py | 1 + pandas/tests/groupby/transform/test_transform.py | 1 + pandas/tests/indexes/common.py | 1 + pandas/tests/indexes/multi/test_duplicates.py | 1 + pandas/tests/indexes/multi/test_integrity.py | 1 + pandas/tests/indexes/multi/test_setops.py | 1 + pandas/tests/indexes/period/test_indexing.py | 1 + pandas/tests/indexing/interval/test_interval.py | 1 + .../tests/indexing/multiindex/test_chaining_and_caching.py | 1 + pandas/tests/indexing/test_chaining_and_caching.py | 1 + pandas/tests/indexing/test_loc.py | 1 + pandas/tests/test_sorting.py | 1 + pandas/tests/tseries/offsets/test_offsets_properties.py | 2 ++ pandas/tests/tseries/offsets/test_ticks.py | 1 + 20 files changed, 23 insertions(+), 7 deletions(-) diff --git a/.travis.yml b/.travis.yml index 93c8238bb5059..a38e90bbce8ba 100644 --- a/.travis.yml +++ b/.travis.yml @@ -42,7 +42,7 @@ matrix: - arch: arm64 env: - - JOB="3.7, arm64" PYTEST_WORKERS=8 ENV_FILE="ci/deps/travis-37-arm64.yaml" PATTERN="(not slow and not network and not clipboard)" + - JOB="3.7, arm64" PYTEST_WORKERS=8 ENV_FILE="ci/deps/travis-37-arm64.yaml" PATTERN="(not slow and not network and not clipboard and not arm_slow)" - env: - JOB="3.7, locale" ENV_FILE="ci/deps/travis-37-locale.yaml" PATTERN="((not slow and not network and not clipboard) or (single and db))" LOCALE_OVERRIDE="zh_CN.UTF-8" SQL="1" @@ -58,10 +58,6 @@ matrix: services: - mysql - postgresql - allow_failures: - - arch: arm64 - env: - - JOB="3.7, arm64" PYTEST_WORKERS=8 ENV_FILE="ci/deps/travis-37-arm64.yaml" PATTERN="(not slow and not network and not clipboard)" before_install: diff --git a/ci/deps/travis-37-arm64.yaml b/ci/deps/travis-37-arm64.yaml index d04b1ca0bdcfc..8df6104f43a50 100644 --- a/ci/deps/travis-37-arm64.yaml +++ b/ci/deps/travis-37-arm64.yaml @@ -1,6 +1,5 @@ name: pandas-dev channels: - - defaults - conda-forge dependencies: - python=3.7.* diff --git a/ci/setup_env.sh b/ci/setup_env.sh index aa43d8b7dd00a..961433204cfbb 100755 --- a/ci/setup_env.sh +++ b/ci/setup_env.sh @@ -42,8 +42,9 @@ else fi if [ "${TRAVIS_CPU_ARCH}" == "arm64" ]; then + sudo apt-get update sudo apt-get -y install xvfb - CONDA_URL="https://github.com/conda-forge/miniforge/releases/download/4.8.2-1/Miniforge3-4.8.2-1-Linux-aarch64.sh" + CONDA_URL="https://github.com/conda-forge/miniforge/releases/download/4.8.5-0/Miniforge3-4.8.5-0-Linux-aarch64.sh" else CONDA_URL="https://repo.continuum.io/miniconda/Miniconda3-latest-$CONDA_OS.sh" fi 
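The `arm_slow` marker introduced by this patch follows the standard pytest
custom-marker pattern: it is registered through the `pytest_configure` hook
(see the conftest.py hunk just below) and deselected on the arm64 build via
the `PATTERN` expression above, which the CI scripts hand to pytest's `-m`
option. A minimal self-contained sketch of that pattern; the test function
is hypothetical, not part of this patch:

    import pytest

    # conftest.py: register the marker so --strict-markers accepts it
    def pytest_configure(config):
        config.addinivalue_line(
            "markers", "arm_slow: mark a test as slow for arm64 architecture"
        )

    # a marked test; deselected when run as: pytest -m "not arm_slow"
    @pytest.mark.arm_slow
    def test_heavy_case():
        assert sum(range(10_000)) == 49_995_000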
diff --git a/pandas/conftest.py b/pandas/conftest.py index e79370e53ead6..604815d496f80 100644 --- a/pandas/conftest.py +++ b/pandas/conftest.py @@ -55,6 +55,9 @@ def pytest_configure(config): ) config.addinivalue_line("markers", "high_memory: mark a test as a high-memory only") config.addinivalue_line("markers", "clipboard: mark a pd.read_clipboard test") + config.addinivalue_line( + "markers", "arm_slow: mark a test as slow for arm64 architecture" + ) def pytest_addoption(parser): diff --git a/pandas/tests/arithmetic/test_datetime64.py b/pandas/tests/arithmetic/test_datetime64.py index 5dfaea7c77420..0dd389ed516c7 100644 --- a/pandas/tests/arithmetic/test_datetime64.py +++ b/pandas/tests/arithmetic/test_datetime64.py @@ -749,6 +749,7 @@ class TestDatetime64Arithmetic: # ------------------------------------------------------------- # Addition/Subtraction of timedelta-like + @pytest.mark.arm_slow def test_dt64arr_add_timedeltalike_scalar( self, tz_naive_fixture, two_hours, box_with_array ): diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py index eb334e811c5a4..63a2160e128ed 100644 --- a/pandas/tests/frame/test_constructors.py +++ b/pandas/tests/frame/test_constructors.py @@ -2588,6 +2588,7 @@ def test_to_frame_with_falsey_names(self): result = DataFrame(Series(name=0, dtype=object)).dtypes tm.assert_series_equal(result, expected) + @pytest.mark.arm_slow @pytest.mark.parametrize("dtype", [None, "uint8", "category"]) def test_constructor_range_dtype(self, dtype): expected = DataFrame({"A": [0, 1, 2, 3, 4]}, dtype=dtype or "int64") diff --git a/pandas/tests/groupby/test_groupby_dropna.py b/pandas/tests/groupby/test_groupby_dropna.py index 66db06eeebdfb..deb73acbb158a 100644 --- a/pandas/tests/groupby/test_groupby_dropna.py +++ b/pandas/tests/groupby/test_groupby_dropna.py @@ -238,6 +238,7 @@ def test_groupby_dropna_multi_index_dataframe_agg(dropna, tuples, outputs): tm.assert_frame_equal(grouped, expected) +@pytest.mark.arm_slow @pytest.mark.parametrize( "datetime1, datetime2", [ diff --git a/pandas/tests/groupby/transform/test_transform.py b/pandas/tests/groupby/transform/test_transform.py index c09f35526a6bf..97be039e16ebb 100644 --- a/pandas/tests/groupby/transform/test_transform.py +++ b/pandas/tests/groupby/transform/test_transform.py @@ -675,6 +675,7 @@ def test_groupby_cum_skipna(op, skipna, input, exp): tm.assert_series_equal(expected, result) +@pytest.mark.arm_slow @pytest.mark.parametrize( "op, args, targop", [ diff --git a/pandas/tests/indexes/common.py b/pandas/tests/indexes/common.py index 0e9e5c0b32d18..b01cafc9b0d5c 100644 --- a/pandas/tests/indexes/common.py +++ b/pandas/tests/indexes/common.py @@ -906,6 +906,7 @@ def test_is_unique(self): index_na_dup = index_na.insert(0, np.nan) assert index_na_dup.is_unique is False + @pytest.mark.arm_slow def test_engine_reference_cycle(self): # GH27585 index = self.create_index() diff --git a/pandas/tests/indexes/multi/test_duplicates.py b/pandas/tests/indexes/multi/test_duplicates.py index 9add4b478da47..aa2f37dad152c 100644 --- a/pandas/tests/indexes/multi/test_duplicates.py +++ b/pandas/tests/indexes/multi/test_duplicates.py @@ -241,6 +241,7 @@ def test_duplicated(idx_dup, keep, expected): tm.assert_numpy_array_equal(result, expected) +@pytest.mark.arm_slow def test_duplicated_large(keep): # GH 9125 n, k = 200, 5000 diff --git a/pandas/tests/indexes/multi/test_integrity.py b/pandas/tests/indexes/multi/test_integrity.py index c776a33717ccd..6a353fe1ad6e7 100644 --- 
a/pandas/tests/indexes/multi/test_integrity.py +++ b/pandas/tests/indexes/multi/test_integrity.py @@ -118,6 +118,7 @@ def test_consistency(): assert index.is_unique is False +@pytest.mark.arm_slow def test_hash_collisions(): # non-smoke test that we don't get hash collisions diff --git a/pandas/tests/indexes/multi/test_setops.py b/pandas/tests/indexes/multi/test_setops.py index d7427ee622977..6d4928547cad1 100644 --- a/pandas/tests/indexes/multi/test_setops.py +++ b/pandas/tests/indexes/multi/test_setops.py @@ -37,6 +37,7 @@ def test_intersection_base(idx, sort, klass): first.intersection([1, 2, 3], sort=sort) +@pytest.mark.arm_slow @pytest.mark.parametrize("klass", [MultiIndex, np.array, Series, list]) def test_union_base(idx, sort, klass): first = idx[::-1] diff --git a/pandas/tests/indexes/period/test_indexing.py b/pandas/tests/indexes/period/test_indexing.py index d2499b85ad181..f42499147cdbb 100644 --- a/pandas/tests/indexes/period/test_indexing.py +++ b/pandas/tests/indexes/period/test_indexing.py @@ -157,6 +157,7 @@ def test_getitem_list_periods(self): exp = ts.iloc[[1]] tm.assert_series_equal(ts[[Period("2012-01-02", freq="D")]], exp) + @pytest.mark.arm_slow def test_getitem_seconds(self): # GH#6716 didx = date_range(start="2013/01/01 09:00:00", freq="S", periods=4000) diff --git a/pandas/tests/indexing/interval/test_interval.py b/pandas/tests/indexing/interval/test_interval.py index 634020982b1c2..8976e87a1b75a 100644 --- a/pandas/tests/indexing/interval/test_interval.py +++ b/pandas/tests/indexing/interval/test_interval.py @@ -71,6 +71,7 @@ def test_non_matching(self): with pytest.raises(KeyError, match="^$"): s.loc[[-1, 3]] + @pytest.mark.arm_slow def test_large_series(self): s = Series( np.arange(1000000), index=IntervalIndex.from_breaks(np.arange(1000001)) diff --git a/pandas/tests/indexing/multiindex/test_chaining_and_caching.py b/pandas/tests/indexing/multiindex/test_chaining_and_caching.py index d3b13336e2a44..62c0171fe641f 100644 --- a/pandas/tests/indexing/multiindex/test_chaining_and_caching.py +++ b/pandas/tests/indexing/multiindex/test_chaining_and_caching.py @@ -49,6 +49,7 @@ def test_cache_updating(): assert result == 2 +@pytest.mark.arm_slow def test_indexer_caching(): # GH5727 # make sure that indexers are in the _internal_names_set diff --git a/pandas/tests/indexing/test_chaining_and_caching.py b/pandas/tests/indexing/test_chaining_and_caching.py index 9910ef1b04b1a..66835c586e6c7 100644 --- a/pandas/tests/indexing/test_chaining_and_caching.py +++ b/pandas/tests/indexing/test_chaining_and_caching.py @@ -132,6 +132,7 @@ def test_setitem_chained_setfault(self): result = df.head() tm.assert_frame_equal(result, expected) + @pytest.mark.arm_slow def test_detect_chained_assignment(self): pd.set_option("chained_assignment", "raise") diff --git a/pandas/tests/indexing/test_loc.py b/pandas/tests/indexing/test_loc.py index 9a6f30ec920cc..9b9bca77e17ec 100644 --- a/pandas/tests/indexing/test_loc.py +++ b/pandas/tests/indexing/test_loc.py @@ -785,6 +785,7 @@ def test_loc_non_unique(self): expected = DataFrame({"A": [2, 4, 5], "B": [4, 6, 7]}, index=[1, 1, 2]) tm.assert_frame_equal(result, expected) + @pytest.mark.arm_slow def test_loc_non_unique_memory_error(self): # GH 4280 diff --git a/pandas/tests/test_sorting.py b/pandas/tests/test_sorting.py index 98297474243e4..deb7434694d01 100644 --- a/pandas/tests/test_sorting.py +++ b/pandas/tests/test_sorting.py @@ -60,6 +60,7 @@ def test_int64_overflow(self): assert left[k] == v assert len(left) == len(right) + 
@pytest.mark.arm_slow def test_int64_overflow_moar(self): # GH9096 diff --git a/pandas/tests/tseries/offsets/test_offsets_properties.py b/pandas/tests/tseries/offsets/test_offsets_properties.py index ca14b202ef888..0fa9081d606b0 100644 --- a/pandas/tests/tseries/offsets/test_offsets_properties.py +++ b/pandas/tests/tseries/offsets/test_offsets_properties.py @@ -12,6 +12,7 @@ from hypothesis import assume, given, strategies as st from hypothesis.extra.dateutil import timezones as dateutil_timezones from hypothesis.extra.pytz import timezones as pytz_timezones +import pytest import pandas as pd from pandas import Timestamp @@ -84,6 +85,7 @@ # Offset-specific behaviour tests +@pytest.mark.arm_slow @given(gen_random_datetime, gen_yqm_offset) def test_on_offset_implementations(dt, offset): assume(not offset.normalize) diff --git a/pandas/tests/tseries/offsets/test_ticks.py b/pandas/tests/tseries/offsets/test_ticks.py index 10c239c683bc0..cc23f5f3201da 100644 --- a/pandas/tests/tseries/offsets/test_ticks.py +++ b/pandas/tests/tseries/offsets/test_ticks.py @@ -64,6 +64,7 @@ def test_tick_add_sub(cls, n, m): assert left - right == expected +@pytest.mark.arm_slow @pytest.mark.parametrize("cls", tick_classes) @settings(deadline=None) @example(n=2, m=3) From 234f5acc238c52adda88daf87b873f45196d23c0 Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Thu, 17 Sep 2020 16:20:29 -0700 Subject: [PATCH 593/632] BUG: FooIndex.insert casting datetimelike NAs incorrectly (#36374) --- pandas/core/arrays/interval.py | 4 ++-- pandas/core/dtypes/missing.py | 3 +++ pandas/core/indexes/numeric.py | 9 +++++++-- pandas/tests/indexes/interval/test_interval.py | 11 ++++++++++- pandas/tests/indexes/ranges/test_range.py | 6 +++++- pandas/tests/indexes/test_numeric.py | 8 ++++++-- 6 files changed, 33 insertions(+), 8 deletions(-) diff --git a/pandas/core/arrays/interval.py b/pandas/core/arrays/interval.py index 028472ad93e52..706b089e929a9 100644 --- a/pandas/core/arrays/interval.py +++ b/pandas/core/arrays/interval.py @@ -37,7 +37,7 @@ ABCPeriodIndex, ABCSeries, ) -from pandas.core.dtypes.missing import isna, notna +from pandas.core.dtypes.missing import is_valid_nat_for_dtype, isna, notna from pandas.core.algorithms import take, value_counts from pandas.core.arrays.base import ExtensionArray, _extension_array_shared_docs @@ -883,7 +883,7 @@ def _validate_insert_value(self, value): ) left_insert = value.left right_insert = value.right - elif is_scalar(value) and isna(value): + elif is_valid_nat_for_dtype(value, self.left.dtype): # GH#18295 left_insert = right_insert = value else: diff --git a/pandas/core/dtypes/missing.py b/pandas/core/dtypes/missing.py index d2e4974741b88..0b4aab0ac9d88 100644 --- a/pandas/core/dtypes/missing.py +++ b/pandas/core/dtypes/missing.py @@ -608,6 +608,9 @@ def is_valid_nat_for_dtype(obj, dtype: DtypeObj) -> bool: return not isinstance(obj, np.timedelta64) if dtype.kind == "m": return not isinstance(obj, np.datetime64) + if dtype.kind in ["i", "u", "f", "c"]: + # Numeric + return obj is not NaT and not isinstance(obj, (np.datetime64, np.timedelta64)) # must be PeriodDType return not isinstance(obj, (np.datetime64, np.timedelta64)) diff --git a/pandas/core/indexes/numeric.py b/pandas/core/indexes/numeric.py index 026f128bae4be..49a70600c09fa 100644 --- a/pandas/core/indexes/numeric.py +++ b/pandas/core/indexes/numeric.py @@ -28,7 +28,7 @@ ABCSeries, ABCUInt64Index, ) -from pandas.core.dtypes.missing import isna +from pandas.core.dtypes.missing import is_valid_nat_for_dtype, isna from pandas.core 
import algorithms import pandas.core.common as com @@ -164,7 +164,12 @@ def is_all_dates(self) -> bool: def insert(self, loc: int, item): # treat NA values as nans: if is_scalar(item) and isna(item): - item = self._na_value + if is_valid_nat_for_dtype(item, self.dtype): + item = self._na_value + else: + # NaT, np.datetime64("NaT"), np.timedelta64("NaT") + return self.astype(object).insert(loc, item) + return super().insert(loc, item) def _union(self, other, sort): diff --git a/pandas/tests/indexes/interval/test_interval.py b/pandas/tests/indexes/interval/test_interval.py index 42849e0bbb5c7..734c98af3d058 100644 --- a/pandas/tests/indexes/interval/test_interval.py +++ b/pandas/tests/indexes/interval/test_interval.py @@ -204,11 +204,20 @@ def test_insert(self, data): # GH 18295 (test missing) na_idx = IntervalIndex([np.nan], closed=data.closed) - for na in (np.nan, pd.NaT, None): + for na in [np.nan, None, pd.NA]: expected = data[:1].append(na_idx).append(data[1:]) result = data.insert(1, na) tm.assert_index_equal(result, expected) + if data.left.dtype.kind not in ["m", "M"]: + # trying to insert pd.NaT into a numeric-dtyped Index should cast/raise + msg = "can only insert Interval objects and NA into an IntervalIndex" + with pytest.raises(ValueError, match=msg): + result = data.insert(1, pd.NaT) + else: + result = data.insert(1, pd.NaT) + tm.assert_index_equal(result, expected) + def test_is_unique_interval(self, closed): """ Interval specific tests for is_unique in addition to base class tests diff --git a/pandas/tests/indexes/ranges/test_range.py b/pandas/tests/indexes/ranges/test_range.py index 172cd4a106ac1..899c8cbc0425d 100644 --- a/pandas/tests/indexes/ranges/test_range.py +++ b/pandas/tests/indexes/ranges/test_range.py @@ -100,10 +100,14 @@ def test_insert(self): # GH 18295 (test missing) expected = Float64Index([0, np.nan, 1, 2, 3, 4]) - for na in (np.nan, pd.NaT, None): + for na in [np.nan, None, pd.NA]: result = RangeIndex(5).insert(1, na) tm.assert_index_equal(result, expected) + result = RangeIndex(5).insert(1, pd.NaT) + expected = pd.Index([0, pd.NaT, 1, 2, 3, 4], dtype=object) + tm.assert_index_equal(result, expected) + def test_delete(self): idx = RangeIndex(5, name="Foo") diff --git a/pandas/tests/indexes/test_numeric.py b/pandas/tests/indexes/test_numeric.py index bbd72b2ac5d60..e1623a14c333e 100644 --- a/pandas/tests/indexes/test_numeric.py +++ b/pandas/tests/indexes/test_numeric.py @@ -84,10 +84,14 @@ def test_index_groupby(self): expected = {ex_keys[0]: idx[[0, 5]], ex_keys[1]: idx[[1, 4]]} tm.assert_dict_equal(idx.groupby(to_groupby), expected) - def test_insert(self, nulls_fixture): + def test_insert_na(self, nulls_fixture): # GH 18295 (test missing) index = self.create_index() - expected = Float64Index([index[0], np.nan] + list(index[1:])) + + if nulls_fixture is pd.NaT: + expected = Index([index[0], pd.NaT] + list(index[1:]), dtype=object) + else: + expected = Float64Index([index[0], np.nan] + list(index[1:])) result = index.insert(1, nulls_fixture) tm.assert_index_equal(result, expected) From d6678d194b2942fc060a36cf171cadc3d9bc414a Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Thu, 17 Sep 2020 16:21:40 -0700 Subject: [PATCH 594/632] REF: de-duplicate IntervalIndex compat code (#36372) --- pandas/core/indexes/base.py | 17 +++++++++++++---- pandas/core/indexes/interval.py | 34 +++------------------------------ pandas/core/indexing.py | 2 +- 3 files changed, 17 insertions(+), 36 deletions(-) diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py 
index 11490e2e0be29..222ae589ea7fc 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -3316,7 +3316,7 @@ def _can_reindex(self, indexer): ValueError if its a duplicate axis """ # trying to reindex on an axis with duplicates - if not self.is_unique and len(indexer): + if not self._index_as_unique and len(indexer): raise ValueError("cannot reindex from a duplicate axis") def reindex(self, target, method=None, level=None, limit=None, tolerance=None): @@ -3360,8 +3360,7 @@ def reindex(self, target, method=None, level=None, limit=None, tolerance=None): if self.equals(target): indexer = None else: - # check is_overlapping for IntervalIndex compat - if self.is_unique and not getattr(self, "is_overlapping", False): + if self._index_as_unique: indexer = self.get_indexer( target, method=method, limit=limit, tolerance=tolerance ) @@ -4759,11 +4758,21 @@ def get_indexer_for(self, target, **kwargs): numpy.ndarray List of indices. """ - if self.is_unique: + if self._index_as_unique: return self.get_indexer(target, **kwargs) indexer, _ = self.get_indexer_non_unique(target, **kwargs) return indexer + @property + def _index_as_unique(self): + """ + Whether we should treat this as unique for the sake of + get_indexer vs get_indexer_non_unique. + + For IntervalIndex compat. + """ + return self.is_unique + def _maybe_promote(self, other: "Index"): """ When dealing with an object-dtype Index and a non-object Index, see diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index 9ef584f5b7fbc..2f43787919faa 100644 --- a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -516,22 +516,6 @@ def is_overlapping(self) -> bool: # GH 23309 return self._engine.is_overlapping - def _can_reindex(self, indexer: np.ndarray) -> None: - """ - Check if we are allowing reindexing with this particular indexer. - - Parameters - ---------- - indexer : an integer indexer - - Raises - ------ - ValueError if its a duplicate axis - """ - # trying to reindex on an axis with duplicates - if self.is_overlapping and len(indexer): - raise ValueError("cannot reindex from an overlapping axis") - def _needs_i8_conversion(self, key) -> bool: """ Check if a given key needs i8 conversion. Conversion is necessary for @@ -839,21 +823,9 @@ def get_indexer_non_unique( return ensure_platform_int(indexer), ensure_platform_int(missing) - def get_indexer_for(self, target: AnyArrayLike, **kwargs) -> np.ndarray: - """ - Guaranteed return of an indexer even when overlapping. - - This dispatches to get_indexer or get_indexer_non_unique - as appropriate. - - Returns - ------- - numpy.ndarray - List of indices. 
- """ - if self.is_overlapping: - return self.get_indexer_non_unique(target)[0] - return self.get_indexer(target, **kwargs) + @property + def _index_as_unique(self): + return not self.is_overlapping def _convert_slice_indexer(self, key: slice, kind: str): if not (key.step is None or key.step == 1): diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index 9ecad335e2c3c..5f57fe1c9a56a 100644 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -1256,7 +1256,7 @@ def _get_listlike_indexer(self, key, axis: int, raise_missing: bool = False): ) return ax[indexer], indexer - if ax.is_unique and not getattr(ax, "is_overlapping", False): + if ax._index_as_unique: indexer = ax.get_indexer_for(keyarr) keyarr = ax.reindex(keyarr)[0] else: From 87974c00fa9ac79c94cd97faabf518926ce4d940 Mon Sep 17 00:00:00 2001 From: lacrosse91 Date: Fri, 18 Sep 2020 15:55:29 +0900 Subject: [PATCH 595/632] remove trailing comma (#36441) --- pandas/tests/groupby/transform/test_numba.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pandas/tests/groupby/transform/test_numba.py b/pandas/tests/groupby/transform/test_numba.py index fcaa5ab13599a..3a184bdd007c7 100644 --- a/pandas/tests/groupby/transform/test_numba.py +++ b/pandas/tests/groupby/transform/test_numba.py @@ -131,7 +131,7 @@ def func_1(values, index): @td.skip_if_no("numba", "0.46.0") @pytest.mark.parametrize( - "agg_func", [["min", "max"], "min", {"B": ["min", "max"], "C": "sum"}], + "agg_func", [["min", "max"], "min", {"B": ["min", "max"], "C": "sum"}] ) def test_multifunc_notimplimented(agg_func): data = DataFrame( From 1b895ef194401af9d2e137bb3fb1f0e680e99dfe Mon Sep 17 00:00:00 2001 From: Gautham <41098605+ahgamut@users.noreply.github.com> Date: Fri, 18 Sep 2020 12:50:05 +0000 Subject: [PATCH 596/632] DOC: read_excel skiprows documentation matches read_csv (#36435) (#36437) * DOC: updated read_excel skiprows documentation to match read_csv (GH36435) * TST: updated read_excel test with skiprows as int, callable (GH36435) --- pandas/io/excel/_base.py | 8 ++++++-- pandas/tests/io/excel/test_readers.py | 27 ++++++++++++++++++++++++++- 2 files changed, 32 insertions(+), 3 deletions(-) diff --git a/pandas/io/excel/_base.py b/pandas/io/excel/_base.py index 45da2d7d28fab..604b7e12ec243 100644 --- a/pandas/io/excel/_base.py +++ b/pandas/io/excel/_base.py @@ -120,8 +120,12 @@ Values to consider as True. false_values : list, default None Values to consider as False. -skiprows : list-like - Rows to skip at the beginning (0-indexed). +skiprows : list-like, int, or callable, optional + Line numbers to skip (0-indexed) or number of lines to skip (int) at the + start of the file. If callable, the callable function will be evaluated + against the row indices, returning True if the row should be skipped and + False otherwise. An example of a valid callable argument would be ``lambda + x: x in [0, 2]``. nrows : int, default None Number of rows to parse. 
na_values : scalar, str, list-like, or dict, default None diff --git a/pandas/tests/io/excel/test_readers.py b/pandas/tests/io/excel/test_readers.py index 33467be42dfd9..4bdcc5b327fa7 100644 --- a/pandas/tests/io/excel/test_readers.py +++ b/pandas/tests/io/excel/test_readers.py @@ -894,7 +894,7 @@ def test_read_excel_bool_header_arg(self, read_ext): with pytest.raises(TypeError, match=msg): pd.read_excel("test1" + read_ext, header=arg) - def test_read_excel_skiprows_list(self, read_ext): + def test_read_excel_skiprows(self, read_ext): # GH 4903 if pd.read_excel.keywords["engine"] == "pyxlsb": pytest.xfail("Sheets containing datetimes not supported by pyxlsb") @@ -920,6 +920,31 @@ def test_read_excel_skiprows_list(self, read_ext): ) tm.assert_frame_equal(actual, expected) + # GH36435 + actual = pd.read_excel( + "testskiprows" + read_ext, + sheet_name="skiprows_list", + skiprows=lambda x: x in [0, 2], + ) + tm.assert_frame_equal(actual, expected) + + actual = pd.read_excel( + "testskiprows" + read_ext, + sheet_name="skiprows_list", + skiprows=3, + names=["a", "b", "c", "d"], + ) + expected = DataFrame( + [ + # [1, 2.5, pd.Timestamp("2015-01-01"), True], + [2, 3.5, pd.Timestamp("2015-01-02"), False], + [3, 4.5, pd.Timestamp("2015-01-03"), False], + [4, 5.5, pd.Timestamp("2015-01-04"), True], + ], + columns=["a", "b", "c", "d"], + ) + tm.assert_frame_equal(actual, expected) + def test_read_excel_nrows(self, read_ext): # GH 16645 num_rows_to_pull = 5 From 46ad4b76898f6296af2c1fb830532932d485d20c Mon Sep 17 00:00:00 2001 From: lacrosse91 Date: Fri, 18 Sep 2020 22:00:15 +0900 Subject: [PATCH 597/632] CLN: 35925 rm trailing commas (#36446) --- pandas/tests/indexes/base_class/test_indexing.py | 2 +- pandas/tests/indexes/test_common.py | 4 +--- pandas/tests/indexes/test_numeric.py | 4 +--- 3 files changed, 3 insertions(+), 7 deletions(-) diff --git a/pandas/tests/indexes/base_class/test_indexing.py b/pandas/tests/indexes/base_class/test_indexing.py index 196c0401a72be..b2fa8f31ee5ec 100644 --- a/pandas/tests/indexes/base_class/test_indexing.py +++ b/pandas/tests/indexes/base_class/test_indexing.py @@ -14,7 +14,7 @@ def test_get_slice_bounds_within(self, kind, side, expected): @pytest.mark.parametrize("kind", ["getitem", "loc", None]) @pytest.mark.parametrize("side", ["left", "right"]) @pytest.mark.parametrize( - "data, bound, expected", [(list("abcdef"), "x", 6), (list("bcdefg"), "a", 0)], + "data, bound, expected", [(list("abcdef"), "x", 6), (list("bcdefg"), "a", 0)] ) def test_get_slice_bounds_outside(self, kind, side, expected, data, bound): index = Index(data) diff --git a/pandas/tests/indexes/test_common.py b/pandas/tests/indexes/test_common.py index aa6b395176b06..675ae388a28a4 100644 --- a/pandas/tests/indexes/test_common.py +++ b/pandas/tests/indexes/test_common.py @@ -411,9 +411,7 @@ def test_sort_values_invalid_na_position(index_with_missing, na_position): pytest.xfail("missing value sorting order not defined for index type") if na_position not in ["first", "last"]: - with pytest.raises( - ValueError, match=f"invalid na_position: {na_position}", - ): + with pytest.raises(ValueError, match=f"invalid na_position: {na_position}"): index_with_missing.sort_values(na_position=na_position) diff --git a/pandas/tests/indexes/test_numeric.py b/pandas/tests/indexes/test_numeric.py index e1623a14c333e..7fa7a571d2571 100644 --- a/pandas/tests/indexes/test_numeric.py +++ b/pandas/tests/indexes/test_numeric.py @@ -699,9 +699,7 @@ def test_get_slice_bounds_within(self, kind, side, expected): 
@pytest.mark.parametrize("kind", ["getitem", "loc", None]) @pytest.mark.parametrize("side", ["left", "right"]) - @pytest.mark.parametrize( - "bound, expected", [(-1, 0), (10, 6)], - ) + @pytest.mark.parametrize("bound, expected", [(-1, 0), (10, 6)]) def test_get_slice_bounds_outside(self, kind, side, expected, bound): index = Index(range(6)) result = index.get_slice_bound(bound, kind=kind, side=side) From 1f89b64da6b811cca4be8f9e9368d6cafacd5dbe Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Fri, 18 Sep 2020 06:01:34 -0700 Subject: [PATCH 598/632] REF: collect IntervalArray methods by topic (#36438) --- pandas/core/arrays/datetimelike.py | 68 +++--- pandas/core/arrays/interval.py | 370 +++++++++++++++-------------- 2 files changed, 230 insertions(+), 208 deletions(-) diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index a5b91afac338d..026aad5ad6eb7 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -67,6 +67,15 @@ DTScalarOrNaT = Union[DatetimeLikeScalar, NaTType] +class InvalidComparison(Exception): + """ + Raised by _validate_comparison_value to indicate to caller it should + return invalid_comparison. + """ + + pass + + def _datetimelike_array_cmp(cls, op): """ Wrap comparison operations to convert Timestamp/Timedelta/Period-like to @@ -75,36 +84,6 @@ def _datetimelike_array_cmp(cls, op): opname = f"__{op.__name__}__" nat_result = opname == "__ne__" - class InvalidComparison(Exception): - pass - - def _validate_comparison_value(self, other): - if isinstance(other, str): - try: - # GH#18435 strings get a pass from tzawareness compat - other = self._scalar_from_string(other) - except ValueError: - # failed to parse as Timestamp/Timedelta/Period - raise InvalidComparison(other) - - if isinstance(other, self._recognized_scalars) or other is NaT: - other = self._scalar_type(other) - self._check_compatible_with(other) - - elif not is_list_like(other): - raise InvalidComparison(other) - - elif len(other) != len(self): - raise ValueError("Lengths must match") - - else: - try: - other = self._validate_listlike(other, opname, allow_object=True) - except TypeError as err: - raise InvalidComparison(other) from err - - return other - @unpack_zerodim_and_defer(opname) def wrapper(self, other): if self.ndim > 1 and getattr(other, "shape", None) == self.shape: @@ -112,7 +91,7 @@ def wrapper(self, other): return op(self.ravel(), other.ravel()).reshape(self.shape) try: - other = _validate_comparison_value(self, other) + other = self._validate_comparison_value(other, opname) except InvalidComparison: return invalid_comparison(self, other, op) @@ -696,6 +675,33 @@ def _from_factorized(cls, values, original): # Validation Methods # TODO: try to de-duplicate these, ensure identical behavior + def _validate_comparison_value(self, other, opname: str): + if isinstance(other, str): + try: + # GH#18435 strings get a pass from tzawareness compat + other = self._scalar_from_string(other) + except ValueError: + # failed to parse as Timestamp/Timedelta/Period + raise InvalidComparison(other) + + if isinstance(other, self._recognized_scalars) or other is NaT: + other = self._scalar_type(other) # type: ignore[call-arg] + self._check_compatible_with(other) + + elif not is_list_like(other): + raise InvalidComparison(other) + + elif len(other) != len(self): + raise ValueError("Lengths must match") + + else: + try: + other = self._validate_listlike(other, opname, allow_object=True) + except TypeError as err: + raise InvalidComparison(other) 
from err + + return other + def _validate_fill_value(self, fill_value): """ If a fill_value is passed to `take` convert it to an i8 representation, diff --git a/pandas/core/arrays/interval.py b/pandas/core/arrays/interval.py index 706b089e929a9..ff9dd3f2a85bc 100644 --- a/pandas/core/arrays/interval.py +++ b/pandas/core/arrays/interval.py @@ -150,6 +150,9 @@ class IntervalArray(IntervalMixin, ExtensionArray): can_hold_na = True _na_value = _fill_value = np.nan + # --------------------------------------------------------------------- + # Constructors + def __new__( cls, data, @@ -263,32 +266,32 @@ def _from_factorized(cls, values, original): _interval_shared_docs["from_breaks"] = textwrap.dedent( """ - Construct an %(klass)s from an array of splits. + Construct an %(klass)s from an array of splits. - Parameters - ---------- - breaks : array-like (1-dimensional) - Left and right bounds for each interval. - closed : {'left', 'right', 'both', 'neither'}, default 'right' - Whether the intervals are closed on the left-side, right-side, both - or neither. - copy : bool, default False - Copy the data. - dtype : dtype or None, default None - If None, dtype will be inferred. + Parameters + ---------- + breaks : array-like (1-dimensional) + Left and right bounds for each interval. + closed : {'left', 'right', 'both', 'neither'}, default 'right' + Whether the intervals are closed on the left-side, right-side, both + or neither. + copy : bool, default False + Copy the data. + dtype : dtype or None, default None + If None, dtype will be inferred. - Returns - ------- - %(klass)s + Returns + ------- + %(klass)s - See Also - -------- - interval_range : Function to create a fixed frequency IntervalIndex. - %(klass)s.from_arrays : Construct from a left and right array. - %(klass)s.from_tuples : Construct from a sequence of tuples. + See Also + -------- + interval_range : Function to create a fixed frequency IntervalIndex. + %(klass)s.from_arrays : Construct from a left and right array. + %(klass)s.from_tuples : Construct from a sequence of tuples. - %(examples)s\ - """ + %(examples)s\ + """ ) @classmethod @@ -387,34 +390,34 @@ def from_arrays(cls, left, right, closed="right", copy=False, dtype=None): _interval_shared_docs["from_tuples"] = textwrap.dedent( """ - Construct an %(klass)s from an array-like of tuples. + Construct an %(klass)s from an array-like of tuples. - Parameters - ---------- - data : array-like (1-dimensional) - Array of tuples. - closed : {'left', 'right', 'both', 'neither'}, default 'right' - Whether the intervals are closed on the left-side, right-side, both - or neither. - copy : bool, default False - By-default copy the data, this is compat only and ignored. - dtype : dtype or None, default None - If None, dtype will be inferred. + Parameters + ---------- + data : array-like (1-dimensional) + Array of tuples. + closed : {'left', 'right', 'both', 'neither'}, default 'right' + Whether the intervals are closed on the left-side, right-side, both + or neither. + copy : bool, default False + By-default copy the data, this is compat only and ignored. + dtype : dtype or None, default None + If None, dtype will be inferred. - Returns - ------- - %(klass)s + Returns + ------- + %(klass)s - See Also - -------- - interval_range : Function to create a fixed frequency IntervalIndex. - %(klass)s.from_arrays : Construct an %(klass)s from a left and - right array. - %(klass)s.from_breaks : Construct an %(klass)s from an array of - splits. 
+ See Also + -------- + interval_range : Function to create a fixed frequency IntervalIndex. + %(klass)s.from_arrays : Construct an %(klass)s from a left and + right array. + %(klass)s.from_breaks : Construct an %(klass)s from an array of + splits. - %(examples)s\ - """ + %(examples)s\ + """ ) @classmethod @@ -489,9 +492,38 @@ def _validate(self): msg = "left side of interval must be <= right side" raise ValueError(msg) - # --------- - # Interface - # --------- + def _shallow_copy(self, left, right): + """ + Return a new IntervalArray with the replacement attributes + + Parameters + ---------- + left : Index + Values to be used for the left-side of the intervals. + right : Index + Values to be used for the right-side of the intervals. + """ + return self._simple_new(left, right, closed=self.closed, verify_integrity=False) + + # --------------------------------------------------------------------- + # Descriptive + + @property + def dtype(self): + return IntervalDtype(self.left.dtype) + + @property + def nbytes(self) -> int: + return self.left.nbytes + self.right.nbytes + + @property + def size(self) -> int: + # Avoid materializing self.values + return self.left.size + + # --------------------------------------------------------------------- + # EA Interface + def __iter__(self): return iter(np.asarray(self)) @@ -646,10 +678,6 @@ def fillna(self, value=None, method=None, limit=None): right = self.right.fillna(value=value_right) return self._shallow_copy(left, right) - @property - def dtype(self): - return IntervalDtype(self.left.dtype) - def astype(self, dtype, copy=True): """ Cast to an ExtensionArray or NumPy array with dtype 'dtype'. @@ -722,19 +750,6 @@ def _concat_same_type(cls, to_concat): right = np.concatenate([interval.right for interval in to_concat]) return cls._simple_new(left, right, closed=closed, copy=False) - def _shallow_copy(self, left, right): - """ - Return a new IntervalArray with the replacement attributes - - Parameters - ---------- - left : Index - Values to be used for the left-side of the intervals. - right : Index - Values to be used for the right-side of the intervals. - """ - return self._simple_new(left, right, closed=self.closed, verify_integrity=False) - def copy(self): """ Return a copy of the array. @@ -752,15 +767,6 @@ def copy(self): def isna(self): return isna(self.left) - @property - def nbytes(self) -> int: - return self.left.nbytes + self.right.nbytes - - @property - def size(self) -> int: - # Avoid materializing self.values - return self.left.size - def shift(self, periods: int = 1, fill_value: object = None) -> "IntervalArray": if not len(self) or periods == 0: return self.copy() @@ -912,7 +918,8 @@ def value_counts(self, dropna=True): # TODO: implement this is a non-naive way! return value_counts(np.asarray(self), dropna=dropna) - # Formatting + # --------------------------------------------------------------------- + # Rendering Methods def _format_data(self): @@ -966,6 +973,9 @@ def _format_space(self): space = " " * (len(type(self).__name__) + 1) return f"\n{space}" + # --------------------------------------------------------------------- + # Vectorized Interval Properties/Attributes + @property def left(self): """ @@ -982,6 +992,109 @@ def right(self): """ return self._right + @property + def length(self): + """ + Return an Index with entries denoting the length of each Interval in + the IntervalArray. + """ + try: + return self.right - self.left + except TypeError as err: + # length not defined for some types, e.g. 
string + msg = ( + "IntervalArray contains Intervals without defined length, " + "e.g. Intervals with string endpoints" + ) + raise TypeError(msg) from err + + @property + def mid(self): + """ + Return the midpoint of each Interval in the IntervalArray as an Index. + """ + try: + return 0.5 * (self.left + self.right) + except TypeError: + # datetime safe version + return self.left + 0.5 * self.length + + _interval_shared_docs["overlaps"] = textwrap.dedent( + """ + Check elementwise if an Interval overlaps the values in the %(klass)s. + + Two intervals overlap if they share a common point, including closed + endpoints. Intervals that only have an open endpoint in common do not + overlap. + + .. versionadded:: 0.24.0 + + Parameters + ---------- + other : %(klass)s + Interval to check against for an overlap. + + Returns + ------- + ndarray + Boolean array positionally indicating where an overlap occurs. + + See Also + -------- + Interval.overlaps : Check whether two Interval objects overlap. + + Examples + -------- + %(examples)s + >>> intervals.overlaps(pd.Interval(0.5, 1.5)) + array([ True, True, False]) + + Intervals that share closed endpoints overlap: + + >>> intervals.overlaps(pd.Interval(1, 3, closed='left')) + array([ True, True, True]) + + Intervals that only have an open endpoint in common do not overlap: + + >>> intervals.overlaps(pd.Interval(1, 2, closed='right')) + array([False, True, False]) + """ + ) + + @Appender( + _interval_shared_docs["overlaps"] + % dict( + klass="IntervalArray", + examples=textwrap.dedent( + """\ + >>> data = [(0, 1), (1, 3), (2, 4)] + >>> intervals = pd.arrays.IntervalArray.from_tuples(data) + >>> intervals + + [(0, 1], (1, 3], (2, 4]] + Length: 3, closed: right, dtype: interval[int64] + """ + ), + ) + ) + def overlaps(self, other): + if isinstance(other, (IntervalArray, ABCIntervalIndex)): + raise NotImplementedError + elif not isinstance(other, Interval): + msg = f"`other` must be Interval-like, got {type(other).__name__}" + raise TypeError(msg) + + # equality is okay if both endpoints are closed (overlap at a point) + op1 = le if (self.closed_left and other.closed_right) else lt + op2 = le if (other.closed_left and self.closed_right) else lt + + # overlaps is equivalent negation of two interval being disjoint: + # disjoint = (A.left > B.right) or (B.left > A.right) + # (simplifying the negation allows this to be done in less operations) + return op1(self.left, other.right) & op2(other.left, self.right) + + # --------------------------------------------------------------------- + @property def closed(self): """ @@ -1041,33 +1154,6 @@ def set_closed(self, closed): left=self.left, right=self.right, closed=closed, verify_integrity=False ) - @property - def length(self): - """ - Return an Index with entries denoting the length of each Interval in - the IntervalArray. - """ - try: - return self.right - self.left - except TypeError as err: - # length not defined for some types, e.g. string - msg = ( - "IntervalArray contains Intervals without defined length, " - "e.g. Intervals with string endpoints" - ) - raise TypeError(msg) from err - - @property - def mid(self): - """ - Return the midpoint of each Interval in the IntervalArray as an Index. 
-        """
-        try:
-            return 0.5 * (self.left + self.right)
-        except TypeError:
-            # datetime safe version
-            return self.left + 0.5 * self.length
-
     _interval_shared_docs[
         "is_non_overlapping_monotonic"
     ] = """
@@ -1102,7 +1188,9 @@ def is_non_overlapping_monotonic(self):
             or (self.left[:-1] >= self.right[1:]).all()
         )

+    # ---------------------------------------------------------------------
     # Conversion
+
    def __array__(self, dtype=None) -> np.ndarray:
         """
         Return the IntervalArray's data as a numpy array of Interval
@@ -1200,6 +1288,8 @@ def to_tuples(self, na_tuple=True):
             tuples = np.where(~self.isna(), tuples, np.nan)
         return tuples

+    # ---------------------------------------------------------------------
+
     @Appender(_extension_array_shared_docs["repeat"] % _shared_docs_kwargs)
     def repeat(self, repeats, axis=None):
         nv.validate_repeat(tuple(), dict(axis=axis))
@@ -1262,80 +1352,6 @@ def contains(self, other):
             other < self.right if self.open_right else other <= self.right
         )

-    _interval_shared_docs["overlaps"] = textwrap.dedent(
-        """
-        Check elementwise if an Interval overlaps the values in the %(klass)s.
-
-        Two intervals overlap if they share a common point, including closed
-        endpoints. Intervals that only have an open endpoint in common do not
-        overlap.
-
-        .. versionadded:: 0.24.0
-
-        Parameters
-        ----------
-        other : %(klass)s
-            Interval to check against for an overlap.
-
-        Returns
-        -------
-        ndarray
-            Boolean array positionally indicating where an overlap occurs.
-
-        See Also
-        --------
-        Interval.overlaps : Check whether two Interval objects overlap.
-
-        Examples
-        --------
-        %(examples)s
-        >>> intervals.overlaps(pd.Interval(0.5, 1.5))
-        array([ True,  True, False])
-
-        Intervals that share closed endpoints overlap:
-
-        >>> intervals.overlaps(pd.Interval(1, 3, closed='left'))
-        array([ True,  True,  True])
-
-        Intervals that only have an open endpoint in common do not overlap:
-
-        >>> intervals.overlaps(pd.Interval(1, 2, closed='right'))
-        array([False,  True, False])
-        """
-    )
-
-    @Appender(
-        _interval_shared_docs["overlaps"]
-        % dict(
-            klass="IntervalArray",
-            examples=textwrap.dedent(
-                """\
-        >>> data = [(0, 1), (1, 3), (2, 4)]
-        >>> intervals = pd.arrays.IntervalArray.from_tuples(data)
-        >>> intervals
-        <IntervalArray>
-        [(0, 1], (1, 3], (2, 4]]
-        Length: 3, closed: right, dtype: interval[int64]
-        """
-            ),
-        )
-    )
-    def overlaps(self, other):
-        if isinstance(other, (IntervalArray, ABCIntervalIndex)):
-            raise NotImplementedError
-        elif not isinstance(other, Interval):
-            msg = f"`other` must be Interval-like, got {type(other).__name__}"
-            raise TypeError(msg)
-
-        # equality is okay if both endpoints are closed (overlap at a point)
-        op1 = le if (self.closed_left and other.closed_right) else lt
-        op2 = le if (other.closed_left and self.closed_right) else lt
-
-        # overlaps is equivalent negation of two interval being disjoint:
-        # disjoint = (A.left > B.right) or (B.left > A.right)
-        # (simplifying the negation allows this to be done in less operations)
-        return op1(self.left, other.right) & op2(other.left, self.right)
-

 def maybe_convert_platform_interval(values):
     """

From a0e0571ba4d843b0b462f1894d06a81b77925af2 Mon Sep 17 00:00:00 2001
From: jbrockmendel
Date: Fri, 18 Sep 2020 06:05:09 -0700
Subject: [PATCH 599/632] REF: share insert between DTI/TDI/PI (#36439)

---
 pandas/core/indexes/datetimelike.py | 82 ++++++++++++++++-------------
 pandas/core/indexes/period.py       |  6 +--
 2 files changed, 47 insertions(+), 41 deletions(-)

diff --git a/pandas/core/indexes/datetimelike.py
b/pandas/core/indexes/datetimelike.py index 122977eee99fb..1ab40a76b30ff 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -575,6 +575,50 @@ def delete(self, loc): arr = type(self._data)._simple_new(new_i8s, dtype=self.dtype, freq=freq) return type(self)._simple_new(arr, name=self.name) + def insert(self, loc: int, item): + """ + Make new Index inserting new item at location + + Parameters + ---------- + loc : int + item : object + if not either a Python datetime or a numpy integer-like, returned + Index dtype will be object rather than datetime. + + Returns + ------- + new_index : Index + """ + item = self._data._validate_insert_value(item) + + freq = None + if is_period_dtype(self.dtype): + freq = self.freq + elif self.freq is not None: + # freq can be preserved on edge cases + if self.size: + if item is NaT: + pass + elif (loc == 0 or loc == -len(self)) and item + self.freq == self[0]: + freq = self.freq + elif (loc == len(self)) and item - self.freq == self[-1]: + freq = self.freq + else: + # Adding a single item to an empty index may preserve freq + if self.freq.is_on_offset(item): + freq = self.freq + + arr = self._data + item = arr._unbox_scalar(item) + item = arr._rebox_native(item) + + new_values = np.concatenate([arr._ndarray[:loc], [item], arr._ndarray[loc:]]) + new_arr = self._data._from_backing_data(new_values) + new_arr._freq = freq + + return type(self)._simple_new(new_arr, name=self.name) + # -------------------------------------------------------------------- # Join/Set Methods @@ -895,45 +939,11 @@ def _maybe_utc_convert(self, other): # -------------------------------------------------------------------- # List-Like Methods + @Appender(DatetimeIndexOpsMixin.insert.__doc__) def insert(self, loc, item): - """ - Make new Index inserting new item at location - - Parameters - ---------- - loc : int - item : object - if not either a Python datetime or a numpy integer-like, returned - Index dtype will be object rather than datetime. - - Returns - ------- - new_index : Index - """ if isinstance(item, str): # TODO: Why are strings special? # TODO: Should we attempt _scalar_from_string? 
return self.astype(object).insert(loc, item) - item = self._data._validate_insert_value(item) - - freq = None - # check freq can be preserved on edge cases - if self.freq is not None: - if self.size: - if item is NaT: - pass - elif (loc == 0 or loc == -len(self)) and item + self.freq == self[0]: - freq = self.freq - elif (loc == len(self)) and item - self.freq == self[-1]: - freq = self.freq - else: - # Adding a single item to an empty index may preserve freq - if self.freq.is_on_offset(item): - freq = self.freq - - item = self._data._unbox_scalar(item) - - new_i8s = np.concatenate([self[:loc].asi8, [item], self[loc:].asi8]) - arr = type(self._data)._simple_new(new_i8s, dtype=self.dtype, freq=freq) - return type(self)._simple_new(arr, name=self.name) + return DatetimeIndexOpsMixin.insert(self, loc, item) diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py index 42dce1bd53f22..900d3f9f1866b 100644 --- a/pandas/core/indexes/period.py +++ b/pandas/core/indexes/period.py @@ -436,11 +436,7 @@ def insert(self, loc, item): if not isinstance(item, Period) or self.freq != item.freq: return self.astype(object).insert(loc, item) - i8result = np.concatenate( - (self[:loc].asi8, np.array([item.ordinal]), self[loc:].asi8) - ) - arr = type(self._data)._simple_new(i8result, dtype=self.dtype) - return type(self)._simple_new(arr, name=self.name) + return DatetimeIndexOpsMixin.insert(self, loc, item) def join(self, other, how="left", level=None, return_indexers=False, sort=False): """ From a44a7048ef4aa22ef17bf8d87e18f707c2d19925 Mon Sep 17 00:00:00 2001 From: Marco Gorelli Date: Fri, 18 Sep 2020 20:33:01 +0100 Subject: [PATCH 600/632] CLN Upgrade pandas/core syntax (#36453) --- pandas/core/arrays/datetimes.py | 3 +-- pandas/core/arrays/sparse/array.py | 2 +- pandas/core/common.py | 9 +++------ pandas/core/computation/expr.py | 2 +- pandas/core/computation/pytables.py | 2 +- pandas/core/generic.py | 11 +++-------- pandas/core/groupby/base.py | 11 ++--------- pandas/core/groupby/generic.py | 2 +- pandas/core/indexes/base.py | 8 +++----- pandas/core/indexes/datetimelike.py | 2 +- pandas/core/reshape/merge.py | 2 +- 11 files changed, 18 insertions(+), 36 deletions(-) diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index 2073f110d536f..b1f98199f9fba 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -570,8 +570,7 @@ def __iter__(self): converted = ints_to_pydatetime( data[start_i:end_i], tz=self.tz, freq=self.freq, box="timestamp" ) - for v in converted: - yield v + yield from converted def astype(self, dtype, copy=True): # We handle diff --git a/pandas/core/arrays/sparse/array.py b/pandas/core/arrays/sparse/array.py index 853f7bb0b0d81..c88af77ea6189 100644 --- a/pandas/core/arrays/sparse/array.py +++ b/pandas/core/arrays/sparse/array.py @@ -1427,7 +1427,7 @@ def sparse_arithmetic_method(self, other): # TODO: look into _wrap_result if len(self) != len(other): raise AssertionError( - (f"length mismatch: {len(self)} vs. {len(other)}") + f"length mismatch: {len(self)} vs. 
{len(other)}" ) if not isinstance(other, SparseArray): dtype = getattr(other, "dtype", None) diff --git a/pandas/core/common.py b/pandas/core/common.py index 968fb180abcd0..b860c83f89cbc 100644 --- a/pandas/core/common.py +++ b/pandas/core/common.py @@ -62,8 +62,7 @@ def flatten(l): """ for el in l: if iterable_not_string(el): - for s in flatten(el): - yield s + yield from flatten(el) else: yield el @@ -434,10 +433,8 @@ def random_state(state=None): return np.random else: raise ValueError( - ( - "random_state must be an integer, array-like, a BitGenerator, " - "a numpy RandomState, or None" - ) + "random_state must be an integer, array-like, a BitGenerator, " + "a numpy RandomState, or None" ) diff --git a/pandas/core/computation/expr.py b/pandas/core/computation/expr.py index 09fc53716dda9..8c56f02c8d3cc 100644 --- a/pandas/core/computation/expr.py +++ b/pandas/core/computation/expr.py @@ -153,7 +153,7 @@ def _preparse( the ``tokenize`` module and ``tokval`` is a string. """ assert callable(f), "f must be callable" - return tokenize.untokenize((f(x) for x in tokenize_string(source))) + return tokenize.untokenize(f(x) for x in tokenize_string(source)) def _is_type(t): diff --git a/pandas/core/computation/pytables.py b/pandas/core/computation/pytables.py index 8dd7c1a22d0ae..d876c655421ef 100644 --- a/pandas/core/computation/pytables.py +++ b/pandas/core/computation/pytables.py @@ -554,7 +554,7 @@ def __init__( else: w = _validate_where(w) where[idx] = w - _where = " & ".join((f"({w})" for w in com.flatten(where))) + _where = " & ".join(f"({w})" for w in com.flatten(where)) else: _where = where diff --git a/pandas/core/generic.py b/pandas/core/generic.py index cc18b8681200f..0b9021b094cd7 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -1681,10 +1681,7 @@ def _get_label_or_level_values(self, key: str, axis: int = 0) -> np.ndarray: label_axis_name = "column" if axis == 0 else "index" raise ValueError( - ( - f"The {label_axis_name} label '{key}' " - f"is not unique.{multi_message}" - ) + f"The {label_axis_name} label '{key}' is not unique.{multi_message}" ) return values @@ -1725,10 +1722,8 @@ def _drop_labels_or_levels(self, keys, axis: int = 0): if invalid_keys: raise ValueError( - ( - "The following keys are not valid labels or " - f"levels for axis {axis}: {invalid_keys}" - ) + "The following keys are not valid labels or " + f"levels for axis {axis}: {invalid_keys}" ) # Compute levels and labels to drop diff --git a/pandas/core/groupby/base.py b/pandas/core/groupby/base.py index 9cfd13f95ca0e..2387427d15670 100644 --- a/pandas/core/groupby/base.py +++ b/pandas/core/groupby/base.py @@ -93,15 +93,8 @@ def _gotitem(self, key, ndim, subset=None): ) series_apply_allowlist = ( - ( - common_apply_allowlist - | { - "nlargest", - "nsmallest", - "is_monotonic_increasing", - "is_monotonic_decreasing", - } - ) + common_apply_allowlist + | {"nlargest", "nsmallest", "is_monotonic_increasing", "is_monotonic_decreasing"} ) | frozenset(["dtype", "unique"]) dataframe_apply_allowlist = common_apply_allowlist | frozenset(["dtypes", "corrwith"]) diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index a931221ef3ce1..bbccd22f2ae85 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -1212,7 +1212,7 @@ def _wrap_applied_output(self, keys, values, not_indexed_same=False): return result else: - all_indexed_same = all_indexes_same((x.index for x in values)) + all_indexed_same = all_indexes_same(x.index for x in values) # GH3596 # provide 
a reduction (Frame -> Series) if groups are diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 222ae589ea7fc..525caab7564a3 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -504,7 +504,7 @@ def _maybe_check_unique(self): if not self.is_unique: msg = """Index has duplicates.""" duplicates = self._format_duplicate_message() - msg += "\n{}".format(duplicates) + msg += f"\n{duplicates}" raise DuplicateLabelError(msg) @@ -4315,10 +4315,8 @@ def identical(self, other) -> bool: return ( self.equals(other) and all( - ( - getattr(self, c, None) == getattr(other, c, None) - for c in self._comparables - ) + getattr(self, c, None) == getattr(other, c, None) + for c in self._comparables ) and type(self) == type(other) ) diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index 1ab40a76b30ff..5aa72bb838756 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -398,7 +398,7 @@ def _partial_date_slice( if len(self) and ( (use_lhs and t1 < self[0] and t2 < self[0]) - or ((use_rhs and t1 > self[-1] and t2 > self[-1])) + or (use_rhs and t1 > self[-1] and t2 > self[-1]) ): # we are out of range raise KeyError diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py index d95355589fd0c..5a6518995c554 100644 --- a/pandas/core/reshape/merge.py +++ b/pandas/core/reshape/merge.py @@ -1837,7 +1837,7 @@ def _get_single_indexer(join_key, index, sort: bool = False): def _left_join_on_index(left_ax: Index, right_ax: Index, join_keys, sort: bool = False): if len(join_keys) > 1: if not ( - (isinstance(right_ax, MultiIndex) and len(join_keys) == right_ax.nlevels) + isinstance(right_ax, MultiIndex) and len(join_keys) == right_ax.nlevels ): raise AssertionError( "If more than one join key is given then " From ec93a02557581c9a7774d2400ddd7ef85855eaf6 Mon Sep 17 00:00:00 2001 From: Fangchen Li Date: Fri, 18 Sep 2020 16:30:41 -0500 Subject: [PATCH 601/632] CI: fix gbq test #36436 (#36443) --- ci/deps/travis-37-cov.yaml | 9 ++++----- ci/deps/travis-37-locale.yaml | 4 ++-- 2 files changed, 6 insertions(+), 7 deletions(-) diff --git a/ci/deps/travis-37-cov.yaml b/ci/deps/travis-37-cov.yaml index d031dc1cc062f..7d5104a58ce83 100644 --- a/ci/deps/travis-37-cov.yaml +++ b/ci/deps/travis-37-cov.yaml @@ -1,6 +1,5 @@ name: pandas-dev channels: - - defaults - conda-forge dependencies: - python=3.7.* @@ -15,7 +14,6 @@ dependencies: # pandas dependencies - beautifulsoup4 - botocore>=1.11 - - cython>=0.29.16 - dask - fastparquet>=0.3.2 - fsspec>=0.7.4 @@ -31,16 +29,18 @@ dependencies: - odfpy - openpyxl - pandas-gbq + - google-cloud-bigquery>=1.27.2 # GH 36436 - psycopg2 - pyarrow>=0.15.0 - - pymysql + - pymysql=0.7.11 - pytables - python-snappy + - python-dateutil - pytz - s3fs>=0.4.0 - scikit-learn - scipy - - sqlalchemy + - sqlalchemy=1.3.0 - statsmodels - xarray - xlrd @@ -51,5 +51,4 @@ dependencies: - brotlipy - coverage - pandas-datareader - - python-dateutil - pyxlsb diff --git a/ci/deps/travis-37-locale.yaml b/ci/deps/travis-37-locale.yaml index 8a0b5b043ceca..cd6341e80be24 100644 --- a/ci/deps/travis-37-locale.yaml +++ b/ci/deps/travis-37-locale.yaml @@ -25,10 +25,10 @@ dependencies: - numexpr - numpy - openpyxl - - pandas-gbq=0.12.0 + - pandas-gbq + - google-cloud-bigquery>=1.27.2 # GH 36436 - pyarrow>=0.17 - psycopg2=2.7 - - pyarrow>=0.15.0 # GH #35813 - pymysql=0.7.11 - pytables - python-dateutil From dea9ff3e1a1f5699f775d099827e2671009d40ea Mon Sep 17 00:00:00 2001 From: jbrockmendel 
Date: Fri, 18 Sep 2020 14:55:05 -0700 Subject: [PATCH 602/632] REGR: Series[numeric] comparison with str raising on numexpr path (#36440) --- doc/source/whatsnew/v1.1.3.rst | 2 +- pandas/core/indexes/base.py | 6 +++++- pandas/core/ops/array_ops.py | 5 +++++ pandas/tests/arithmetic/test_numeric.py | 20 ++++++++++++++++++++ pandas/tests/indexes/test_numpy_compat.py | 15 --------------- 5 files changed, 31 insertions(+), 17 deletions(-) diff --git a/doc/source/whatsnew/v1.1.3.rst b/doc/source/whatsnew/v1.1.3.rst index 3f8413bd492ca..1d386fa372ce1 100644 --- a/doc/source/whatsnew/v1.1.3.rst +++ b/doc/source/whatsnew/v1.1.3.rst @@ -34,7 +34,7 @@ Fixed regressions - Fixed regression in :meth:`Series.__getitem__` incorrectly raising when the input was a tuple (:issue:`35534`) - Fixed regression in :meth:`Series.__getitem__` incorrectly raising when the input was a frozenset (:issue:`35747`) - Fixed regression in :meth:`read_excel` with ``engine="odf"`` caused ``UnboundLocalError`` in some cases where cells had nested child nodes (:issue:`36122`,:issue:`35802`) -- +- Fixed regression in :class:`DataFrame` and :class:`Series` comparisons between numeric arrays and strings (:issue:`35700`,:issue:`36377`) .. --------------------------------------------------------------------------- diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 525caab7564a3..9cd28974f9385 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -138,10 +138,14 @@ def cmp_method(self, other): with np.errstate(all="ignore"): result = ops.comp_method_OBJECT_ARRAY(op, self._values, other) - else: + elif is_interval_dtype(self.dtype): with np.errstate(all="ignore"): result = op(self._values, np.asarray(other)) + else: + with np.errstate(all="ignore"): + result = ops.comparison_op(self._values, np.asarray(other), op) + if is_bool_dtype(result): return result return ops.invalid_comparison(self, other, op) diff --git a/pandas/core/ops/array_ops.py b/pandas/core/ops/array_ops.py index aab10cea33632..fd5f126051c53 100644 --- a/pandas/core/ops/array_ops.py +++ b/pandas/core/ops/array_ops.py @@ -23,6 +23,7 @@ is_bool_dtype, is_integer_dtype, is_list_like, + is_numeric_v_string_like, is_object_dtype, is_scalar, ) @@ -235,6 +236,10 @@ def comparison_op(left: ArrayLike, right: Any, op) -> ArrayLike: else: res_values = np.zeros(lvalues.shape, dtype=bool) + elif is_numeric_v_string_like(lvalues, rvalues): + # GH#36377 going through the numexpr path would incorrectly raise + return invalid_comparison(lvalues, rvalues, op) + elif is_object_dtype(lvalues.dtype): res_values = comp_method_OBJECT_ARRAY(op, lvalues, rvalues) diff --git a/pandas/tests/arithmetic/test_numeric.py b/pandas/tests/arithmetic/test_numeric.py index ecac08ffe3ba2..139401bdf5806 100644 --- a/pandas/tests/arithmetic/test_numeric.py +++ b/pandas/tests/arithmetic/test_numeric.py @@ -89,6 +89,26 @@ def test_compare_invalid(self): b.name = pd.Timestamp("2000-01-01") tm.assert_series_equal(a / b, 1 / (b / a)) + def test_numeric_cmp_string_numexpr_path(self, box): + # GH#36377, GH#35700 + xbox = box if box is not pd.Index else np.ndarray + + obj = pd.Series(np.random.randn(10 ** 5)) + obj = tm.box_expected(obj, box, transpose=False) + + result = obj == "a" + + expected = pd.Series(np.zeros(10 ** 5, dtype=bool)) + expected = tm.box_expected(expected, xbox, transpose=False) + tm.assert_equal(result, expected) + + result = obj != "a" + tm.assert_equal(result, ~expected) + + msg = "Invalid comparison between dtype=float64 and str" + with 
pytest.raises(TypeError, match=msg):
+            obj < "a"
+
     # ------------------------------------------------------------------
     # Numeric dtypes Arithmetic with Datetime/Timedelta Scalar

diff --git a/pandas/tests/indexes/test_numpy_compat.py b/pandas/tests/indexes/test_numpy_compat.py
index a83684464caf6..b71417b2a625d 100644
--- a/pandas/tests/indexes/test_numpy_compat.py
+++ b/pandas/tests/indexes/test_numpy_compat.py
@@ -114,18 +114,3 @@ def test_numpy_ufuncs_other(index, func):
     else:
         with pytest.raises(Exception):
             func(index)
-
-
-def test_elementwise_comparison_warning():
-    # https://github.com/pandas-dev/pandas/issues/22698#issuecomment-458968300
-    # np.array([1, 2]) == 'a' returns False, and produces a
-    # FutureWarning that it'll be [False, False] in the future.
-    # We just want to ensure that comes through.
-    # When NumPy dev actually enforces this change, we'll need to skip
-    # this test.
-    idx = Index([1, 2])
-    with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
-        result = idx == "a"
-
-    expected = np.array([False, False])
-    tm.assert_numpy_array_equal(result, expected)

From b03179cf3f5bd5c303b346d5c33c325cd4b260fc Mon Sep 17 00:00:00 2001
From: patrick <61934744+phofl@users.noreply.github.com>
Date: Fri, 18 Sep 2020 23:59:23 +0200
Subject: [PATCH 603/632] [DOC]: Add warning about rolling sums with large values (#36433)

---
 doc/source/user_guide/computation.rst | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/doc/source/user_guide/computation.rst b/doc/source/user_guide/computation.rst
index 151ef36be7c98..10e27606a1415 100644
--- a/doc/source/user_guide/computation.rst
+++ b/doc/source/user_guide/computation.rst
@@ -229,6 +229,15 @@ see the :ref:`groupby docs `.

 The API for window statistics is quite similar to the way one works with ``GroupBy`` objects, see the documentation :ref:`here `.

+.. warning::
+
+    When using ``rolling()`` and an associated function, the results are calculated with rolling sums. As a consequence,
+    when values in a window differ in magnitude by around :math:`1/np.finfo(np.double).eps`, the smaller values are
+    truncated. Note that large values may also affect windows which do not include these values. `Kahan summation
+    <https://en.wikipedia.org/wiki/Kahan_summation_algorithm>`__ is used
+    to compute the rolling sums to preserve accuracy as much as possible. The same holds true for ``Rolling.var()`` for
+    values differing in magnitude by around :math:`(1/np.finfo(np.double).eps)^{0.5}`.
+
 We work with ``rolling``, ``expanding`` and ``exponentially weighted`` data through the corresponding objects,
 :class:`~pandas.core.window.Rolling`, :class:`~pandas.core.window.Expanding` and :class:`~pandas.core.window.ExponentialMovingWindow`.
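
The truncation described by the warning above is easy to sketch. The following minimal example is illustrative only: the names ``huge`` and ``s`` are arbitrary, and whether any truncation is actually visible in the output depends on the pandas version and platform, since recent versions apply the Kahan summation the warning mentions::

    import numpy as np
    import pandas as pd

    # ~4.5e15 (2**52): at this magnitude the spacing between adjacent
    # float64 values reaches 1.0, so a contribution like 0.1 is rounded
    # away as soon as it is added to a running total of this size.
    huge = 1 / np.finfo(np.double).eps

    s = pd.Series([huge, 0.1, 0.1, -huge, 0.1, 0.1])

    # rolling(...).sum() slides one running total along the series, so the
    # roundoff picked up while `huge` is inside the window can leak into
    # later windows that no longer contain it.
    print(s.rolling(2).sum())

    # Recomputing each window from scratch sidesteps the shared running
    # total (slower, but each window is summed independently).
    print(s.rolling(2).apply(lambda w: w.sum(), raw=True))

The per-window recomputation trades speed for accuracy: the incremental rolling sum is O(n), while re-summing every window is O(n * window size).
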
From d2e958b129e46e1d9eae2c2e19f02d2d934bb7cc Mon Sep 17 00:00:00 2001 From: Daniel Saxton <2658661+dsaxton@users.noreply.github.com> Date: Fri, 18 Sep 2020 17:08:57 -0500 Subject: [PATCH 604/632] CI: Auto-label PRs for review (#36349) --- .github/PULL_REQUEST_TEMPLATE.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index 7c3870470f074..5e4d3b4ec38e4 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -1,3 +1,9 @@ +--- + +labels: "Needs Review" + +--- + - [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `black pandas` From 56eb1670fe4e8dc103fbcf573abe0f84ed7e5583 Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Fri, 18 Sep 2020 15:18:03 -0700 Subject: [PATCH 605/632] REF: _is_compatible_with_other -> _can_union_without_object_cast (#36384) --- doc/source/whatsnew/v1.2.0.rst | 2 +- pandas/core/indexes/base.py | 12 +++++-- pandas/core/indexes/datetimelike.py | 3 ++ pandas/core/indexes/numeric.py | 34 ++++++------------- pandas/tests/indexes/common.py | 18 ++++------ pandas/tests/indexes/datetimes/test_setops.py | 6 ++-- 6 files changed, 33 insertions(+), 42 deletions(-) diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index 6923b42d3340b..33a5b016a293f 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -367,7 +367,7 @@ Other - Bug in :meth:`DataFrame.replace` and :meth:`Series.replace` incorrectly raising ``AssertionError`` instead of ``ValueError`` when invalid parameter combinations are passed (:issue:`36045`) - Bug in :meth:`DataFrame.replace` and :meth:`Series.replace` with numeric values and string ``to_replace`` (:issue:`34789`) - Bug in :meth:`Series.transform` would give incorrect results or raise when the argument ``func`` was dictionary (:issue:`35811`) -- +- Bug in :meth:`Index.union` behaving differently depending on whether operand is a :class:`Index` or other list-like (:issue:`36384`) .. --------------------------------------------------------------------------- diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 9cd28974f9385..962a425afbd1e 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -2511,7 +2511,7 @@ def _union_incompatible_dtypes(self, other, sort): other = Index(other).astype(object, copy=False) return Index.union(this, other, sort=sort).astype(object, copy=False) - def _is_compatible_with_other(self, other) -> bool: + def _can_union_without_object_cast(self, other) -> bool: """ Check whether this and the other dtype are compatible with each other. 
Meaning a union can be formed between them without needing to be cast @@ -2587,8 +2587,9 @@ def union(self, other, sort=None): """ self._validate_sort_keyword(sort) self._assert_can_do_setop(other) + other = ensure_index(other) - if not self._is_compatible_with_other(other): + if not self._can_union_without_object_cast(other): return self._union_incompatible_dtypes(other, sort=sort) return self._union(other, sort=sort) @@ -5657,6 +5658,13 @@ def ensure_index( return MultiIndex.from_arrays(converted) else: + if isinstance(converted, np.ndarray) and converted.dtype == np.int64: + # Check for overflows if we should actually be uint64 + # xref GH#35481 + alt = np.asarray(index_like) + if alt.dtype == np.uint64: + converted = alt + index_like = converted else: # clean_index_list does the equivalent of copying diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index 5aa72bb838756..e2f59ceb41db5 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -622,6 +622,9 @@ def insert(self, loc: int, item): # -------------------------------------------------------------------- # Join/Set Methods + def _can_union_without_object_cast(self, other) -> bool: + return is_dtype_equal(self.dtype, other.dtype) + def _wrap_joined_index(self, joined: np.ndarray, other): assert other.dtype == self.dtype, (other.dtype, self.dtype) name = get_op_result_name(self, other) diff --git a/pandas/core/indexes/numeric.py b/pandas/core/indexes/numeric.py index 49a70600c09fa..574c9adc31808 100644 --- a/pandas/core/indexes/numeric.py +++ b/pandas/core/indexes/numeric.py @@ -15,19 +15,14 @@ is_float, is_float_dtype, is_integer_dtype, + is_numeric_dtype, is_scalar, is_signed_integer_dtype, is_unsigned_integer_dtype, needs_i8_conversion, pandas_dtype, ) -from pandas.core.dtypes.generic import ( - ABCFloat64Index, - ABCInt64Index, - ABCRangeIndex, - ABCSeries, - ABCUInt64Index, -) +from pandas.core.dtypes.generic import ABCSeries from pandas.core.dtypes.missing import is_valid_nat_for_dtype, isna from pandas.core import algorithms @@ -275,11 +270,9 @@ def _assert_safe_casting(cls, data, subarr): if not np.array_equal(data, subarr): raise TypeError("Unsafe NumPy casting, you must explicitly cast") - def _is_compatible_with_other(self, other) -> bool: - return super()._is_compatible_with_other(other) or all( - isinstance(obj, (ABCInt64Index, ABCFloat64Index, ABCRangeIndex)) - for obj in [self, other] - ) + def _can_union_without_object_cast(self, other) -> bool: + # See GH#26778, further casting may occur in NumericIndex._union + return other.dtype == "f8" or other.dtype == self.dtype Int64Index._add_numeric_methods() @@ -324,10 +317,9 @@ def _assert_safe_casting(cls, data, subarr): if not np.array_equal(data, subarr): raise TypeError("Unsafe NumPy casting, you must explicitly cast") - def _is_compatible_with_other(self, other) -> bool: - return super()._is_compatible_with_other(other) or all( - isinstance(obj, (ABCUInt64Index, ABCFloat64Index)) for obj in [self, other] - ) + def _can_union_without_object_cast(self, other) -> bool: + # See GH#26778, further casting may occur in NumericIndex._union + return other.dtype == "f8" or other.dtype == self.dtype UInt64Index._add_numeric_methods() @@ -432,13 +424,9 @@ def isin(self, values, level=None): self._validate_index_level(level) return algorithms.isin(np.array(self), values) - def _is_compatible_with_other(self, other) -> bool: - return super()._is_compatible_with_other(other) or all( - isinstance( - obj, 
(ABCInt64Index, ABCFloat64Index, ABCUInt64Index, ABCRangeIndex) - ) - for obj in [self, other] - ) + def _can_union_without_object_cast(self, other) -> bool: + # See GH#26778, further casting may occur in NumericIndex._union + return is_numeric_dtype(other.dtype) Float64Index._add_numeric_methods() diff --git a/pandas/tests/indexes/common.py b/pandas/tests/indexes/common.py index b01cafc9b0d5c..c40f7b1bc2120 100644 --- a/pandas/tests/indexes/common.py +++ b/pandas/tests/indexes/common.py @@ -5,7 +5,6 @@ import pytest from pandas._libs import iNaT -from pandas.compat.numpy import is_numpy_dev from pandas.errors import InvalidIndexError from pandas.core.dtypes.common import is_datetime64tz_dtype @@ -456,7 +455,7 @@ def test_set_ops_error_cases(self, case, method, index): with pytest.raises(TypeError, match=msg): getattr(index, method)(case) - def test_intersection_base(self, index, request): + def test_intersection_base(self, index): if isinstance(index, CategoricalIndex): return @@ -473,15 +472,6 @@ def test_intersection_base(self, index, request): # GH 10149 cases = [klass(second.values) for klass in [np.array, Series, list]] for case in cases: - # https://github.com/pandas-dev/pandas/issues/35481 - if ( - is_numpy_dev - and isinstance(case, Series) - and isinstance(index, UInt64Index) - ): - mark = pytest.mark.xfail(reason="gh-35481") - request.node.add_marker(mark) - result = first.intersection(case) assert tm.equalContents(result, second) @@ -507,7 +497,11 @@ def test_union_base(self, index): for case in cases: if not isinstance(index, CategoricalIndex): result = first.union(case) - assert tm.equalContents(result, everything) + assert tm.equalContents(result, everything), ( + result, + everything, + type(case), + ) if isinstance(index, MultiIndex): msg = "other must be a MultiIndex or a list of tuples" diff --git a/pandas/tests/indexes/datetimes/test_setops.py b/pandas/tests/indexes/datetimes/test_setops.py index f19e78323ab23..102c8f97a8a6b 100644 --- a/pandas/tests/indexes/datetimes/test_setops.py +++ b/pandas/tests/indexes/datetimes/test_setops.py @@ -46,10 +46,8 @@ def test_union3(self, sort, box): first = everything[:5] second = everything[5:] - # GH 10149 - expected = ( - first.astype("O").union(pd.Index(second.values, dtype="O")).astype("O") - ) + # GH 10149 support listlike inputs other than Index objects + expected = first.union(second, sort=sort) case = box(second.values) result = first.union(case, sort=sort) tm.assert_index_equal(result, expected) From 4caafd449884ead2b6a1b573b99ed9303303f62f Mon Sep 17 00:00:00 2001 From: Gautham <41098605+ahgamut@users.noreply.github.com> Date: Fri, 18 Sep 2020 22:20:31 +0000 Subject: [PATCH 606/632] CLN: Update files (as per #36450) to Python 3.7+ syntax (#36457) --- pandas/_config/display.py | 2 +- pandas/_testing.py | 3 +- pandas/_vendored/typing_extensions.py | 10 ++--- pandas/_version.py | 6 +-- pandas/io/clipboard/__init__.py | 4 +- pandas/io/formats/excel.py | 6 +-- pandas/io/html.py | 2 +- pandas/io/json/_json.py | 2 +- pandas/io/pytables.py | 6 +-- pandas/io/stata.py | 2 +- pandas/plotting/_matplotlib/core.py | 2 +- .../arrays/categorical/test_constructors.py | 2 +- .../tests/arrays/integer/test_construction.py | 2 +- pandas/tests/extension/decimal/array.py | 2 +- pandas/tests/extension/test_categorical.py | 2 +- pandas/tests/frame/test_constructors.py | 2 +- pandas/tests/groupby/test_groupby.py | 4 +- pandas/tests/groupby/test_grouping.py | 2 +- pandas/tests/indexes/test_base.py | 2 +- 
pandas/tests/indexing/test_indexing.py | 2 +- pandas/tests/io/formats/test_format.py | 2 +- pandas/tests/io/formats/test_to_csv.py | 20 ++++----- pandas/tests/io/formats/test_to_html.py | 2 +- pandas/tests/io/formats/test_to_latex.py | 2 +- pandas/tests/io/json/test_ujson.py | 4 +- pandas/tests/io/parser/test_c_parser_only.py | 2 +- pandas/tests/io/parser/test_common.py | 2 +- pandas/tests/io/parser/test_encoding.py | 2 +- pandas/tests/io/parser/test_read_fwf.py | 4 +- pandas/tests/io/pytables/common.py | 2 +- pandas/tests/io/test_common.py | 6 +-- pandas/tests/io/test_html.py | 4 +- pandas/tests/io/test_sql.py | 3 +- pandas/tests/reshape/test_get_dummies.py | 2 +- pandas/tests/scalar/test_na_scalar.py | 6 +-- pandas/tests/series/test_analytics.py | 2 +- pandas/tests/series/test_constructors.py | 5 +-- pandas/tests/series/test_dtypes.py | 8 ++-- pandas/tests/series/test_io.py | 3 +- scripts/validate_rst_title_capitalization.py | 7 ++- scripts/validate_unwanted_patterns.py | 4 +- setup.py | 2 +- versioneer.py | 43 ++++++++----------- 43 files changed, 94 insertions(+), 108 deletions(-) diff --git a/pandas/_config/display.py b/pandas/_config/display.py index ef319f4447565..e4553a2107f87 100644 --- a/pandas/_config/display.py +++ b/pandas/_config/display.py @@ -22,7 +22,7 @@ def detect_console_encoding() -> str: encoding = None try: encoding = sys.stdout.encoding or sys.stdin.encoding - except (AttributeError, IOError): + except (AttributeError, OSError): pass # try again for something better diff --git a/pandas/_testing.py b/pandas/_testing.py index 9db0c3496e290..cd34bec52daef 100644 --- a/pandas/_testing.py +++ b/pandas/_testing.py @@ -1960,8 +1960,7 @@ def index_subclass_makers_generator(): makeCategoricalIndex, makeMultiIndex, ] - for make_index_func in make_index_funcs: - yield make_index_func + yield from make_index_funcs def all_timeseries_index_generator(k=10): diff --git a/pandas/_vendored/typing_extensions.py b/pandas/_vendored/typing_extensions.py index 53df8da175a56..129d8998faccc 100644 --- a/pandas/_vendored/typing_extensions.py +++ b/pandas/_vendored/typing_extensions.py @@ -409,7 +409,7 @@ def __repr__(self): def __getitem__(self, parameters): item = typing._type_check( - parameters, "{} accepts only single type".format(self._name) + parameters, f"{self._name} accepts only single type" ) return _GenericAlias(self, (item,)) @@ -1671,7 +1671,7 @@ def __class_getitem__(cls, params): params = (params,) if not params and cls is not Tuple: raise TypeError( - "Parameter list to {}[...] cannot be empty".format(cls.__qualname__) + f"Parameter list to {cls.__qualname__}[...] cannot be empty" ) msg = "Parameters to generic types must be types." params = tuple(_type_check(p, msg) for p in params) @@ -2113,7 +2113,7 @@ def __class_getitem__(cls, params): return _AnnotatedAlias(origin, metadata) def __init_subclass__(cls, *args, **kwargs): - raise TypeError("Cannot subclass {}.Annotated".format(cls.__module__)) + raise TypeError(f"Cannot subclass {cls.__module__}.Annotated") def _strip_annotations(t): """Strips the annotations from a given type. @@ -2195,7 +2195,7 @@ def _tree_repr(self, tree): else: tp_repr = origin[0]._tree_repr(origin) metadata_reprs = ", ".join(repr(arg) for arg in metadata) - return "%s[%s, %s]" % (cls, tp_repr, metadata_reprs) + return f"{cls}[{tp_repr}, {metadata_reprs}]" def _subs_tree(self, tvars=None, args=None): # noqa if self is Annotated: @@ -2382,7 +2382,7 @@ def TypeAlias(self, parameters): It's invalid when used anywhere except as in the example above. 
""" - raise TypeError("{} is not subscriptable".format(self)) + raise TypeError(f"{self} is not subscriptable") elif sys.version_info[:2] >= (3, 7): diff --git a/pandas/_version.py b/pandas/_version.py index 66e756a4744c8..b3fa8530d09eb 100644 --- a/pandas/_version.py +++ b/pandas/_version.py @@ -74,7 +74,7 @@ def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False): stderr=(subprocess.PIPE if hide_stderr else None), ) break - except EnvironmentError: + except OSError: e = sys.exc_info()[1] if e.errno == errno.ENOENT: continue @@ -121,7 +121,7 @@ def git_get_keywords(versionfile_abs): # _version.py. keywords = {} try: - f = open(versionfile_abs, "r") + f = open(versionfile_abs) for line in f.readlines(): if line.strip().startswith("git_refnames ="): mo = re.search(r'=\s*"(.*)"', line) @@ -132,7 +132,7 @@ def git_get_keywords(versionfile_abs): if mo: keywords["full"] = mo.group(1) f.close() - except EnvironmentError: + except OSError: pass return keywords diff --git a/pandas/io/clipboard/__init__.py b/pandas/io/clipboard/__init__.py index d16955a98b62f..a8020f4bb4e4f 100644 --- a/pandas/io/clipboard/__init__.py +++ b/pandas/io/clipboard/__init__.py @@ -274,7 +274,7 @@ def copy_dev_clipboard(text): fo.write(text) def paste_dev_clipboard() -> str: - with open("/dev/clipboard", "rt") as fo: + with open("/dev/clipboard") as fo: content = fo.read() return content @@ -521,7 +521,7 @@ def determine_clipboard(): return init_windows_clipboard() if platform.system() == "Linux": - with open("/proc/version", "r") as f: + with open("/proc/version") as f: if "Microsoft" in f.read(): return init_wsl_clipboard() diff --git a/pandas/io/formats/excel.py b/pandas/io/formats/excel.py index bf4586a4b5b96..cc7b6b0bfea97 100644 --- a/pandas/io/formats/excel.py +++ b/pandas/io/formats/excel.py @@ -587,8 +587,7 @@ def _format_regular_rows(self): else: coloffset = 0 - for cell in self._generate_body(coloffset): - yield cell + yield from self._generate_body(coloffset) def _format_hierarchical_rows(self): has_aliases = isinstance(self.header, (tuple, list, np.ndarray, ABCIndex)) @@ -664,8 +663,7 @@ def _format_hierarchical_rows(self): ) gcolidx += 1 - for cell in self._generate_body(gcolidx): - yield cell + yield from self._generate_body(gcolidx) def _generate_body(self, coloffset: int): if self.styler is None: diff --git a/pandas/io/html.py b/pandas/io/html.py index 40fde224a7ae9..9a91b16e52723 100644 --- a/pandas/io/html.py +++ b/pandas/io/html.py @@ -719,7 +719,7 @@ def _build_doc(self): r = r.getroot() except AttributeError: pass - except (UnicodeDecodeError, IOError) as e: + except (UnicodeDecodeError, OSError) as e: # if the input is a blob of html goop if not is_url(self.io): r = fromstring(self.io, parser=parser) diff --git a/pandas/io/json/_json.py b/pandas/io/json/_json.py index c3977f89ac42f..a0ceb18c8bd20 100644 --- a/pandas/io/json/_json.py +++ b/pandas/io/json/_json.py @@ -821,7 +821,7 @@ def close(self): if self.should_close: try: self.open_stream.close() - except (IOError, AttributeError): + except (OSError, AttributeError): pass for file_handle in self.file_handles: file_handle.close() diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index e850a101a0a63..5e5a89d96f0e5 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -364,7 +364,7 @@ def read_hdf( if isinstance(path_or_buf, HDFStore): if not path_or_buf.is_open: - raise IOError("The HDFStore must be open for reading.") + raise OSError("The HDFStore must be open for reading.") store = path_or_buf auto_close 
= False @@ -693,7 +693,7 @@ def open(self, mode: str = "a", **kwargs): try: self._handle = tables.open_file(self._path, self._mode, **kwargs) - except IOError as err: # pragma: no cover + except OSError as err: # pragma: no cover if "can not be written" in str(err): print(f"Opening {self._path} in read-only mode") self._handle = tables.open_file(self._path, "r", **kwargs) @@ -724,7 +724,7 @@ def open(self, mode: str = "a", **kwargs): # trying to read from a non-existent file causes an error which # is not part of IOError, make it one if self._mode == "r" and "Unable to open/create file" in str(err): - raise IOError(str(err)) from err + raise OSError(str(err)) from err raise def close(self): diff --git a/pandas/io/stata.py b/pandas/io/stata.py index df5f6c3d53d30..a8af84e42918d 100644 --- a/pandas/io/stata.py +++ b/pandas/io/stata.py @@ -1077,7 +1077,7 @@ def close(self) -> None: """ close the handle if its open """ try: self.path_or_buf.close() - except IOError: + except OSError: pass def _set_encoding(self) -> None: diff --git a/pandas/plotting/_matplotlib/core.py b/pandas/plotting/_matplotlib/core.py index 602b42022f561..0c64ea824996f 100644 --- a/pandas/plotting/_matplotlib/core.py +++ b/pandas/plotting/_matplotlib/core.py @@ -342,7 +342,7 @@ def _setup_subplots(self): valid_log = {False, True, "sym", None} input_log = {self.logx, self.logy, self.loglog} if input_log - valid_log: - invalid_log = next(iter((input_log - valid_log))) + invalid_log = next(iter(input_log - valid_log)) raise ValueError( f"Boolean, None and 'sym' are valid options, '{invalid_log}' is given." ) diff --git a/pandas/tests/arrays/categorical/test_constructors.py b/pandas/tests/arrays/categorical/test_constructors.py index 89fbfbd5b8324..e200f13652a84 100644 --- a/pandas/tests/arrays/categorical/test_constructors.py +++ b/pandas/tests/arrays/categorical/test_constructors.py @@ -277,7 +277,7 @@ def test_constructor_with_generator(self): # returned a scalar for a generator exp = Categorical([0, 1, 2]) - cat = Categorical((x for x in [0, 1, 2])) + cat = Categorical(x for x in [0, 1, 2]) tm.assert_categorical_equal(cat, exp) cat = Categorical(range(3)) tm.assert_categorical_equal(cat, exp) diff --git a/pandas/tests/arrays/integer/test_construction.py b/pandas/tests/arrays/integer/test_construction.py index 1893c4554bfbf..e0a4877da6c7e 100644 --- a/pandas/tests/arrays/integer/test_construction.py +++ b/pandas/tests/arrays/integer/test_construction.py @@ -29,7 +29,7 @@ def test_from_dtype_from_float(data): # from int / array expected = pd.Series(data).dropna().reset_index(drop=True) - dropped = np.array(data.dropna()).astype(np.dtype((dtype.type))) + dropped = np.array(data.dropna()).astype(np.dtype(dtype.type)) result = pd.Series(dropped, dtype=str(dtype)) tm.assert_series_equal(result, expected) diff --git a/pandas/tests/extension/decimal/array.py b/pandas/tests/extension/decimal/array.py index 2fbeec8dd8378..9147360e71c73 100644 --- a/pandas/tests/extension/decimal/array.py +++ b/pandas/tests/extension/decimal/array.py @@ -167,7 +167,7 @@ def _na_value(self): def _formatter(self, boxed=False): if boxed: - return "Decimal: {0}".format + return "Decimal: {}".format return repr @classmethod diff --git a/pandas/tests/extension/test_categorical.py b/pandas/tests/extension/test_categorical.py index f7b572a70073a..7d03dadb20dd9 100644 --- a/pandas/tests/extension/test_categorical.py +++ b/pandas/tests/extension/test_categorical.py @@ -137,7 +137,7 @@ def test_combine_add(self, data_repeated): s2 = pd.Series(orig_data2) 
result = s1.combine(s2, lambda x1, x2: x1 + x2) expected = pd.Series( - ([a + b for (a, b) in zip(list(orig_data1), list(orig_data2))]) + [a + b for (a, b) in zip(list(orig_data1), list(orig_data2))] ) self.assert_series_equal(result, expected) diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py index 63a2160e128ed..b5e211895672a 100644 --- a/pandas/tests/frame/test_constructors.py +++ b/pandas/tests/frame/test_constructors.py @@ -71,7 +71,7 @@ def test_series_with_name_not_matching_column(self): lambda: DataFrame({}), lambda: DataFrame(()), lambda: DataFrame([]), - lambda: DataFrame((_ for _ in [])), + lambda: DataFrame(_ for _ in []), lambda: DataFrame(range(0)), lambda: DataFrame(data=None), lambda: DataFrame(data={}), diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py index 1bb40b322cd48..6783fc5b66433 100644 --- a/pandas/tests/groupby/test_groupby.py +++ b/pandas/tests/groupby/test_groupby.py @@ -249,8 +249,8 @@ def test_len(): # issue 11016 df = pd.DataFrame(dict(a=[np.nan] * 3, b=[1, 2, 3])) - assert len(df.groupby(("a"))) == 0 - assert len(df.groupby(("b"))) == 3 + assert len(df.groupby("a")) == 0 + assert len(df.groupby("b")) == 3 assert len(df.groupby(["a", "b"])) == 3 diff --git a/pandas/tests/groupby/test_grouping.py b/pandas/tests/groupby/test_grouping.py index 40b4ce46e550b..18ef95c05f291 100644 --- a/pandas/tests/groupby/test_grouping.py +++ b/pandas/tests/groupby/test_grouping.py @@ -739,7 +739,7 @@ def test_get_group(self): with pytest.raises(ValueError, match=msg): g.get_group("foo") with pytest.raises(ValueError, match=msg): - g.get_group(("foo")) + g.get_group("foo") msg = "must supply a same-length tuple to get_group with multiple grouping keys" with pytest.raises(ValueError, match=msg): g.get_group(("foo", "bar", "baz")) diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py index 7720db9d98ebf..f811bd579aaaa 100644 --- a/pandas/tests/indexes/test_base.py +++ b/pandas/tests/indexes/test_base.py @@ -1360,7 +1360,7 @@ def test_get_indexer_strings_raises(self): def test_get_indexer_numeric_index_boolean_target(self, idx_class): # GH 16877 - numeric_index = idx_class(RangeIndex((4))) + numeric_index = idx_class(RangeIndex(4)) result = numeric_index.get_indexer([True, False, True]) expected = np.array([-1, -1, -1], dtype=np.intp) tm.assert_numpy_array_equal(result, expected) diff --git a/pandas/tests/indexing/test_indexing.py b/pandas/tests/indexing/test_indexing.py index ca8a3ddc95575..0cc61cd7df389 100644 --- a/pandas/tests/indexing/test_indexing.py +++ b/pandas/tests/indexing/test_indexing.py @@ -745,7 +745,7 @@ def run_tests(df, rhs, right): # make frames multi-type & re-run tests for frame in [df, rhs, right]: frame["joe"] = frame["joe"].astype("float64") - frame["jolie"] = frame["jolie"].map("@{0}".format) + frame["jolie"] = frame["jolie"].map("@{}".format) run_tests(df, rhs, right) diff --git a/pandas/tests/io/formats/test_format.py b/pandas/tests/io/formats/test_format.py index f00fa6274fca2..cce0783a3c867 100644 --- a/pandas/tests/io/formats/test_format.py +++ b/pandas/tests/io/formats/test_format.py @@ -648,7 +648,7 @@ def test_to_string_unicode_columns(self, float_frame): assert isinstance(result, str) def test_to_string_utf8_columns(self): - n = "\u05d0".encode("utf-8") + n = "\u05d0".encode() with option_context("display.max_rows", 1): df = DataFrame([1, 2], columns=[n]) diff --git a/pandas/tests/io/formats/test_to_csv.py 
b/pandas/tests/io/formats/test_to_csv.py index c40935b2cc5dd..e2ceb95d77053 100644 --- a/pandas/tests/io/formats/test_to_csv.py +++ b/pandas/tests/io/formats/test_to_csv.py @@ -26,7 +26,7 @@ def test_to_csv_with_single_column(self): """ with tm.ensure_clean("test.csv") as path: df1.to_csv(path, header=None, index=None) - with open(path, "r") as f: + with open(path) as f: assert f.read() == expected1 df2 = DataFrame([1, None]) @@ -36,7 +36,7 @@ def test_to_csv_with_single_column(self): """ with tm.ensure_clean("test.csv") as path: df2.to_csv(path, header=None, index=None) - with open(path, "r") as f: + with open(path) as f: assert f.read() == expected2 def test_to_csv_defualt_encoding(self): @@ -58,7 +58,7 @@ def test_to_csv_quotechar(self): with tm.ensure_clean("test.csv") as path: df.to_csv(path, quoting=1) # 1=QUOTE_ALL - with open(path, "r") as f: + with open(path) as f: assert f.read() == expected expected = """\ @@ -69,7 +69,7 @@ def test_to_csv_quotechar(self): with tm.ensure_clean("test.csv") as path: df.to_csv(path, quoting=1, quotechar="$") - with open(path, "r") as f: + with open(path) as f: assert f.read() == expected with tm.ensure_clean("test.csv") as path: @@ -86,7 +86,7 @@ def test_to_csv_doublequote(self): with tm.ensure_clean("test.csv") as path: df.to_csv(path, quoting=1, doublequote=True) # QUOTE_ALL - with open(path, "r") as f: + with open(path) as f: assert f.read() == expected from _csv import Error @@ -105,7 +105,7 @@ def test_to_csv_escapechar(self): with tm.ensure_clean("test.csv") as path: # QUOTE_ALL df.to_csv(path, quoting=1, doublequote=False, escapechar="\\") - with open(path, "r") as f: + with open(path) as f: assert f.read() == expected df = DataFrame({"col": ["a,a", ",bb,"]}) @@ -117,7 +117,7 @@ def test_to_csv_escapechar(self): with tm.ensure_clean("test.csv") as path: df.to_csv(path, quoting=3, escapechar="\\") # QUOTE_NONE - with open(path, "r") as f: + with open(path) as f: assert f.read() == expected def test_csv_to_string(self): @@ -342,7 +342,7 @@ def test_to_csv_string_array_ascii(self): """ with tm.ensure_clean("str_test.csv") as path: df.to_csv(path, encoding="ascii") - with open(path, "r") as f: + with open(path) as f: assert f.read() == expected_ascii def test_to_csv_string_array_utf8(self): @@ -356,7 +356,7 @@ def test_to_csv_string_array_utf8(self): """ with tm.ensure_clean("unicode_test.csv") as path: df.to_csv(path, encoding="utf-8") - with open(path, "r") as f: + with open(path) as f: assert f.read() == expected_utf8 def test_to_csv_string_with_lf(self): @@ -467,7 +467,7 @@ def test_to_csv_write_to_open_file(self): with open(path, "w") as f: f.write("manual header\n") df.to_csv(f, header=None, index=None) - with open(path, "r") as f: + with open(path) as f: assert f.read() == expected def test_to_csv_write_to_open_file_with_newline_py3(self): diff --git a/pandas/tests/io/formats/test_to_html.py b/pandas/tests/io/formats/test_to_html.py index e85fd398964d0..7acdbfd462874 100644 --- a/pandas/tests/io/formats/test_to_html.py +++ b/pandas/tests/io/formats/test_to_html.py @@ -137,7 +137,7 @@ def test_to_html_encoding(float_frame, tmp_path): # GH 28663 path = tmp_path / "test.html" float_frame.to_html(path, encoding="gbk") - with open(str(path), "r", encoding="gbk") as f: + with open(str(path), encoding="gbk") as f: assert float_frame.to_html() == f.read() diff --git a/pandas/tests/io/formats/test_to_latex.py b/pandas/tests/io/formats/test_to_latex.py index a98644250b328..a93ab6f9cc7aa 100644 --- a/pandas/tests/io/formats/test_to_latex.py +++ 
b/pandas/tests/io/formats/test_to_latex.py @@ -21,7 +21,7 @@ def test_to_latex_filename(self, float_frame): with tm.ensure_clean("test.tex") as path: float_frame.to_latex(path) - with open(path, "r") as f: + with open(path) as f: assert float_frame.to_latex() == f.read() # test with utf-8 and encoding option (GH 7061) diff --git a/pandas/tests/io/json/test_ujson.py b/pandas/tests/io/json/test_ujson.py index e2007e07c572a..086c0b7ba08b2 100644 --- a/pandas/tests/io/json/test_ujson.py +++ b/pandas/tests/io/json/test_ujson.py @@ -591,14 +591,14 @@ def test_decode_number_with_32bit_sign_bit(self, val): def test_encode_big_escape(self): # Make sure no Exception is raised. for _ in range(10): - base = "\u00e5".encode("utf-8") + base = "\u00e5".encode() escape_input = base * 1024 * 1024 * 2 ujson.encode(escape_input) def test_decode_big_escape(self): # Make sure no Exception is raised. for _ in range(10): - base = "\u00e5".encode("utf-8") + base = "\u00e5".encode() quote = b'"' escape_input = quote + (base * 1024 * 1024 * 2) + quote diff --git a/pandas/tests/io/parser/test_c_parser_only.py b/pandas/tests/io/parser/test_c_parser_only.py index 7c58afe867440..ae63b6af3a8b6 100644 --- a/pandas/tests/io/parser/test_c_parser_only.py +++ b/pandas/tests/io/parser/test_c_parser_only.py @@ -577,7 +577,7 @@ def test_file_handles_mmap(c_parser_only, csv1): # Don't close user provided file handles. parser = c_parser_only - with open(csv1, "r") as f: + with open(csv1) as f: m = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ) parser.read_csv(m) diff --git a/pandas/tests/io/parser/test_common.py b/pandas/tests/io/parser/test_common.py index 1d8d5a29686a4..49358fe2ecfe4 100644 --- a/pandas/tests/io/parser/test_common.py +++ b/pandas/tests/io/parser/test_common.py @@ -1726,7 +1726,7 @@ def test_iteration_open_handle(all_parsers): with open(path, "w") as f: f.write("AAA\nBBB\nCCC\nDDD\nEEE\nFFF\nGGG") - with open(path, "r") as f: + with open(path) as f: for line in f: if "CCC" in line: break diff --git a/pandas/tests/io/parser/test_encoding.py b/pandas/tests/io/parser/test_encoding.py index de7b3bed034c7..f23b498c7388a 100644 --- a/pandas/tests/io/parser/test_encoding.py +++ b/pandas/tests/io/parser/test_encoding.py @@ -27,7 +27,7 @@ def test_bytes_io_input(all_parsers): def test_read_csv_unicode(all_parsers): parser = all_parsers - data = BytesIO("\u0141aski, Jan;1".encode("utf-8")) + data = BytesIO("\u0141aski, Jan;1".encode()) result = parser.read_csv(data, sep=";", encoding="utf-8", header=None) expected = DataFrame([["\u0141aski, Jan", 1]]) diff --git a/pandas/tests/io/parser/test_read_fwf.py b/pandas/tests/io/parser/test_read_fwf.py index e982667f06f31..127d0dc4c9829 100644 --- a/pandas/tests/io/parser/test_read_fwf.py +++ b/pandas/tests/io/parser/test_read_fwf.py @@ -173,9 +173,7 @@ def test_read_csv_compat(): def test_bytes_io_input(): - result = read_fwf( - BytesIO("שלום\nשלום".encode("utf8")), widths=[2, 2], encoding="utf8" - ) + result = read_fwf(BytesIO("שלום\nשלום".encode()), widths=[2, 2], encoding="utf8") expected = DataFrame([["של", "ום"]], columns=["של", "ום"]) tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/io/pytables/common.py b/pandas/tests/io/pytables/common.py index aad18890de3ad..7e7a76e287d32 100644 --- a/pandas/tests/io/pytables/common.py +++ b/pandas/tests/io/pytables/common.py @@ -25,7 +25,7 @@ def safe_close(store): try: if store is not None: store.close() - except IOError: + except OSError: pass diff --git a/pandas/tests/io/test_common.py 
b/pandas/tests/io/test_common.py index 85a12a13d19fb..ede8d61490778 100644 --- a/pandas/tests/io/test_common.py +++ b/pandas/tests/io/test_common.py @@ -339,7 +339,7 @@ def test_constructor_bad_file(self, mmap_file): with pytest.raises(err, match=msg): icom._MMapWrapper(non_file) - target = open(mmap_file, "r") + target = open(mmap_file) target.close() msg = "I/O operation on closed file" @@ -347,7 +347,7 @@ def test_constructor_bad_file(self, mmap_file): icom._MMapWrapper(target) def test_get_attr(self, mmap_file): - with open(mmap_file, "r") as target: + with open(mmap_file) as target: wrapper = icom._MMapWrapper(target) attrs = dir(wrapper.mmap) @@ -360,7 +360,7 @@ def test_get_attr(self, mmap_file): assert not hasattr(wrapper, "foo") def test_next(self, mmap_file): - with open(mmap_file, "r") as target: + with open(mmap_file) as target: wrapper = icom._MMapWrapper(target) lines = target.readlines() diff --git a/pandas/tests/io/test_html.py b/pandas/tests/io/test_html.py index 2c93dbb5b6b83..59034e9f3d807 100644 --- a/pandas/tests/io/test_html.py +++ b/pandas/tests/io/test_html.py @@ -114,7 +114,7 @@ def test_to_html_compat(self): c_idx_names=False, r_idx_names=False, ) - .applymap("{0:.3f}".format) + .applymap("{:.3f}".format) .astype(float) ) out = df.to_html() @@ -616,7 +616,7 @@ def try_remove_ws(x): @pytest.mark.slow def test_gold_canyon(self): gc = "Gold Canyon" - with open(self.banklist_data, "r") as f: + with open(self.banklist_data) as f: raw_text = f.read() assert gc in raw_text diff --git a/pandas/tests/io/test_sql.py b/pandas/tests/io/test_sql.py index 1edcc937f72c3..32a15e6201037 100644 --- a/pandas/tests/io/test_sql.py +++ b/pandas/tests/io/test_sql.py @@ -281,7 +281,6 @@ def _get_exec(self): @pytest.fixture(params=[("io", "data", "csv", "iris.csv")]) def load_iris_data(self, datapath, request): - import io iris_csv_file = datapath(*request.param) @@ -291,7 +290,7 @@ def load_iris_data(self, datapath, request): self.drop_table("iris") self._get_exec().execute(SQL_STRINGS["create_iris"][self.flavor]) - with io.open(iris_csv_file, mode="r", newline=None) as iris_csv: + with open(iris_csv_file, mode="r", newline=None) as iris_csv: r = csv.reader(iris_csv) next(r) # skip header row ins = SQL_STRINGS["insert_iris"][self.flavor] diff --git a/pandas/tests/reshape/test_get_dummies.py b/pandas/tests/reshape/test_get_dummies.py index ce13762ea8f86..82e0e52c089a2 100644 --- a/pandas/tests/reshape/test_get_dummies.py +++ b/pandas/tests/reshape/test_get_dummies.py @@ -386,7 +386,7 @@ def test_dataframe_dummies_with_categorical(self, df, sparse, dtype): "get_dummies_kwargs,expected", [ ( - {"data": DataFrame(({"ä": ["a"]}))}, + {"data": DataFrame({"ä": ["a"]})}, DataFrame({"ä_a": [1]}, dtype=np.uint8), ), ( diff --git a/pandas/tests/scalar/test_na_scalar.py b/pandas/tests/scalar/test_na_scalar.py index 0a7dfbee4e672..5c4d7e191d1bb 100644 --- a/pandas/tests/scalar/test_na_scalar.py +++ b/pandas/tests/scalar/test_na_scalar.py @@ -28,9 +28,9 @@ def test_format(): assert format(NA, ">10") == " " assert format(NA, "xxx") == "" # NA is flexible, accept any format spec - assert "{}".format(NA) == "" - assert "{:>10}".format(NA) == " " - assert "{:xxx}".format(NA) == "" + assert f"{NA}" == "" + assert f"{NA:>10}" == " " + assert f"{NA:xxx}" == "" def test_truthiness(): diff --git a/pandas/tests/series/test_analytics.py b/pandas/tests/series/test_analytics.py index e39083b709f38..6ba55ce3c74b9 100644 --- a/pandas/tests/series/test_analytics.py +++ b/pandas/tests/series/test_analytics.py @@ 
-180,7 +180,7 @@ def test_td64_summation_overflow(self): # mean result = (s - s.min()).mean() - expected = pd.Timedelta((pd.TimedeltaIndex((s - s.min())).asi8 / len(s)).sum()) + expected = pd.Timedelta((pd.TimedeltaIndex(s - s.min()).asi8 / len(s)).sum()) # the computation is converted to float so # might be some loss of precision diff --git a/pandas/tests/series/test_constructors.py b/pandas/tests/series/test_constructors.py index 8ac0a55e63cd1..1b5fddaf14335 100644 --- a/pandas/tests/series/test_constructors.py +++ b/pandas/tests/series/test_constructors.py @@ -49,7 +49,7 @@ class TestSeriesConstructors: (lambda: Series({}), True), (lambda: Series(()), False), # creates a RangeIndex (lambda: Series([]), False), # creates a RangeIndex - (lambda: Series((_ for _ in [])), False), # creates a RangeIndex + (lambda: Series(_ for _ in []), False), # creates a RangeIndex (lambda: Series(data=None), True), (lambda: Series(data={}), True), (lambda: Series(data=()), False), # creates a RangeIndex @@ -222,8 +222,7 @@ def test_constructor_iterable(self): # GH 21987 class Iter: def __iter__(self): - for i in range(10): - yield i + yield from range(10) expected = Series(list(range(10)), dtype="int64") result = Series(Iter(), dtype="int64") diff --git a/pandas/tests/series/test_dtypes.py b/pandas/tests/series/test_dtypes.py index bcc0b18134dad..ae89e16ca7667 100644 --- a/pandas/tests/series/test_dtypes.py +++ b/pandas/tests/series/test_dtypes.py @@ -137,13 +137,13 @@ def test_astype_str_cast_dt64(self): ts = Series([Timestamp("2010-01-04 00:00:00")]) s = ts.astype(str) - expected = Series([str("2010-01-04")]) + expected = Series(["2010-01-04"]) tm.assert_series_equal(s, expected) ts = Series([Timestamp("2010-01-04 00:00:00", tz="US/Eastern")]) s = ts.astype(str) - expected = Series([str("2010-01-04 00:00:00-05:00")]) + expected = Series(["2010-01-04 00:00:00-05:00"]) tm.assert_series_equal(s, expected) def test_astype_str_cast_td64(self): @@ -152,7 +152,7 @@ def test_astype_str_cast_td64(self): td = Series([Timedelta(1, unit="d")]) ser = td.astype(str) - expected = Series([str("1 days")]) + expected = Series(["1 days"]) tm.assert_series_equal(ser, expected) def test_astype_unicode(self): @@ -167,7 +167,7 @@ def test_astype_unicode(self): former_encoding = None if sys.getdefaultencoding() == "utf-8": - test_series.append(Series(["野菜食べないとやばい".encode("utf-8")])) + test_series.append(Series(["野菜食べないとやばい".encode()])) for s in test_series: res = s.astype("unicode") diff --git a/pandas/tests/series/test_io.py b/pandas/tests/series/test_io.py index 708118e950686..b12ebd58e6a7b 100644 --- a/pandas/tests/series/test_io.py +++ b/pandas/tests/series/test_io.py @@ -66,12 +66,11 @@ def test_from_csv(self, datetime_series, string_series): tm.assert_series_equal(check_series, series) def test_to_csv(self, datetime_series): - import io with tm.ensure_clean() as path: datetime_series.to_csv(path, header=False) - with io.open(path, newline=None) as f: + with open(path, newline=None) as f: lines = f.readlines() assert lines[1] != "\n" diff --git a/scripts/validate_rst_title_capitalization.py b/scripts/validate_rst_title_capitalization.py index 62ec6b9ef07af..b654e27737359 100755 --- a/scripts/validate_rst_title_capitalization.py +++ b/scripts/validate_rst_title_capitalization.py @@ -212,7 +212,7 @@ def find_titles(rst_file: str) -> Iterable[Tuple[str, int]]: The corresponding line number of the heading. 
""" - with open(rst_file, "r") as fd: + with open(rst_file) as fd: previous_line = "" for i, line in enumerate(fd): line = line[:-1] @@ -250,10 +250,9 @@ def find_rst_files(source_paths: List[str]) -> Iterable[str]: elif directory_address.endswith(".rst"): yield directory_address else: - for filename in glob.glob( + yield from glob.glob( pathname=f"{directory_address}/**/*.rst", recursive=True - ): - yield filename + ) def main(source_paths: List[str], output_format: str) -> int: diff --git a/scripts/validate_unwanted_patterns.py b/scripts/validate_unwanted_patterns.py index 4a0e859535215..b6ffab1482bbc 100755 --- a/scripts/validate_unwanted_patterns.py +++ b/scripts/validate_unwanted_patterns.py @@ -447,7 +447,7 @@ def main( if os.path.isfile(source_path): file_path = source_path - with open(file_path, "r") as file_obj: + with open(file_path) as file_obj: for line_number, msg in function(file_obj): is_failed = True print( @@ -466,7 +466,7 @@ def main( continue file_path = os.path.join(subdir, file_name) - with open(file_path, "r") as file_obj: + with open(file_path) as file_obj: for line_number, msg in function(file_obj): is_failed = True print( diff --git a/setup.py b/setup.py index a8dfeb0974195..8f447d5c38169 100755 --- a/setup.py +++ b/setup.py @@ -99,7 +99,7 @@ def render_templates(cls, pxifiles): # if .pxi.in is not updated, no need to output .pxi continue - with open(pxifile, "r") as f: + with open(pxifile) as f: tmpl = f.read() pyxcontent = tempita.sub(tmpl) diff --git a/versioneer.py b/versioneer.py index 5882349f65f0b..65c9523ba5573 100644 --- a/versioneer.py +++ b/versioneer.py @@ -349,7 +349,7 @@ import sys -class VersioneerConfig(object): +class VersioneerConfig: pass @@ -398,7 +398,7 @@ def get_config_from_root(root): # the top of versioneer.py for instructions on writing your setup.cfg . setup_cfg = os.path.join(root, "setup.cfg") parser = configparser.SafeConfigParser() - with open(setup_cfg, "r") as f: + with open(setup_cfg) as f: parser.readfp(f) VCS = parser.get("versioneer", "VCS") # mandatory @@ -451,7 +451,7 @@ def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False): stderr=(subprocess.PIPE if hide_stderr else None), ) break - except EnvironmentError: + except OSError: e = sys.exc_info()[1] if e.errno == errno.ENOENT: continue @@ -461,7 +461,7 @@ def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False): return None else: if verbose: - print("unable to find command, tried %s" % (commands,)) + print(f"unable to find command, tried {commands}") return None stdout = p.communicate()[0].strip().decode() @@ -946,7 +946,7 @@ def git_get_keywords(versionfile_abs): # _version.py. 
keywords = {} try: - f = open(versionfile_abs, "r") + f = open(versionfile_abs) for line in f.readlines(): if line.strip().startswith("git_refnames ="): mo = re.search(r'=\s*"(.*)"', line) @@ -957,7 +957,7 @@ def git_get_keywords(versionfile_abs): if mo: keywords["full"] = mo.group(1) f.close() - except EnvironmentError: + except OSError: pass return keywords @@ -1072,9 +1072,8 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): if verbose: fmt = "tag '%s' doesn't start with prefix '%s'" print(fmt % (full_tag, tag_prefix)) - pieces["error"] = "tag '%s' doesn't start with prefix '%s'" % ( - full_tag, - tag_prefix, + pieces["error"] = "tag '{}' doesn't start with prefix '{}'".format( + full_tag, tag_prefix, ) return pieces pieces["closest-tag"] = full_tag[len(tag_prefix) :] @@ -1111,13 +1110,13 @@ def do_vcs_install(manifest_in, versionfile_source, ipy): files.append(versioneer_file) present = False try: - f = open(".gitattributes", "r") + f = open(".gitattributes") for line in f.readlines(): if line.strip().startswith(versionfile_source): if "export-subst" in line.strip().split()[1:]: present = True f.close() - except EnvironmentError: + except OSError: pass if not present: f = open(".gitattributes", "a+") @@ -1171,7 +1170,7 @@ def versions_from_file(filename): try: with open(filename) as f: contents = f.read() - except EnvironmentError: + except OSError: raise NotThisMethod("unable to read _version.py") mo = re.search( r"version_json = '''\n(.*)''' # END VERSION_JSON", contents, re.M | re.S @@ -1187,7 +1186,7 @@ def write_to_version_file(filename, versions): with open(filename, "w") as f: f.write(SHORT_VERSION_PY % contents) - print("set %s to '%s'" % (filename, versions["version"])) + print("set {} to '{}'".format(filename, versions["version"])) def plus_or_dot(pieces): @@ -1399,7 +1398,7 @@ def get_versions(verbose=False): try: ver = versions_from_file(versionfile_abs) if verbose: - print("got version from file %s %s" % (versionfile_abs, ver)) + print(f"got version from file {versionfile_abs} {ver}") return ver except NotThisMethod: pass @@ -1619,11 +1618,7 @@ def do_setup(): root = get_root() try: cfg = get_config_from_root(root) - except ( - EnvironmentError, - configparser.NoSectionError, - configparser.NoOptionError, - ) as e: + except (OSError, configparser.NoSectionError, configparser.NoOptionError) as e: if isinstance(e, (EnvironmentError, configparser.NoSectionError)): print("Adding sample versioneer config to setup.cfg", file=sys.stderr) with open(os.path.join(root, "setup.cfg"), "a") as f: @@ -1648,9 +1643,9 @@ def do_setup(): ipy = os.path.join(os.path.dirname(cfg.versionfile_source), "__init__.py") if os.path.exists(ipy): try: - with open(ipy, "r") as f: + with open(ipy) as f: old = f.read() - except EnvironmentError: + except OSError: old = "" if INIT_PY_SNIPPET not in old: print(" appending to %s" % ipy) @@ -1669,12 +1664,12 @@ def do_setup(): manifest_in = os.path.join(root, "MANIFEST.in") simple_includes = set() try: - with open(manifest_in, "r") as f: + with open(manifest_in) as f: for line in f: if line.startswith("include "): for include in line.split()[1:]: simple_includes.add(include) - except EnvironmentError: + except OSError: pass # That doesn't cover everything MANIFEST.in can do # (https://docs.python.org/2/distutils/sourcedist.html#commands), so @@ -1707,7 +1702,7 @@ def scan_setup_py(): found = set() setters = False errors = 0 - with open("setup.py", "r") as f: + with open("setup.py") as f: for line in f.readlines(): if "import 
versioneer" in line: found.add("import") From 51b016594eb1b9fa8d1f1aa96b07305adfce7adb Mon Sep 17 00:00:00 2001 From: Daniel Saxton <2658661+dsaxton@users.noreply.github.com> Date: Fri, 18 Sep 2020 17:50:17 -0500 Subject: [PATCH 607/632] CI: Revert PR template (#36460) --- .github/PULL_REQUEST_TEMPLATE.md | 6 ------ 1 file changed, 6 deletions(-) diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index 5e4d3b4ec38e4..7c3870470f074 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -1,9 +1,3 @@ ---- - -labels: "Needs Review" - ---- - - [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `black pandas` From 20ee6d25e42bf8e5ba5514c643e38c921f536d14 Mon Sep 17 00:00:00 2001 From: Richard Shadrach <45562402+rhshadrach@users.noreply.github.com> Date: Fri, 18 Sep 2020 20:56:47 -0400 Subject: [PATCH 608/632] BUG: Concat typing (#36409) --- pandas/core/reshape/concat.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/pandas/core/reshape/concat.py b/pandas/core/reshape/concat.py index dd4bcf77641ef..a07c7b49ac55b 100644 --- a/pandas/core/reshape/concat.py +++ b/pandas/core/reshape/concat.py @@ -31,6 +31,7 @@ if TYPE_CHECKING: from pandas import DataFrame + from pandas.core.generic import NDFrame # --------------------------------------------------------------------- # Concatenate DataFrame objects @@ -54,7 +55,7 @@ def concat( @overload def concat( - objs: Union[Iterable[FrameOrSeries], Mapping[Label, FrameOrSeries]], + objs: Union[Iterable["NDFrame"], Mapping[Label, "NDFrame"]], axis=0, join: str = "outer", ignore_index: bool = False, @@ -69,7 +70,7 @@ def concat( def concat( - objs: Union[Iterable[FrameOrSeries], Mapping[Label, FrameOrSeries]], + objs: Union[Iterable["NDFrame"], Mapping[Label, "NDFrame"]], axis=0, join="outer", ignore_index: bool = False, From 353ce7e1a82a4742345435ff723307c293fdbf3d Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Fri, 18 Sep 2020 17:58:25 -0700 Subject: [PATCH 609/632] REF: MultiIndex._validate_insert_value, IntervaArray._validate_setitem_value (#36461) --- pandas/core/arrays/interval.py | 68 ++++++++++++++++++---------------- pandas/core/indexes/multi.py | 16 +++++--- 2 files changed, 46 insertions(+), 38 deletions(-) diff --git a/pandas/core/arrays/interval.py b/pandas/core/arrays/interval.py index ff9dd3f2a85bc..f9f68004bcc23 100644 --- a/pandas/core/arrays/interval.py +++ b/pandas/core/arrays/interval.py @@ -547,38 +547,7 @@ def __getitem__(self, value): return self._shallow_copy(left, right) def __setitem__(self, key, value): - # na value: need special casing to set directly on numpy arrays - needs_float_conversion = False - if is_scalar(value) and isna(value): - if is_integer_dtype(self.dtype.subtype): - # can't set NaN on a numpy integer array - needs_float_conversion = True - elif is_datetime64_any_dtype(self.dtype.subtype): - # need proper NaT to set directly on the numpy array - value = np.datetime64("NaT") - elif is_timedelta64_dtype(self.dtype.subtype): - # need proper NaT to set directly on the numpy array - value = np.timedelta64("NaT") - value_left, value_right = value, value - - # scalar interval - elif is_interval_dtype(value) or isinstance(value, Interval): - self._check_closed_matches(value, name="value") - value_left, value_right = value.left, value.right - - else: - # list-like of intervals - try: - array = IntervalArray(value) - value_left, value_right = array.left, array.right - except TypeError as err: - # wrong type: not interval or NA - msg = 
f"'value' should be an interval type, got {type(value)} instead." - raise TypeError(msg) from err - - if needs_float_conversion: - raise ValueError("Cannot set float NaN to integer-backed IntervalArray") - + value_left, value_right = self._validate_setitem_value(value) key = check_array_indexer(self, key) # Need to ensure that left and right are updated atomically, so we're @@ -898,6 +867,41 @@ def _validate_insert_value(self, value): ) return left_insert, right_insert + def _validate_setitem_value(self, value): + needs_float_conversion = False + + if is_scalar(value) and isna(value): + # na value: need special casing to set directly on numpy arrays + if is_integer_dtype(self.dtype.subtype): + # can't set NaN on a numpy integer array + needs_float_conversion = True + elif is_datetime64_any_dtype(self.dtype.subtype): + # need proper NaT to set directly on the numpy array + value = np.datetime64("NaT") + elif is_timedelta64_dtype(self.dtype.subtype): + # need proper NaT to set directly on the numpy array + value = np.timedelta64("NaT") + value_left, value_right = value, value + + elif is_interval_dtype(value) or isinstance(value, Interval): + # scalar interval + self._check_closed_matches(value, name="value") + value_left, value_right = value.left, value.right + + else: + try: + # list-like of intervals + array = IntervalArray(value) + value_left, value_right = array.left, array.right + except TypeError as err: + # wrong type: not interval or NA + msg = f"'value' should be an interval type, got {type(value)} instead." + raise TypeError(msg) from err + + if needs_float_conversion: + raise ValueError("Cannot set float NaN to integer-backed IntervalArray") + return value_left, value_right + def value_counts(self, dropna=True): """ Returns a Series containing counts of each interval. 
diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index a21a54e4a9be3..cd3e384837280 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -3596,6 +3596,15 @@ def astype(self, dtype, copy=True): return self._shallow_copy() return self + def _validate_insert_value(self, item): + if not isinstance(item, tuple): + # Pad the key with empty strings if lower levels of the key + # aren't specified: + item = (item,) + ("",) * (self.nlevels - 1) + elif len(item) != self.nlevels: + raise ValueError("Item must have length equal to number of levels.") + return item + def insert(self, loc: int, item): """ Make new MultiIndex inserting new item at location @@ -3610,12 +3619,7 @@ def insert(self, loc: int, item): ------- new_index : Index """ - # Pad the key with empty strings if lower levels of the key - # aren't specified: - if not isinstance(item, tuple): - item = (item,) + ("",) * (self.nlevels - 1) - elif len(item) != self.nlevels: - raise ValueError("Item must have length equal to number of levels.") + item = self._validate_insert_value(item) new_levels = [] new_codes = [] From 3e1cc56d356d203d705772f545ccd0eec364be99 Mon Sep 17 00:00:00 2001 From: Joris Van den Bossche Date: Sat, 19 Sep 2020 04:13:30 +0200 Subject: [PATCH 610/632] DEPR: Index.to_native_types (#36418) --- doc/source/whatsnew/v1.2.0.rst | 1 + pandas/core/indexes/base.py | 8 +++++ pandas/io/formats/csvs.py | 6 ++-- .../tests/indexes/datetimes/test_formats.py | 34 +++++++++++++------ pandas/tests/indexes/interval/test_formats.py | 2 +- pandas/tests/indexes/period/test_formats.py | 16 +++------ 6 files changed, 41 insertions(+), 26 deletions(-) diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index 33a5b016a293f..1286577748afa 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -212,6 +212,7 @@ Deprecations - Deprecated parameter ``dtype`` in :~meth:`Index.copy` on method all index classes. Use the :meth:`Index.astype` method instead for changing dtype(:issue:`35853`) - Date parser functions :func:`~pandas.io.date_converters.parse_date_time`, :func:`~pandas.io.date_converters.parse_date_fields`, :func:`~pandas.io.date_converters.parse_all_fields` and :func:`~pandas.io.date_converters.generic_parser` from ``pandas.io.date_converters`` are deprecated and will be removed in a future version; use :func:`to_datetime` instead (:issue:`35741`) - :meth:`DataFrame.lookup` is deprecated and will be removed in a future version, use :meth:`DataFrame.melt` and :meth:`DataFrame.loc` instead (:issue:`18682`) +- The :meth:`Index.to_native_types` is deprecated. Use ``.astype(str)`` instead (:issue:`28867`) .. --------------------------------------------------------------------------- diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 962a425afbd1e..5f2b901844dad 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -1021,6 +1021,8 @@ def to_native_types(self, slicer=None, **kwargs): """ Format specified values of `self` and return them. + .. deprecated:: 1.2.0 + Parameters ---------- slicer : int, array-like @@ -1042,6 +1044,12 @@ def to_native_types(self, slicer=None, **kwargs): numpy.ndarray Formatted values. """ + warnings.warn( + "The 'to_native_types' method is deprecated and will be removed in " + "a future version. 
Use 'astype(str)' instead.", + FutureWarning, + stacklevel=2, + ) values = self if slicer is not None: values = values[slicer] diff --git a/pandas/io/formats/csvs.py b/pandas/io/formats/csvs.py index 4250a08f748d7..d0e9163fc5f11 100644 --- a/pandas/io/formats/csvs.py +++ b/pandas/io/formats/csvs.py @@ -154,7 +154,7 @@ def _refine_cols(self, cols: Optional[Sequence[Label]]) -> Sequence[Label]: if cols is not None: if isinstance(cols, ABCIndexClass): - cols = cols.to_native_types(**self._number_format) + cols = cols._format_native_types(**self._number_format) else: cols = list(cols) self.obj = self.obj.loc[:, cols] @@ -163,7 +163,7 @@ def _refine_cols(self, cols: Optional[Sequence[Label]]) -> Sequence[Label]: # and make sure sure cols is just a list of labels cols = self.obj.columns if isinstance(cols, ABCIndexClass): - return cols.to_native_types(**self._number_format) + return cols._format_native_types(**self._number_format) else: assert isinstance(cols, Sequence) return list(cols) @@ -341,5 +341,5 @@ def _save_chunk(self, start_i: int, end_i: int) -> None: res = df._mgr.to_native_types(**self._number_format) data = [res.iget_values(i) for i in range(len(res.items))] - ix = self.data_index.to_native_types(slicer=slicer, **self._number_format) + ix = self.data_index[slicer]._format_native_types(**self._number_format) libwriters.write_csv_rows(data, ix, self.nlevels, self.cols, self.writer) diff --git a/pandas/tests/indexes/datetimes/test_formats.py b/pandas/tests/indexes/datetimes/test_formats.py index f34019e06fd5f..a98a96b436107 100644 --- a/pandas/tests/indexes/datetimes/test_formats.py +++ b/pandas/tests/indexes/datetimes/test_formats.py @@ -10,41 +10,53 @@ import pandas._testing as tm -def test_to_native_types(): +def test_to_native_types_method_deprecated(): index = pd.date_range(freq="1D", periods=3, start="2017-01-01") - - # First, with no arguments. expected = np.array(["2017-01-01", "2017-01-02", "2017-01-03"], dtype=object) - result = index.to_native_types() - tm.assert_numpy_array_equal(result, expected) + with tm.assert_produces_warning(FutureWarning): + result = index.to_native_types() - # No NaN values, so na_rep has no effect - result = index.to_native_types(na_rep="pandas") tm.assert_numpy_array_equal(result, expected) # Make sure slicing works expected = np.array(["2017-01-01", "2017-01-03"], dtype=object) - result = index.to_native_types([0, 2]) + with tm.assert_produces_warning(FutureWarning): + result = index.to_native_types([0, 2]) + + tm.assert_numpy_array_equal(result, expected) + + +def test_to_native_types(): + index = pd.date_range(freq="1D", periods=3, start="2017-01-01") + + # First, with no arguments. 
+ expected = np.array(["2017-01-01", "2017-01-02", "2017-01-03"], dtype=object) + + result = index._format_native_types() + tm.assert_numpy_array_equal(result, expected) + + # No NaN values, so na_rep has no effect + result = index._format_native_types(na_rep="pandas") tm.assert_numpy_array_equal(result, expected) # Make sure date formatting works expected = np.array(["01-2017-01", "01-2017-02", "01-2017-03"], dtype=object) - result = index.to_native_types(date_format="%m-%Y-%d") + result = index._format_native_types(date_format="%m-%Y-%d") tm.assert_numpy_array_equal(result, expected) # NULL object handling should work index = DatetimeIndex(["2017-01-01", pd.NaT, "2017-01-03"]) expected = np.array(["2017-01-01", "NaT", "2017-01-03"], dtype=object) - result = index.to_native_types() + result = index._format_native_types() tm.assert_numpy_array_equal(result, expected) expected = np.array(["2017-01-01", "pandas", "2017-01-03"], dtype=object) - result = index.to_native_types(na_rep="pandas") + result = index._format_native_types(na_rep="pandas") tm.assert_numpy_array_equal(result, expected) diff --git a/pandas/tests/indexes/interval/test_formats.py b/pandas/tests/indexes/interval/test_formats.py index 7acf5c1e0906c..0e8d7d1ba5aba 100644 --- a/pandas/tests/indexes/interval/test_formats.py +++ b/pandas/tests/indexes/interval/test_formats.py @@ -73,6 +73,6 @@ def test_repr_missing(self, constructor, expected): def test_to_native_types(self, tuples, closed, expected_data): # GH 28210 index = IntervalIndex.from_tuples(tuples, closed=closed) - result = index.to_native_types() + result = index._format_native_types() expected = np.array(expected_data) tm.assert_numpy_array_equal(result, expected) diff --git a/pandas/tests/indexes/period/test_formats.py b/pandas/tests/indexes/period/test_formats.py index 5db373a9f07ae..150a797169c14 100644 --- a/pandas/tests/indexes/period/test_formats.py +++ b/pandas/tests/indexes/period/test_formats.py @@ -12,35 +12,29 @@ def test_to_native_types(): # First, with no arguments. 
expected = np.array(["2017-01-01", "2017-01-02", "2017-01-03"], dtype="=U10") - result = index.to_native_types() + result = index._format_native_types() tm.assert_numpy_array_equal(result, expected) # No NaN values, so na_rep has no effect - result = index.to_native_types(na_rep="pandas") - tm.assert_numpy_array_equal(result, expected) - - # Make sure slicing works - expected = np.array(["2017-01-01", "2017-01-03"], dtype="=U10") - - result = index.to_native_types([0, 2]) + result = index._format_native_types(na_rep="pandas") tm.assert_numpy_array_equal(result, expected) # Make sure date formatting works expected = np.array(["01-2017-01", "01-2017-02", "01-2017-03"], dtype="=U10") - result = index.to_native_types(date_format="%m-%Y-%d") + result = index._format_native_types(date_format="%m-%Y-%d") tm.assert_numpy_array_equal(result, expected) # NULL object handling should work index = PeriodIndex(["2017-01-01", pd.NaT, "2017-01-03"], freq="D") expected = np.array(["2017-01-01", "NaT", "2017-01-03"], dtype=object) - result = index.to_native_types() + result = index._format_native_types() tm.assert_numpy_array_equal(result, expected) expected = np.array(["2017-01-01", "pandas", "2017-01-03"], dtype=object) - result = index.to_native_types(na_rep="pandas") + result = index._format_native_types(na_rep="pandas") tm.assert_numpy_array_equal(result, expected) From aed64e85eb17edb0e55013868b1aa4e44e977a36 Mon Sep 17 00:00:00 2001 From: Hans Date: Sat, 19 Sep 2020 04:14:25 +0200 Subject: [PATCH 611/632] BUG: fix isin with nans and large arrays (#36266) --- doc/source/whatsnew/v1.1.3.rst | 1 + pandas/core/algorithms.py | 7 ++++++- pandas/tests/test_algos.py | 18 +++++++++++++++++- 3 files changed, 24 insertions(+), 2 deletions(-) diff --git a/doc/source/whatsnew/v1.1.3.rst b/doc/source/whatsnew/v1.1.3.rst index 1d386fa372ce1..7d658215d7b76 100644 --- a/doc/source/whatsnew/v1.1.3.rst +++ b/doc/source/whatsnew/v1.1.3.rst @@ -47,6 +47,7 @@ Bug fixes - Bug in :class:`Series` constructor where integer overflow would occur for sufficiently large scalar inputs when an index was provided (:issue:`36291`) - Bug in :meth:`DataFrame.sort_values` raising an ``AttributeError`` when sorting on a key that casts column to categorical dtype (:issue:`36383`) - Bug in :meth:`DataFrame.stack` raising a ``ValueError`` when stacking :class:`MultiIndex` columns based on position when the levels had duplicate names (:issue:`36353`) +- Bug in :meth:`Series.isin` and :meth:`DataFrame.isin` when using ``NaN`` and a row length above 1,000,000 (:issue:`22205`) .. 
---------------------------------------------------------------------------
diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py
index 2ce3f2d9a7bfa..50d1810fee30d 100644
--- a/pandas/core/algorithms.py
+++ b/pandas/core/algorithms.py
@@ -440,7 +440,12 @@ def isin(comps: AnyArrayLike, values: AnyArrayLike) -> np.ndarray:
     # GH16012
     # Ensure np.in1d doesn't get object types or it *may* throw an exception
     if len(comps) > 1_000_000 and not is_object_dtype(comps):
-        f = np.in1d
+        # If the values include nan we need to check for nan explicitly
+        # since np.nan is not equal to np.nan
+        if np.isnan(values).any():
+            f = lambda c, v: np.logical_or(np.in1d(c, v), np.isnan(c))
+        else:
+            f = np.in1d
     elif is_integer_dtype(comps):
         try:
             values = values.astype("int64", copy=False)
diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py
index a2c2ae22a0b62..6102f43f4db6a 100644
--- a/pandas/tests/test_algos.py
+++ b/pandas/tests/test_algos.py
@@ -801,7 +801,6 @@ def test_i8(self):
         tm.assert_numpy_array_equal(result, expected)

     def test_large(self):
-
         s = pd.date_range("20000101", periods=2000000, freq="s").values
         result = algos.isin(s, s[0:2])
         expected = np.zeros(len(s), dtype=bool)
@@ -841,6 +840,23 @@ def test_same_nan_is_in(self):
         result = algos.isin(comps, values)
         tm.assert_numpy_array_equal(expected, result)

+    def test_same_nan_is_in_large(self):
+        # https://github.com/pandas-dev/pandas/issues/22205
+        s = np.tile(1.0, 1_000_001)
+        s[0] = np.nan
+        result = algos.isin(s, [np.nan, 1])
+        expected = np.ones(len(s), dtype=bool)
+        tm.assert_numpy_array_equal(result, expected)
+
+    def test_same_nan_is_in_large_series(self):
+        # https://github.com/pandas-dev/pandas/issues/22205
+        s = np.tile(1.0, 1_000_001)
+        series = pd.Series(s)
+        s[0] = np.nan
+        result = series.isin([np.nan, 1])
+        expected = pd.Series(np.ones(len(s), dtype=bool))
+        tm.assert_series_equal(result, expected)
+
     def test_same_object_is_in(self):
         # GH 22160
         # there could be special treatment for nans

From 5b04079a7aad9b9d611cd2462d267f736858b605 Mon Sep 17 00:00:00 2001
From: sm1899 <42005691+sm1899@users.noreply.github.com>
Date: Sat, 19 Sep 2020 13:06:12 +0530
Subject: [PATCH 612/632] Remove unnecessary trailing commas (#36463)

* removed trailing comma

* removed trailing commas

* doc/make.py

* doc/make.py

* Update make.py
---
 doc/make.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/doc/make.py b/doc/make.py
index db729853e5834..94fbfa9382d81 100755
--- a/doc/make.py
+++ b/doc/make.py
@@ -291,7 +291,7 @@ def main():
     joined = ", ".join(cmds)

     argparser.add_argument(
-        "command", nargs="?", default="html", help=f"command to run: {joined}",
+        "command", nargs="?", default="html", help=f"command to run: {joined}"
     )
     argparser.add_argument(
         "--num-jobs", type=int, default=0, help="number of jobs used by sphinx-build"

From ff11c056f0a3d5a7ee2a20b52f85c9c04af95e38 Mon Sep 17 00:00:00 2001
From: attack68 <24256554+attack68@users.noreply.github.com>
Date: Sat, 19 Sep 2020 17:21:56 +0200
Subject: [PATCH 613/632] PERF: styler uuid control and security (#36345)

---
 doc/source/whatsnew/v1.2.0.rst        |  1 +
 pandas/io/formats/style.py            | 16 +++++++++++++---
 pandas/tests/io/formats/test_style.py | 20 ++++++++++++++++++++
 3 files changed, 34 insertions(+), 3 deletions(-)

diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index 1286577748afa..5882b74aa8b05 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -226,6 +226,7 @@ Performance 
improvements - Performance improvement in :meth:`GroupBy.agg` with the ``numba`` engine (:issue:`35759`) - Performance improvements when creating :meth:`pd.Series.map` from a huge dictionary (:issue:`34717`) - Performance improvement in :meth:`GroupBy.transform` with the ``numba`` engine (:issue:`36240`) +- ``Styler`` uuid method altered to compress data transmission over web whilst maintaining reasonably low table collision probability (:issue:`36345`) .. --------------------------------------------------------------------------- diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py index b5f0bc0a832c2..1df37da3da8d0 100644 --- a/pandas/io/formats/style.py +++ b/pandas/io/formats/style.py @@ -18,7 +18,7 @@ Tuple, Union, ) -from uuid import uuid1 +from uuid import uuid4 import numpy as np @@ -89,6 +89,12 @@ class Styler: .. versionadded:: 1.0.0 + uuid_len : int, default 5 + If ``uuid`` is not specified, the length of the ``uuid`` to randomly generate + expressed in hex characters, in range [0, 32]. + + .. versionadded:: 1.2.0 + Attributes ---------- env : Jinja2 jinja2.Environment @@ -144,6 +150,7 @@ def __init__( table_attributes: Optional[str] = None, cell_ids: bool = True, na_rep: Optional[str] = None, + uuid_len: int = 5, ): self.ctx: DefaultDict[Tuple[int, int], List[str]] = defaultdict(list) self._todo: List[Tuple[Callable, Tuple, Dict]] = [] @@ -159,7 +166,10 @@ def __init__( self.index = data.index self.columns = data.columns - self.uuid = uuid + if not isinstance(uuid_len, int) or not uuid_len >= 0: + raise TypeError("``uuid_len`` must be an integer in range [0, 32].") + self.uuid_len = min(32, uuid_len) + self.uuid = (uuid or uuid4().hex[: self.uuid_len]) + "_" self.table_styles = table_styles self.caption = caption if precision is None: @@ -248,7 +258,7 @@ def _translate(self): precision = self.precision hidden_index = self.hidden_index hidden_columns = self.hidden_columns - uuid = self.uuid or str(uuid1()).replace("-", "_") + uuid = self.uuid ROW_HEADING_CLASS = "row_heading" COL_HEADING_CLASS = "col_heading" INDEX_NAME_CLASS = "index_name" diff --git a/pandas/tests/io/formats/test_style.py b/pandas/tests/io/formats/test_style.py index e7583e1ce2ce2..8d66a16fc2b7a 100644 --- a/pandas/tests/io/formats/test_style.py +++ b/pandas/tests/io/formats/test_style.py @@ -1718,6 +1718,26 @@ def test_colspan_w3(self): s = Styler(df, uuid="_", cell_ids=False) assert '' in s.render() + @pytest.mark.parametrize("len_", [1, 5, 32, 33, 100]) + def test_uuid_len(self, len_): + # GH 36345 + df = pd.DataFrame(data=[["A"]]) + s = Styler(df, uuid_len=len_, cell_ids=False).render() + strt = s.find('id="T_') + end = s[strt + 6 :].find('"') + if len_ > 32: + assert end == 32 + 1 + else: + assert end == len_ + 1 + + @pytest.mark.parametrize("len_", [-2, "bad", None]) + def test_uuid_len_raises(self, len_): + # GH 36345 + df = pd.DataFrame(data=[["A"]]) + msg = "``uuid_len`` must be an integer in range \\[0, 32\\]." 
+ with pytest.raises(TypeError, match=msg): + Styler(df, uuid_len=len_, cell_ids=False).render() + @td.skip_if_no_mpl class TestStylerMatplotlibDep: From 80f0a74c90cd50bd3502edd521d6d915e5bd3bc5 Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Sat, 19 Sep 2020 08:34:44 -0700 Subject: [PATCH 614/632] Align cython and python reduction code paths (#36459) --- pandas/_libs/reduction.pyx | 17 ++++++++++++----- pandas/core/groupby/generic.py | 15 +++++++++++---- pandas/core/groupby/ops.py | 23 +++++++++++------------ 3 files changed, 34 insertions(+), 21 deletions(-) diff --git a/pandas/_libs/reduction.pyx b/pandas/_libs/reduction.pyx index 8161b5c5c2b11..3a0fda5aed620 100644 --- a/pandas/_libs/reduction.pyx +++ b/pandas/_libs/reduction.pyx @@ -16,12 +16,12 @@ from pandas._libs cimport util from pandas._libs.lib import is_scalar, maybe_convert_objects -cdef _check_result_array(object obj, Py_ssize_t cnt): +cpdef check_result_array(object obj, Py_ssize_t cnt): if (util.is_array(obj) or (isinstance(obj, list) and len(obj) == cnt) or getattr(obj, 'shape', None) == (cnt,)): - raise ValueError('Function does not reduce') + raise ValueError('Must produce aggregated value') cdef class _BaseGrouper: @@ -74,12 +74,14 @@ cdef class _BaseGrouper: cached_ityp._engine.clear_mapping() cached_ityp._cache.clear() # e.g. inferred_freq must go res = self.f(cached_typ) - res = _extract_result(res) + res = extract_result(res) if not initialized: # On the first pass, we check the output shape to see # if this looks like a reduction. initialized = True - _check_result_array(res, len(self.dummy_arr)) + # In all tests other than test_series_grouper and + # test_series_bin_grouper, we have len(self.dummy_arr) == 0 + check_result_array(res, len(self.dummy_arr)) return res, initialized @@ -278,9 +280,14 @@ cdef class SeriesGrouper(_BaseGrouper): return result, counts -cdef inline _extract_result(object res, bint squeeze=True): +cpdef inline extract_result(object res, bint squeeze=True): """ extract the result object, it might be a 0-dim ndarray or a len-1 0-dim, or a scalar """ + if hasattr(res, "_values"): + # Preserve EA + res = res._values + if squeeze and res.ndim == 1 and len(res) == 1: + res = res[0] if hasattr(res, 'values') and util.is_array(res.values): res = res.values if util.is_array(res): diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index bbccd22f2ae85..0705261d0c516 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -29,7 +29,7 @@ import numpy as np -from pandas._libs import lib +from pandas._libs import lib, reduction as libreduction from pandas._typing import ArrayLike, FrameOrSeries, FrameOrSeriesUnion from pandas.util._decorators import Appender, Substitution, doc @@ -471,12 +471,19 @@ def _get_index() -> Index: def _aggregate_named(self, func, *args, **kwargs): result = {} + initialized = False for name, group in self: - group.name = name + # Each step of this loop corresponds to + # libreduction._BaseGrouper._apply_to_group + group.name = name # NB: libreduction does not pin name + output = func(group, *args, **kwargs) - if isinstance(output, (Series, Index, np.ndarray)): - raise ValueError("Must produce aggregated value") + output = libreduction.extract_result(output) + if not initialized: + # We only do this validation on the first iteration + libreduction.check_result_array(output, 0) + initialized = True result[name] = output return result diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py index 
e9525f03368fa..b3f91d4623c84 100644 --- a/pandas/core/groupby/ops.py +++ b/pandas/core/groupby/ops.py @@ -623,7 +623,7 @@ def agg_series(self, obj: Series, func: F): try: return self._aggregate_series_fast(obj, func) except ValueError as err: - if "Function does not reduce" in str(err): + if "Must produce aggregated value" in str(err): # raised in libreduction pass else: @@ -653,27 +653,26 @@ def _aggregate_series_pure_python(self, obj: Series, func: F): group_index, _, ngroups = self.group_info counts = np.zeros(ngroups, dtype=int) - result = None + result = np.empty(ngroups, dtype="O") + initialized = False splitter = get_splitter(obj, group_index, ngroups, axis=0) for label, group in splitter: + + # Each step of this loop corresponds to + # libreduction._BaseGrouper._apply_to_group res = func(group) + res = libreduction.extract_result(res) - if result is None: - if isinstance(res, (Series, Index, np.ndarray)): - if len(res) == 1: - # e.g. test_agg_lambda_with_timezone lambda e: e.head(1) - # FIXME: are we potentially losing important res.index info? - res = res.item() - else: - raise ValueError("Function does not reduce") - result = np.empty(ngroups, dtype="O") + if not initialized: + # We only do this validation on the first iteration + libreduction.check_result_array(res, 0) + initialized = True counts[label] = group.shape[0] result[label] = res - assert result is not None result = lib.maybe_convert_objects(result, try_float=0) # TODO: maybe_cast_to_extension_array? From b3e2c6c1e6f92e228451e3395744182a2459da45 Mon Sep 17 00:00:00 2001 From: Daniel Saxton <2658661+dsaxton@users.noreply.github.com> Date: Sat, 19 Sep 2020 14:52:24 -0500 Subject: [PATCH 615/632] Turn on stale GitHub action (#36476) --- .github/workflows/stale-pr.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/stale-pr.yml b/.github/workflows/stale-pr.yml index a6aece34478d9..e3b8d9336a5a6 100644 --- a/.github/workflows/stale-pr.yml +++ b/.github/workflows/stale-pr.yml @@ -18,4 +18,4 @@ jobs: days-before-stale: 30 days-before-close: -1 remove-stale-when-updated: true - debug-only: true + debug-only: false From 605efc6532b850aa5f8b4ba678e0d7c59309dddb Mon Sep 17 00:00:00 2001 From: Terji Petersen Date: Sat, 19 Sep 2020 20:54:16 +0100 Subject: [PATCH 616/632] PERF: construct DataFrame with string array and dtype=str (#36432) --- asv_bench/benchmarks/strings.py | 17 ++++++++++++----- doc/source/whatsnew/v1.2.0.rst | 2 +- pandas/core/internals/construction.py | 20 +++++++++++--------- 3 files changed, 24 insertions(+), 15 deletions(-) diff --git a/asv_bench/benchmarks/strings.py b/asv_bench/benchmarks/strings.py index 2023858181baa..d8b35abb94b9d 100644 --- a/asv_bench/benchmarks/strings.py +++ b/asv_bench/benchmarks/strings.py @@ -13,13 +13,20 @@ class Construction: param_names = ["dtype"] def setup(self, dtype): - self.data = tm.rands_array(nchars=10 ** 5, size=10) + self.series_arr = tm.rands_array(nchars=10, size=10 ** 5) + self.frame_arr = self.series_arr.reshape((50_000, 2)).copy() - def time_construction(self, dtype): - Series(self.data, dtype=dtype) + def time_series_construction(self, dtype): + Series(self.series_arr, dtype=dtype) - def peakmem_construction(self, dtype): - Series(self.data, dtype=dtype) + def peakmem_series_construction(self, dtype): + Series(self.series_arr, dtype=dtype) + + def time_frame_construction(self, dtype): + DataFrame(self.frame_arr, dtype=dtype) + + def peakmem_frame_construction(self, dtype): + DataFrame(self.frame_arr, dtype=dtype) class Methods: 
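The construction path this patch speeds up, and which the benchmark above times
on a 50_000 x 2 array of random strings, can be reproduced directly. A minimal
sketch with a tiny array:

    import numpy as np
    import pandas as pd

    # 2-D object array of strings: the input that previously hit a slow cast
    arr = np.array([["a", "b"], ["c", "d"], ["e", "f"]], dtype=object)

    # per the construction.py change later in this patch, the values are
    # raveled, cast once through construct_1d_ndarray_preserving_na, and
    # reshaped back to 2-D
    df = pd.DataFrame(arr, dtype=str)
    print(df.dtypes)  # both columns hold Python str (object dtype)
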
diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index 5882b74aa8b05..35d813962022d 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -222,7 +222,7 @@ Deprecations Performance improvements ~~~~~~~~~~~~~~~~~~~~~~~~ -- Performance improvements when creating Series with dtype `str` or :class:`StringDtype` from array with many string elements (:issue:`36304`, :issue:`36317`, :issue:`36325`) +- Performance improvements when creating DataFrame or Series with dtype `str` or :class:`StringDtype` from array with many string elements (:issue:`36304`, :issue:`36317`, :issue:`36325`, :issue:`36432`) - Performance improvement in :meth:`GroupBy.agg` with the ``numba`` engine (:issue:`35759`) - Performance improvements when creating :meth:`pd.Series.map` from a huge dictionary (:issue:`34717`) - Performance improvement in :meth:`GroupBy.transform` with the ``numba`` engine (:issue:`36240`) diff --git a/pandas/core/internals/construction.py b/pandas/core/internals/construction.py index 2d4163e0dee89..d19a0dd8f29e3 100644 --- a/pandas/core/internals/construction.py +++ b/pandas/core/internals/construction.py @@ -13,6 +13,7 @@ from pandas.core.dtypes.cast import ( construct_1d_arraylike_from_scalar, + construct_1d_ndarray_preserving_na, maybe_cast_to_datetime, maybe_convert_platform, maybe_infer_to_datetimelike, @@ -189,15 +190,16 @@ def init_ndarray(values, index, columns, dtype: Optional[DtypeObj], copy: bool): # the dtypes will be coerced to a single dtype values = _prep_ndarray(values, copy=copy) - if dtype is not None: - if not is_dtype_equal(values.dtype, dtype): - try: - values = values.astype(dtype) - except Exception as orig: - # e.g. ValueError when trying to cast object dtype to float64 - raise ValueError( - f"failed to cast to '{dtype}' (Exception was: {orig})" - ) from orig + if dtype is not None and not is_dtype_equal(values.dtype, dtype): + try: + values = construct_1d_ndarray_preserving_na( + values.ravel(), dtype=dtype, copy=False + ).reshape(values.shape) + except Exception as orig: + # e.g. 
ValueError when trying to cast object dtype to float64
+            raise ValueError(
+                f"failed to cast to '{dtype}' (Exception was: {orig})"
+            ) from orig

     # _prep_ndarray ensures that values.ndim == 2 at this point
     index, columns = _get_axes(

From 51ffcdba158202448e83056e5fe28bdf185ba651 Mon Sep 17 00:00:00 2001
From: hardikpnsp
Date: Sun, 20 Sep 2020 01:25:00 +0530
Subject: [PATCH 617/632] ASV: added benchmark tests for DataFrame.to_numpy()
 and .values (#36452)

---
 asv_bench/benchmarks/frame_methods.py | 40 +++++++++++++++++++++++++++
 1 file changed, 40 insertions(+)

diff --git a/asv_bench/benchmarks/frame_methods.py b/asv_bench/benchmarks/frame_methods.py
index 44f71b392c0eb..70d90ded84545 100644
--- a/asv_bench/benchmarks/frame_methods.py
+++ b/asv_bench/benchmarks/frame_methods.py
@@ -219,6 +219,46 @@ def time_to_html_mixed(self):
         self.df2.to_html()


+class ToNumpy:
+    def setup(self):
+        N = 10000
+        M = 10
+        self.df_tall = DataFrame(np.random.randn(N, M))
+        self.df_wide = DataFrame(np.random.randn(M, N))
+        self.df_mixed_tall = self.df_tall.copy()
+        self.df_mixed_tall["foo"] = "bar"
+        self.df_mixed_tall[0] = period_range("2000", periods=N)
+        self.df_mixed_tall[1] = range(N)
+        self.df_mixed_wide = self.df_wide.copy()
+        self.df_mixed_wide["foo"] = "bar"
+        self.df_mixed_wide[0] = period_range("2000", periods=M)
+        self.df_mixed_wide[1] = range(M)
+
+    def time_to_numpy_tall(self):
+        self.df_tall.to_numpy()
+
+    def time_to_numpy_wide(self):
+        self.df_wide.to_numpy()
+
+    def time_to_numpy_mixed_tall(self):
+        self.df_mixed_tall.to_numpy()
+
+    def time_to_numpy_mixed_wide(self):
+        self.df_mixed_wide.to_numpy()
+
+    def time_values_tall(self):
+        self.df_tall.values
+
+    def time_values_wide(self):
+        self.df_wide.values
+
+    def time_values_mixed_tall(self):
+        self.df_mixed_tall.values
+
+    def time_values_mixed_wide(self):
+        self.df_mixed_wide.values
+
+
 class Repr:
     def setup(self):
         nrows = 10000

From 54f23e8f1f02c03aa3e166bda88e9d5b051f1054 Mon Sep 17 00:00:00 2001
From: Alex Lim
Date: Sat, 19 Sep 2020 15:56:05 -0400
Subject: [PATCH 618/632] BUG: get_indexer returned dtype (#36431)

---
 doc/source/whatsnew/v1.2.0.rst             |  1 +
 pandas/_libs/hashtable.pxd                 |  2 +-
 pandas/_libs/hashtable_class_helper.pxi.in | 14 +++++++-------
 pandas/_libs/index.pyx                     |  6 +++---
 pandas/tests/base/test_misc.py             |  2 +-
 5 files changed, 13 insertions(+), 12 deletions(-)

diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index 35d813962022d..7724acdd8602c 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -295,6 +295,7 @@ Indexing
 - Bug in :meth:`PeriodIndex.get_loc` incorrectly raising ``ValueError`` on non-datelike strings instead of ``KeyError``, causing similar errors in :meth:`Series.__geitem__`, :meth:`Series.__contains__`, and :meth:`Series.loc.__getitem__` (:issue:`34240`)
 - Bug in :meth:`Index.sort_values` where, when empty values were passed, the method would break by trying to compare missing values instead of pushing them to the end of the sort order. (:issue:`35584`)
+- Bug in :meth:`Index.get_indexer` and :meth:`Index.get_indexer_non_unique` where int64 arrays are returned instead of intp. 
(:issue:`36359`) - Missing diff --git a/pandas/_libs/hashtable.pxd b/pandas/_libs/hashtable.pxd index 0499eabf708af..2650bea921b3f 100644 --- a/pandas/_libs/hashtable.pxd +++ b/pandas/_libs/hashtable.pxd @@ -1,7 +1,7 @@ from pandas._libs.khash cimport ( kh_int64_t, kh_uint64_t, kh_float64_t, kh_pymap_t, kh_str_t, uint64_t, int64_t, float64_t) -from numpy cimport ndarray +from numpy cimport ndarray, intp_t # prototypes for sharing diff --git a/pandas/_libs/hashtable_class_helper.pxi.in b/pandas/_libs/hashtable_class_helper.pxi.in index 5e4da96d57e42..da91fa69b0dec 100644 --- a/pandas/_libs/hashtable_class_helper.pxi.in +++ b/pandas/_libs/hashtable_class_helper.pxi.in @@ -347,7 +347,7 @@ cdef class {{name}}HashTable(HashTable): int ret = 0 {{dtype}}_t val khiter_t k - int64_t[:] locs = np.empty(n, dtype=np.int64) + intp_t[:] locs = np.empty(n, dtype=np.intp) with nogil: for i in range(n): @@ -551,7 +551,7 @@ cdef class {{name}}HashTable(HashTable): def get_labels_groupby(self, const {{dtype}}_t[:] values): cdef: Py_ssize_t i, n = len(values) - int64_t[:] labels + intp_t[:] labels Py_ssize_t idx, count = 0 int ret = 0 {{dtype}}_t val @@ -559,7 +559,7 @@ cdef class {{name}}HashTable(HashTable): {{name}}Vector uniques = {{name}}Vector() {{name}}VectorData *ud - labels = np.empty(n, dtype=np.int64) + labels = np.empty(n, dtype=np.intp) ud = uniques.data with nogil: @@ -648,8 +648,8 @@ cdef class StringHashTable(HashTable): def get_indexer(self, ndarray[object] values): cdef: Py_ssize_t i, n = len(values) - ndarray[int64_t] labels = np.empty(n, dtype=np.int64) - int64_t *resbuf = labels.data + ndarray[intp_t] labels = np.empty(n, dtype=np.intp) + intp_t *resbuf = labels.data khiter_t k kh_str_t *table = self.table const char *v @@ -680,7 +680,7 @@ cdef class StringHashTable(HashTable): object val const char *v khiter_t k - int64_t[:] locs = np.empty(n, dtype=np.int64) + intp_t[:] locs = np.empty(n, dtype=np.intp) # these by-definition *must* be strings vecs = malloc(n * sizeof(char *)) @@ -986,7 +986,7 @@ cdef class PyObjectHashTable(HashTable): int ret = 0 object val khiter_t k - int64_t[:] locs = np.empty(n, dtype=np.int64) + intp_t[:] locs = np.empty(n, dtype=np.intp) for i in range(n): val = values[i] diff --git a/pandas/_libs/index.pyx b/pandas/_libs/index.pyx index 8155e7e6c074a..e31c3739f456d 100644 --- a/pandas/_libs/index.pyx +++ b/pandas/_libs/index.pyx @@ -266,7 +266,7 @@ cdef class IndexEngine: """ cdef: ndarray values, x - ndarray[int64_t] result, missing + ndarray[intp_t] result, missing set stargets, remaining_stargets dict d = {} object val @@ -283,8 +283,8 @@ cdef class IndexEngine: else: n_alloc = n - result = np.empty(n_alloc, dtype=np.int64) - missing = np.empty(n_t, dtype=np.int64) + result = np.empty(n_alloc, dtype=np.intp) + missing = np.empty(n_t, dtype=np.intp) # map each starget to its position in the index if stargets and len(stargets) < 5 and self.is_monotonic_increasing: diff --git a/pandas/tests/base/test_misc.py b/pandas/tests/base/test_misc.py index 9523fba953ad0..b8468a5acf277 100644 --- a/pandas/tests/base/test_misc.py +++ b/pandas/tests/base/test_misc.py @@ -201,4 +201,4 @@ def test_get_indexer_non_unique_dtype_mismatch(): # GH 25459 indexes, missing = pd.Index(["A", "B"]).get_indexer_non_unique(pd.Index([0])) tm.assert_numpy_array_equal(np.array([-1], dtype=np.intp), indexes) - tm.assert_numpy_array_equal(np.array([0], dtype=np.int64), missing) + tm.assert_numpy_array_equal(np.array([0], dtype=np.intp), missing) From 4236c86bf98bbd454f1773a3d040638430b43017 Mon 
Sep 17 00:00:00 2001 From: Ali McMaster Date: Sat, 19 Sep 2020 20:59:11 +0100 Subject: [PATCH 619/632] Use https for network checks (#36480) --- pandas/_testing.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pandas/_testing.py b/pandas/_testing.py index cd34bec52daef..3e3ba480ebfeb 100644 --- a/pandas/_testing.py +++ b/pandas/_testing.py @@ -2403,7 +2403,7 @@ def can_connect(url, error_classes=None): @optional_args def network( t, - url="http://www.google.com", + url="https://www.google.com", raise_on_error=_RAISE_NETWORK_ERROR_DEFAULT, check_before_test=False, error_classes=None, @@ -2427,7 +2427,7 @@ def network( The test requiring network connectivity. url : path The url to test via ``pandas.io.common.urlopen`` to check - for connectivity. Defaults to 'http://www.google.com'. + for connectivity. Defaults to 'https://www.google.com'. raise_on_error : bool If True, never catches errors. check_before_test : bool @@ -2471,7 +2471,7 @@ def network( You can specify alternative URLs:: - >>> @network("http://www.yahoo.com") + >>> @network("https://www.yahoo.com") ... def test_something_with_yahoo(): ... raise IOError("Failure Message") >>> test_something_with_yahoo() From a90d559500e8e34a26fb33a75abb28f5f16fb743 Mon Sep 17 00:00:00 2001 From: Asish Mahapatra Date: Sat, 19 Sep 2020 16:14:05 -0400 Subject: [PATCH 620/632] BUG: Python Parser skipping over items if BOM present in first element of header (#36365) --- doc/source/whatsnew/v1.2.0.rst | 1 + pandas/io/parsers.py | 10 ++++------ pandas/tests/io/parser/test_common.py | 10 ++++++++++ 3 files changed, 15 insertions(+), 6 deletions(-) diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index 7724acdd8602c..9c2e5427fcd3f 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -321,6 +321,7 @@ I/O - :meth:`to_csv` did not support zip compression for binary file object not having a filename (:issue:`35058`) - :meth:`to_csv` and :meth:`read_csv` did not honor `compression` and `encoding` for path-like objects that are internally converted to file-like objects (:issue:`35677`, :issue:`26124`, and :issue:`32392`) - :meth:`to_picke` and :meth:`read_pickle` did not support compression for file-objects (:issue:`26237`, :issue:`29054`, and :issue:`29570`) +- Bug in :meth:`read_csv` with `engine='python'` truncating data if multiple items present in first row and first element started with BOM (:issue:`36343`) Plotting ^^^^^^^^ diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py index 43ffbe6bdd66c..bc622ab8c1f18 100644 --- a/pandas/io/parsers.py +++ b/pandas/io/parsers.py @@ -2886,14 +2886,12 @@ def _check_for_bom(self, first_row): # quotation mark. if len(first_row_bom) > end + 1: new_row += first_row_bom[end + 1 :] - return [new_row] + first_row[1:] - elif len(first_row_bom) > 1: - return [first_row_bom[1:]] else: - # First row is just the BOM, so we - # return an empty string. 
- return [""] + + # No quotation so just remove BOM from first element + new_row = first_row_bom[1:] + return [new_row] + first_row[1:] def _is_line_empty(self, line): """ diff --git a/pandas/tests/io/parser/test_common.py b/pandas/tests/io/parser/test_common.py index 49358fe2ecfe4..6bbc9bc9e1788 100644 --- a/pandas/tests/io/parser/test_common.py +++ b/pandas/tests/io/parser/test_common.py @@ -2128,6 +2128,16 @@ def test_first_row_bom(all_parsers): tm.assert_frame_equal(result, expected) +def test_first_row_bom_unquoted(all_parsers): + # see gh-36343 + parser = all_parsers + data = """\ufeffHead1 Head2 Head3""" + + result = parser.read_csv(StringIO(data), delimiter="\t") + expected = DataFrame(columns=["Head1", "Head2", "Head3"]) + tm.assert_frame_equal(result, expected) + + def test_integer_precision(all_parsers): # Gh 7072 s = """1,1;0;0;0;1;1;3844;3844;3844;1;1;1;1;1;1;0;0;1;1;0;0,,,4321583677327450765 From c1484b1e9801e11f7266c4355f8f3d05bcc8ee86 Mon Sep 17 00:00:00 2001 From: Andrew Wieteska <48889395+arw2019@users.noreply.github.com> Date: Sat, 19 Sep 2020 16:24:18 -0400 Subject: [PATCH 621/632] PERF: pd.to_datetime, unit='s' much slower for float64 than for int64 (#35027) --- asv_bench/benchmarks/timeseries.py | 23 +++++++++++++ doc/source/whatsnew/v1.2.0.rst | 1 + pandas/_libs/tslib.pyx | 46 ++++++++++++++++---------- pandas/_libs/tslibs/conversion.pxd | 1 + pandas/tests/io/sas/data/datetime.csv | 4 +-- pandas/tests/tools/test_to_datetime.py | 8 +++-- 6 files changed, 61 insertions(+), 22 deletions(-) diff --git a/asv_bench/benchmarks/timeseries.py b/asv_bench/benchmarks/timeseries.py index b494dbd8a38fa..27c904dda5b45 100644 --- a/asv_bench/benchmarks/timeseries.py +++ b/asv_bench/benchmarks/timeseries.py @@ -263,6 +263,29 @@ def time_lookup_and_cleanup(self): self.ts.index._cleanup() +class ToDatetimeFromIntsFloats: + def setup(self): + self.ts_sec = Series(range(1521080307, 1521685107), dtype="int64") + self.ts_sec_float = self.ts_sec.astype("float64") + + self.ts_nanosec = 1_000_000 * self.ts_sec + self.ts_nanosec_float = self.ts_nanosec.astype("float64") + + # speed of int64 and float64 paths should be comparable + + def time_nanosec_int64(self): + to_datetime(self.ts_nanosec, unit="ns") + + def time_nanosec_float64(self): + to_datetime(self.ts_nanosec_float, unit="ns") + + def time_sec_int64(self): + to_datetime(self.ts_sec, unit="s") + + def time_sec_float64(self): + to_datetime(self.ts_sec_float, unit="s") + + class ToDatetimeYYYYMMDD: def setup(self): rng = date_range(start="1/1/2000", periods=10000, freq="D") diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index 9c2e5427fcd3f..3402d73499d99 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -227,6 +227,7 @@ Performance improvements - Performance improvements when creating :meth:`pd.Series.map` from a huge dictionary (:issue:`34717`) - Performance improvement in :meth:`GroupBy.transform` with the ``numba`` engine (:issue:`36240`) - ``Styler`` uuid method altered to compress data transmission over web whilst maintaining reasonably low table collision probability (:issue:`36345`) +- Performance improvement in :meth:`pd.to_datetime` with non-`ns` time unit for `float` `dtype` columns (:issue:`20445`) .. 
--------------------------------------------------------------------------- diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx index e4128af62d06d..b1b38505b9476 100644 --- a/pandas/_libs/tslib.pyx +++ b/pandas/_libs/tslib.pyx @@ -41,6 +41,7 @@ from pandas._libs.tslibs.conversion cimport ( cast_from_unit, convert_datetime_to_tsobject, get_datetime64_nanos, + precision_from_unit, ) from pandas._libs.tslibs.nattype cimport ( NPY_NAT, @@ -205,6 +206,7 @@ def array_with_unit_to_datetime( cdef: Py_ssize_t i, j, n=len(values) int64_t m + int prec = 0 ndarray[float64_t] fvalues bint is_ignore = errors=='ignore' bint is_coerce = errors=='coerce' @@ -217,38 +219,48 @@ def array_with_unit_to_datetime( assert is_ignore or is_coerce or is_raise - if unit == 'ns': - if issubclass(values.dtype.type, np.integer): - result = values.astype('M8[ns]') + if unit == "ns": + if issubclass(values.dtype.type, (np.integer, np.float_)): + result = values.astype("M8[ns]", copy=False) else: result, tz = array_to_datetime(values.astype(object), errors=errors) return result, tz - m = cast_from_unit(None, unit) + m, p = precision_from_unit(unit) if is_raise: - - # try a quick conversion to i8 + # try a quick conversion to i8/f8 # if we have nulls that are not type-compat # then need to iterate - if values.dtype.kind == "i": - # Note: this condition makes the casting="same_kind" redundant - iresult = values.astype('i8', casting='same_kind', copy=False) - # fill by comparing to NPY_NAT constant + + if values.dtype.kind == "i" or values.dtype.kind == "f": + iresult = values.astype("i8", copy=False) + # fill missing values by comparing to NPY_NAT mask = iresult == NPY_NAT iresult[mask] = 0 - fvalues = iresult.astype('f8') * m + fvalues = iresult.astype("f8") * m need_to_iterate = False - # check the bounds if not need_to_iterate: - - if ((fvalues < Timestamp.min.value).any() - or (fvalues > Timestamp.max.value).any()): + # check the bounds + if (fvalues < Timestamp.min.value).any() or ( + (fvalues > Timestamp.max.value).any() + ): raise OutOfBoundsDatetime(f"cannot convert input with unit '{unit}'") - result = (iresult * m).astype('M8[ns]') - iresult = result.view('i8') + + if values.dtype.kind == "i": + result = (iresult * m).astype("M8[ns]") + + elif values.dtype.kind == "f": + fresult = (values * m).astype("f8") + fresult[mask] = 0 + if prec: + fresult = round(fresult, prec) + result = fresult.astype("M8[ns]", copy=False) + + iresult = result.view("i8") iresult[mask] = NPY_NAT + return result, tz result = np.empty(n, dtype='M8[ns]') diff --git a/pandas/_libs/tslibs/conversion.pxd b/pandas/_libs/tslibs/conversion.pxd index 73772e5ab4577..56f5481b7e781 100644 --- a/pandas/_libs/tslibs/conversion.pxd +++ b/pandas/_libs/tslibs/conversion.pxd @@ -24,5 +24,6 @@ cdef int64_t get_datetime64_nanos(object val) except? -1 cpdef datetime localize_pydatetime(datetime dt, object tz) cdef int64_t cast_from_unit(object ts, str unit) except? 
-1 +cpdef (int64_t, int) precision_from_unit(str unit) cdef int64_t normalize_i8_stamp(int64_t local_val) nogil diff --git a/pandas/tests/io/sas/data/datetime.csv b/pandas/tests/io/sas/data/datetime.csv index 6126f6d04eaf0..f0d82f7fc494e 100644 --- a/pandas/tests/io/sas/data/datetime.csv +++ b/pandas/tests/io/sas/data/datetime.csv @@ -1,5 +1,5 @@ Date1,Date2,DateTime,DateTimeHi,Taiw -1677-09-22,1677-09-22,1677-09-21 00:12:44,1677-09-21 00:12:43.145226,1912-01-01 +1677-09-22,1677-09-22,1677-09-21 00:12:44,1677-09-21 00:12:43.145225,1912-01-01 1960-01-01,1960-01-01,1960-01-01 00:00:00,1960-01-01 00:00:00.000000,1960-01-01 2016-02-29,2016-02-29,2016-02-29 23:59:59,2016-02-29 23:59:59.123456,2016-02-29 -2262-04-11,2262-04-11,2262-04-11 23:47:16,2262-04-11 23:47:16.854774,2262-04-11 +2262-04-11,2262-04-11,2262-04-11 23:47:16,2262-04-11 23:47:16.854775,2262-04-11 diff --git a/pandas/tests/tools/test_to_datetime.py b/pandas/tests/tools/test_to_datetime.py index d2049892705ea..819474e1f32e7 100644 --- a/pandas/tests/tools/test_to_datetime.py +++ b/pandas/tests/tools/test_to_datetime.py @@ -1217,10 +1217,10 @@ def test_unit_mixed(self, cache): @pytest.mark.parametrize("cache", [True, False]) def test_unit_rounding(self, cache): - # GH 14156: argument will incur floating point errors but no - # premature rounding + # GH 14156 & GH 20445: argument will incur floating point errors + # but no premature rounding result = pd.to_datetime(1434743731.8770001, unit="s", cache=cache) - expected = pd.Timestamp("2015-06-19 19:55:31.877000093") + expected = pd.Timestamp("2015-06-19 19:55:31.877000192") assert result == expected @pytest.mark.parametrize("cache", [True, False]) @@ -1454,6 +1454,8 @@ def test_to_datetime_unit(self): ] + [NaT] ) + # GH20455 argument will incur floating point errors but no premature rounding + result = result.round("ms") tm.assert_series_equal(result, expected) s = pd.concat( From a22cf439e1a890b79a42ee1ca5e4dec4ca8dfd8e Mon Sep 17 00:00:00 2001 From: Avinash Pancham <44933366+avinashpancham@users.noreply.github.com> Date: Sat, 19 Sep 2020 22:29:43 +0200 Subject: [PATCH 622/632] BUG: Enable Series.equals to compare numpy arrays to scalars (#36161) --- doc/source/whatsnew/v1.2.0.rst | 1 + pandas/_libs/lib.pyx | 11 ++++++++++- pandas/tests/dtypes/test_missing.py | 15 +++++++++++++++ pandas/tests/series/methods/test_equals.py | 20 ++++++++++++++++---- 4 files changed, 42 insertions(+), 5 deletions(-) diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index 3402d73499d99..3ea3efb9d041f 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -270,6 +270,7 @@ Numeric ^^^^^^^ - Bug in :func:`to_numeric` where float precision was incorrect (:issue:`31364`) - Bug in :meth:`DataFrame.any` with ``axis=1`` and ``bool_only=True`` ignoring the ``bool_only`` keyword (:issue:`32432`) +- Bug in :meth:`Series.equals` where a ``ValueError`` was raised when numpy arrays were compared to scalars (:issue:`35267`) - Conversion diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx index cc63df90a9a9f..a57cf3b523985 100644 --- a/pandas/_libs/lib.pyx +++ b/pandas/_libs/lib.pyx @@ -591,7 +591,16 @@ def array_equivalent_object(left: object[:], right: object[:]) -> bool: if "tz-naive and tz-aware" in str(err): return False raise - + except ValueError: + # Avoid raising ValueError when comparing Numpy arrays to other types + if cnp.PyArray_IsAnyScalar(x) != cnp.PyArray_IsAnyScalar(y): + # Only compare scalars to scalars and non-scalars to non-scalars + 
return False
+            elif (not (cnp.PyArray_IsPythonScalar(x) or cnp.PyArray_IsPythonScalar(y))
+                  and not (isinstance(x, type(y)) or isinstance(y, type(x)))):
+                # Check if non-scalars have the same type
+                return False
+            raise
     return True

diff --git a/pandas/tests/dtypes/test_missing.py b/pandas/tests/dtypes/test_missing.py
index a642b23379c6f..046b82ef3131a 100644
--- a/pandas/tests/dtypes/test_missing.py
+++ b/pandas/tests/dtypes/test_missing.py
@@ -1,3 +1,4 @@
+from contextlib import nullcontext
 from datetime import datetime
 from decimal import Decimal
 
@@ -383,6 +384,20 @@ def test_array_equivalent(dtype_equal):
     assert not array_equivalent(DatetimeIndex([0, np.nan]), TimedeltaIndex([0, np.nan]))
 
 
+@pytest.mark.parametrize(
+    "val", [1, 1.1, 1 + 1j, True, "abc", [1, 2], (1, 2), {1, 2}, {"a": 1}, None]
+)
+def test_array_equivalent_series(val):
+    arr = np.array([1, 2])
+    cm = (
+        tm.assert_produces_warning(FutureWarning, check_stacklevel=False)
+        if isinstance(val, str)
+        else nullcontext()
+    )
+    with cm:
+        assert not array_equivalent(Series([arr, arr]), Series([arr, val]))
+
+
 def test_array_equivalent_different_dtype_but_equal():
     # Unclear if this is exposed anywhere in the public-facing API
     assert array_equivalent(np.array([1, 2]), np.array([1.0, 2.0]))
diff --git a/pandas/tests/series/methods/test_equals.py b/pandas/tests/series/methods/test_equals.py
index 600154adfcda3..cf55482fefe22 100644
--- a/pandas/tests/series/methods/test_equals.py
+++ b/pandas/tests/series/methods/test_equals.py
@@ -1,7 +1,10 @@
+from contextlib import nullcontext
+
 import numpy as np
 import pytest
 
 from pandas import MultiIndex, Series
+import pandas._testing as tm
 
 
 @pytest.mark.parametrize(
@@ -24,16 +27,25 @@ def test_equals(arr, idx):
     assert not s1.equals(s2)
 
 
-def test_equals_list_array():
+@pytest.mark.parametrize(
+    "val", [1, 1.1, 1 + 1j, True, "abc", [1, 2], (1, 2), {1, 2}, {"a": 1}, None]
+)
+def test_equals_list_array(val):
     # GH20676 Verify equals operator for list of Numpy arrays
     arr = np.array([1, 2])
     s1 = Series([arr, arr])
     s2 = s1.copy()
     assert s1.equals(s2)
 
-    # TODO: Series equals should also work between single value and list
-    # s1[1] = 9
-    # assert not s1.equals(s2)
+    s1[1] = val
+
+    cm = (
+        tm.assert_produces_warning(FutureWarning, check_stacklevel=False)
+        if isinstance(val, str)
+        else nullcontext()
+    )
+    with cm:
+        assert not s1.equals(s2)
 
 
 def test_equals_false_negative():
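A minimal sketch of the comparison this patch unblocks (a hypothetical snippet, not part of the patch series; it assumes a pandas build that includes #36161):

    import numpy as np
    import pandas as pd

    arr = np.array([1, 2])
    left = pd.Series([arr, arr])
    right = left.copy()
    right[1] = 9  # one element is an ndarray, the other a scalar

    # Element-wise comparison of an ndarray with a scalar yields an array,
    # whose truth value raises "The truth value of an array ... is
    # ambiguous"; the new except-ValueError branch treats the mismatched
    # pair as not equal instead of propagating the error.
    assert not left.equals(right)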
From d38dc0652fd97b2ea56cff6696261bd628eb2f96 Mon Sep 17 00:00:00 2001
From: patrick <61934744+phofl@users.noreply.github.com>
Date: Sun, 20 Sep 2020 00:03:59 +0200
Subject: [PATCH 623/632] [BUG]: Rolling.sum() calculated wrong values when
 axis is one and dtypes are mixed (#36458)

---
 doc/source/whatsnew/v1.2.0.rst      |  2 +-
 pandas/core/window/rolling.py       |  8 ++++-
 pandas/tests/window/test_rolling.py | 48 +++++++++++++++++++++++++++++
 3 files changed, 56 insertions(+), 2 deletions(-)

diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index 3ea3efb9d041f..cd53526cd4c79 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -344,7 +344,7 @@ Groupby/resample/rolling
 - Bug in :meth:`DataFrameGroupby.tshift` failing to raise ``ValueError`` when a frequency cannot be inferred for the index of a group (:issue:`35937`)
 - Bug in :meth:`DataFrame.groupby` does not always maintain column index name for ``any``, ``all``, ``bfill``, ``ffill``, ``shift`` (:issue:`29764`)
 - Bug in :meth:`DataFrameGroupBy.apply` raising error with ``np.nan`` group(s) when ``dropna=False`` (:issue:`35889`)
-
+- Bug in :meth:`Rolling.sum()` returned wrong values when dtypes were mixed between float and integer and axis was equal to one (:issue:`20649`, :issue:`35596`)
 
 Reshaping
 ^^^^^^^^^
diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py
index 21a7164411fb7..06c3ad23f904f 100644
--- a/pandas/core/window/rolling.py
+++ b/pandas/core/window/rolling.py
@@ -243,7 +243,13 @@ def _create_data(self, obj: FrameOrSeries) -> FrameOrSeries:
         if self.on is not None and not isinstance(self.on, Index):
             if obj.ndim == 2:
                 obj = obj.reindex(columns=obj.columns.difference([self.on]), copy=False)
-
+        if self.axis == 1:
+            # GH: 20649 in case of mixed dtype and axis=1 we have to convert everything
+            # to float to calculate the complete row at once. We exclude all non-numeric
+            # dtypes.
+            obj = obj.select_dtypes(include=["integer", "float"], exclude=["timedelta"])
+            obj = obj.astype("float64", copy=False)
+            obj._mgr = obj._mgr.consolidate()
         return obj
 
     def _gotitem(self, key, ndim, subset=None):
diff --git a/pandas/tests/window/test_rolling.py b/pandas/tests/window/test_rolling.py
index 88afcec0f7bf4..4dfa0287bbb03 100644
--- a/pandas/tests/window/test_rolling.py
+++ b/pandas/tests/window/test_rolling.py
@@ -771,3 +771,51 @@ def test_rolling_numerical_too_large_numbers():
         index=dates,
     )
     tm.assert_series_equal(result, expected)
+
+
+@pytest.mark.parametrize(
+    ("func", "value"),
+    [("sum", 2.0), ("max", 1.0), ("min", 1.0), ("mean", 1.0), ("median", 1.0)],
+)
+def test_rolling_mixed_dtypes_axis_1(func, value):
+    # GH: 20649
+    df = pd.DataFrame(1, index=[1, 2], columns=["a", "b", "c"])
+    df["c"] = 1.0
+    result = getattr(df.rolling(window=2, min_periods=1, axis=1), func)()
+    expected = pd.DataFrame(
+        {"a": [1.0, 1.0], "b": [value, value], "c": [value, value]}, index=[1, 2]
+    )
+    tm.assert_frame_equal(result, expected)
+
+
+def test_rolling_axis_one_with_nan():
+    # GH: 35596
+    df = pd.DataFrame(
+        [
+            [0, 1, 2, 4, np.nan, np.nan, np.nan],
+            [0, 1, 2, np.nan, np.nan, np.nan, np.nan],
+            [0, 2, 2, np.nan, 2, np.nan, 1],
+        ]
+    )
+    result = df.rolling(window=7, min_periods=1, axis="columns").sum()
+    expected = pd.DataFrame(
+        [
+            [0.0, 1.0, 3.0, 7.0, 7.0, 7.0, 7.0],
+            [0.0, 1.0, 3.0, 3.0, 3.0, 3.0, 3.0],
+            [0.0, 2.0, 4.0, 4.0, 6.0, 6.0, 7.0],
+        ]
+    )
+    tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize(
+    "value",
+    ["test", pd.to_datetime("2019-12-31"), pd.to_timedelta("1 days 06:05:01.00003")],
+)
+def test_rolling_axis_1_non_numeric_dtypes(value):
+    # GH: 20649
+    df = pd.DataFrame({"a": [1, 2]})
+    df["b"] = value
+    result = df.rolling(window=2, min_periods=1, axis=1).sum()
+    expected = pd.DataFrame({"a": [1.0, 2.0]})
+    tm.assert_frame_equal(result, expected)
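A minimal sketch of the wrong result this patch fixes (a hypothetical snippet, assuming a pandas build that includes #36458):

    import pandas as pd

    # Integer columns "a"/"b" and float column "c" sit in separate internal
    # blocks, so rolling over axis=1 previously never saw a complete row.
    df = pd.DataFrame(1, index=[1, 2], columns=["a", "b", "c"])
    df["c"] = 1.0

    result = df.rolling(window=2, min_periods=1, axis=1).sum()
    # With the fix each row is computed on a single float64 block, giving
    # a=1.0, b=2.0, c=2.0 in both rows.
    print(result)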
From 475a980c6b5ff342f734c98b54c27650bbb32e51 Mon Sep 17 00:00:00 2001
From: jeschwar <36767735+jeschwar@users.noreply.github.com>
Date: Sat, 19 Sep 2020 16:05:11 -0600
Subject: [PATCH 624/632] BUG: fix duplicate entries in LaTeX List of Tables
 when using longtable environments (#36297)

---
 doc/source/whatsnew/v1.2.0.rst           |  1 +
 pandas/io/formats/latex.py               | 22 +++++++++++++---
 pandas/tests/io/formats/test_to_latex.py | 33 ++++++++++++++++++++++++
 3 files changed, 52 insertions(+), 4 deletions(-)

diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst
index cd53526cd4c79..18940b574b517 100644
--- a/doc/source/whatsnew/v1.2.0.rst
+++ b/doc/source/whatsnew/v1.2.0.rst
@@ -323,6 +323,7 @@ I/O
 - :meth:`to_csv` did not support zip compression for binary file object not having a filename (:issue:`35058`)
 - :meth:`to_csv` and :meth:`read_csv` did not honor `compression` and `encoding` for path-like objects that are internally converted to file-like objects (:issue:`35677`, :issue:`26124`, and :issue:`32392`)
 - :meth:`to_pickle` and :meth:`read_pickle` did not support compression for file-objects (:issue:`26237`, :issue:`29054`, and :issue:`29570`)
+- Bug in :func:`LongTableBuilder.middle_separator` was duplicating LaTeX longtable entries in the List of Tables of a LaTeX document (:issue:`34360`)
 - Bug in :meth:`read_csv` with `engine='python'` truncating data if multiple items present in first row and first element started with BOM (:issue:`36343`)
 
 Plotting
diff --git a/pandas/io/formats/latex.py b/pandas/io/formats/latex.py
index 8080d953da308..eb35fff3a4f8e 100644
--- a/pandas/io/formats/latex.py
+++ b/pandas/io/formats/latex.py
@@ -431,13 +431,18 @@ class LongTableBuilder(GenericTableBuilder):
     >>> from pandas.io.formats import format as fmt
     >>> df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
     >>> formatter = fmt.DataFrameFormatter(df)
-    >>> builder = LongTableBuilder(formatter, caption='caption', label='lab',
-    ...                            column_format='lrl')
+    >>> builder = LongTableBuilder(formatter, caption='a long table',
+    ...                            label='tab:long', column_format='lrl')
     >>> table = builder.get_result()
     >>> print(table)
     \\begin{longtable}{lrl}
-    \\caption{caption}
-    \\label{lab}\\\\
+    \\caption{a long table}
+    \\label{tab:long}\\\\
+    \\toprule
+    {} & a & b \\\\
+    \\midrule
+    \\endfirsthead
+    \\caption[]{a long table} \\\\
     \\toprule
     {} & a & b \\\\
     \\midrule
@@ -476,7 +481,16 @@ def _caption_and_label(self) -> str:
     @property
     def middle_separator(self) -> str:
         iterator = self._create_row_iterator(over="header")
+
+        # the content between \endfirsthead and \endhead commands
+        # mitigates repeated List of Tables entries in the final LaTeX
+        # document when dealing with longtable environments; GH #34360
         elements = [
+            "\\midrule",
+            "\\endfirsthead",
+            f"\\caption[]{{{self.caption}}} \\\\" if self.caption else "",
+            self.top_separator,
+            self.header,
             "\\midrule",
             "\\endhead",
             "\\midrule",
diff --git a/pandas/tests/io/formats/test_to_latex.py b/pandas/tests/io/formats/test_to_latex.py
index a93ab6f9cc7aa..8df8796d236a5 100644
--- a/pandas/tests/io/formats/test_to_latex.py
+++ b/pandas/tests/io/formats/test_to_latex.py
@@ -411,6 +411,11 @@ def test_to_latex_longtable(self):
         df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
         withindex_result = df.to_latex(longtable=True)
         withindex_expected = r"""\begin{longtable}{lrl}
+\toprule
+{} & a & b \\
+\midrule
+\endfirsthead
+
 \toprule
 {} & a & b \\
 \midrule
@@ -430,6 +435,11 @@ def test_to_latex_longtable(self):
 
         withoutindex_result = df.to_latex(index=False, longtable=True)
         withoutindex_expected = r"""\begin{longtable}{rl}
+\toprule
+ a & b \\
+\midrule
+\endfirsthead
+
 \toprule
  a & b \\
 \midrule
@@ -525,6 +535,9 @@ def test_to_latex_longtable_caption_label(self):
 
         df = DataFrame({"a": [1, 2], "b": ["b1", "b2"]})
 
+        # test when no caption and no label is provided
+        # is performed by test_to_latex_longtable()
+
         # test when only the caption is provided
         result_c = df.to_latex(longtable=True, caption=the_caption)
 
@@ -533,6 +546,11 @@
 \toprule
 {} & a & b \\
 \midrule
+\endfirsthead
+\caption[]{a table in a \texttt{longtable} environment} \\
+\toprule
+{} & a & b \\
+\midrule
 \endhead
 \midrule
 \multicolumn{3}{r}{{Continued on next page}} \\
@@ -552,6 +570,11 @@ def test_to_latex_longtable_caption_label(self):
         expected_l =
r"""\begin{longtable}{lrl} \label{tab:longtable}\\ +\toprule +{} & a & b \\ +\midrule +\endfirsthead + \toprule {} & a & b \\ \midrule @@ -578,6 +601,11 @@ def test_to_latex_longtable_caption_label(self): \toprule {} & a & b \\ \midrule +\endfirsthead +\caption[]{a table in a \texttt{longtable} environment} \\ +\toprule +{} & a & b \\ +\midrule \endhead \midrule \multicolumn{3}{r}{{Continued on next page}} \\ @@ -623,6 +651,11 @@ def test_to_latex_longtable_position(self): result_p = df.to_latex(longtable=True, position=the_position) expected_p = r"""\begin{longtable}[t]{lrl} +\toprule +{} & a & b \\ +\midrule +\endfirsthead + \toprule {} & a & b \\ \midrule From 2705dd6dafe53da3156ae5e25d04951b995b10aa Mon Sep 17 00:00:00 2001 From: Maxim Ivanov <41443370+ivanovmg@users.noreply.github.com> Date: Sun, 20 Sep 2020 05:05:37 +0700 Subject: [PATCH 625/632] REF: pandas/io/formats/format.py (#36434) --- pandas/io/formats/format.py | 623 +++++++++++++++++++++--------------- pandas/io/formats/html.py | 36 +-- 2 files changed, 376 insertions(+), 283 deletions(-) diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py index 77f2a53fc7fab..7eb31daa894c9 100644 --- a/pandas/io/formats/format.py +++ b/pandas/io/formats/format.py @@ -10,6 +10,7 @@ from functools import partial from io import StringIO import math +from operator import itemgetter import re from shutil import get_terminal_size from typing import ( @@ -67,6 +68,7 @@ from pandas.core.indexes.api import Index, MultiIndex, PeriodIndex, ensure_index from pandas.core.indexes.datetimes import DatetimeIndex from pandas.core.indexes.timedeltas import TimedeltaIndex +from pandas.core.reshape.concat import concat from pandas.io.common import stringify_path from pandas.io.formats.printing import adjoin, justify, pprint_thing @@ -261,17 +263,15 @@ def __init__( self._chk_truncate() def _chk_truncate(self) -> None: - from pandas.core.reshape.concat import concat - self.tr_row_num: Optional[int] min_rows = self.min_rows max_rows = self.max_rows # truncation determined by max_rows, actual truncated number of rows # used below by min_rows - truncate_v = max_rows and (len(self.series) > max_rows) + is_truncated_vertically = max_rows and (len(self.series) > max_rows) series = self.series - if truncate_v: + if is_truncated_vertically: max_rows = cast(int, max_rows) if min_rows: # if min_rows is set (not None or 0), set max_rows to minimum @@ -287,7 +287,7 @@ def _chk_truncate(self) -> None: else: self.tr_row_num = None self.tr_series = series - self.truncate_v = truncate_v + self.is_truncated_vertically = is_truncated_vertically def _get_footer(self) -> str: name = self.series.name @@ -306,7 +306,9 @@ def _get_footer(self) -> str: series_name = pprint_thing(name, escape_chars=("\t", "\r", "\n")) footer += f"Name: {series_name}" - if self.length is True or (self.length == "truncate" and self.truncate_v): + if self.length is True or ( + self.length == "truncate" and self.is_truncated_vertically + ): if footer: footer += ", " footer += f"Length: {len(self.series)}" @@ -358,7 +360,7 @@ def to_string(self) -> str: fmt_index, have_header = self._get_formatted_index() fmt_values = self._get_formatted_values() - if self.truncate_v: + if self.is_truncated_vertically: n_header_rows = 0 row_num = self.tr_row_num row_num = cast(int, row_num) @@ -451,9 +453,13 @@ def get_adjustment() -> TextAdjustment: class TableFormatter: show_dimensions: Union[bool, str] - is_truncated: bool formatters: FormattersType columns: Index + _is_truncated: bool + + @property + 
def is_truncated(self) -> bool: + return self._is_truncated @property def should_show_dimensions(self) -> bool: @@ -537,8 +543,6 @@ class DataFrameFormatter(TableFormatter): __doc__ = __doc__ if __doc__ else "" __doc__ += common_docstring + return_docstring - col_space: ColspaceType - def __init__( self, frame: "DataFrame", @@ -565,315 +569,409 @@ def __init__( ): self.frame = frame self.show_index_names = index_names + self.sparsify = self._initialize_sparsify(sparsify) + self.float_format = float_format + self.formatters = self._initialize_formatters(formatters) + self.na_rep = na_rep + self.decimal = decimal + self.col_space = self._initialize_colspace(col_space) + self.header = header + self.index = index + self.line_width = line_width + self.max_rows = max_rows + self.min_rows = min_rows + self.max_cols = max_cols + self.show_dimensions = show_dimensions + self.table_id = table_id + self.render_links = render_links + self.justify = self._initialize_justify(justify) + self.bold_rows = bold_rows + self.escape = escape + self.columns = self._initialize_columns(columns) - if sparsify is None: - sparsify = get_option("display.multi_sparse") + self.max_cols_fitted = self._calc_max_cols_fitted() + self.max_rows_fitted = self._calc_max_rows_fitted() - self.sparsify = sparsify + self._truncate() + self.adj = get_adjustment() - self.float_format = float_format + def _initialize_sparsify(self, sparsify: Optional[bool]) -> bool: + if sparsify is None: + return get_option("display.multi_sparse") + return sparsify + + def _initialize_formatters( + self, formatters: Optional[FormattersType] + ) -> FormattersType: if formatters is None: - self.formatters = {} - elif len(frame.columns) == len(formatters) or isinstance(formatters, dict): - self.formatters = formatters + return {} + elif len(self.frame.columns) == len(formatters) or isinstance(formatters, dict): + return formatters else: raise ValueError( f"Formatters length({len(formatters)}) should match " - f"DataFrame number of columns({len(frame.columns)})" + f"DataFrame number of columns({len(self.frame.columns)})" ) - self.na_rep = na_rep - self.decimal = decimal + + def _initialize_justify(self, justify: Optional[str]) -> str: + if justify is None: + return get_option("display.colheader_justify") + else: + return justify + + def _initialize_columns(self, columns: Optional[Sequence[str]]) -> Index: + if columns is not None: + cols = ensure_index(columns) + self.frame = self.frame[cols] + return cols + else: + return self.frame.columns + + def _initialize_colspace( + self, col_space: Optional[ColspaceArgType] + ) -> ColspaceType: + result: ColspaceType + if col_space is None: - self.col_space = {} + result = {} elif isinstance(col_space, (int, str)): - self.col_space = {"": col_space} - self.col_space.update({column: col_space for column in self.frame.columns}) + result = {"": col_space} + result.update({column: col_space for column in self.frame.columns}) elif isinstance(col_space, Mapping): for column in col_space.keys(): if column not in self.frame.columns and column != "": raise ValueError( f"Col_space is defined for an unknown column: {column}" ) - self.col_space = col_space + result = col_space else: - if len(frame.columns) != len(col_space): + if len(self.frame.columns) != len(col_space): raise ValueError( f"Col_space length({len(col_space)}) should match " - f"DataFrame number of columns({len(frame.columns)})" + f"DataFrame number of columns({len(self.frame.columns)})" ) - self.col_space = dict(zip(self.frame.columns, col_space)) + result 
= dict(zip(self.frame.columns, col_space)) + return result - self.header = header - self.index = index - self.line_width = line_width - self.max_rows = max_rows - self.min_rows = min_rows - self.max_cols = max_cols - self.max_rows_displayed = min(max_rows or len(self.frame), len(self.frame)) - self.show_dimensions = show_dimensions - self.table_id = table_id - self.render_links = render_links + @property + def max_rows_displayed(self) -> int: + return min(self.max_rows or len(self.frame), len(self.frame)) - if justify is None: - self.justify = get_option("display.colheader_justify") + def _calc_max_cols_fitted(self) -> Optional[int]: + """Number of columns fitting the screen.""" + if not self._is_in_terminal(): + return self.max_cols + + width, _ = get_terminal_size() + if self._is_screen_narrow(width): + return width else: - self.justify = justify + return self.max_cols - self.bold_rows = bold_rows - self.escape = escape + def _calc_max_rows_fitted(self) -> Optional[int]: + """Number of rows with data fitting the screen.""" + if not self._is_in_terminal(): + return self.max_rows - if columns is not None: - self.columns = ensure_index(columns) - self.frame = self.frame[self.columns] + _, height = get_terminal_size() + if self.max_rows == 0: + # rows available to fill with actual data + return height - self._get_number_of_auxillary_rows() + + max_rows: Optional[int] + if self._is_screen_short(height): + max_rows = height else: - self.columns = frame.columns + max_rows = self.max_rows - self._chk_truncate() - self.adj = get_adjustment() + if max_rows: + if (len(self.frame) > max_rows) and self.min_rows: + # if truncated, set max_rows showed to min_rows + max_rows = min(self.min_rows, max_rows) + return max_rows - def _chk_truncate(self) -> None: + def _is_in_terminal(self) -> bool: + """Check if the output is to be shown in terminal.""" + return bool(self.max_cols == 0 or self.max_rows == 0) + + def _is_screen_narrow(self, max_width) -> bool: + return bool(self.max_cols == 0 and len(self.frame.columns) > max_width) + + def _is_screen_short(self, max_height) -> bool: + return bool(self.max_rows == 0 and len(self.frame) > max_height) + + def _get_number_of_auxillary_rows(self) -> int: + """Get number of rows occupied by prompt, dots and dimension info.""" + dot_row = 1 + prompt_row = 1 + num_rows = dot_row + prompt_row + + if self.show_dimensions: + num_rows += len(self._dimensions_info.splitlines()) + + if self.header: + num_rows += 1 + + return num_rows + + @property + def is_truncated_horizontally(self) -> bool: + return bool(self.max_cols_fitted and (len(self.columns) > self.max_cols_fitted)) + + @property + def is_truncated_vertically(self) -> bool: + return bool(self.max_rows_fitted and (len(self.frame) > self.max_rows_fitted)) + + @property + def is_truncated(self) -> bool: + return bool(self.is_truncated_horizontally or self.is_truncated_vertically) + + def _truncate(self) -> None: """ - Checks whether the frame should be truncated. If so, slices - the frame up. + Check whether the frame should be truncated. If so, slice the frame up. 
""" - from pandas.core.reshape.concat import concat + self.tr_frame = self.frame.copy() - # Cut the data to the information actually printed - max_cols = self.max_cols - max_rows = self.max_rows - self.max_rows_adj: Optional[int] - max_rows_adj: Optional[int] - - if max_cols == 0 or max_rows == 0: # assume we are in the terminal - (w, h) = get_terminal_size() - self.w = w - self.h = h - if self.max_rows == 0: - dot_row = 1 - prompt_row = 1 - if self.show_dimensions: - show_dimension_rows = 3 - # assume we only get here if self.header is boolean. - # i.e. not to_latex() where self.header may be List[str] - self.header = cast(bool, self.header) - n_add_rows = self.header + dot_row + show_dimension_rows + prompt_row - # rows available to fill with actual data - max_rows_adj = self.h - n_add_rows - self.max_rows_adj = max_rows_adj - - # Format only rows and columns that could potentially fit the - # screen - if max_cols == 0 and len(self.frame.columns) > w: - max_cols = w - if max_rows == 0 and len(self.frame) > h: - max_rows = h - - if not hasattr(self, "max_rows_adj"): - if max_rows: - if (len(self.frame) > max_rows) and self.min_rows: - # if truncated, set max_rows showed to min_rows - max_rows = min(self.min_rows, max_rows) - self.max_rows_adj = max_rows - if not hasattr(self, "max_cols_adj"): - self.max_cols_adj = max_cols - - max_cols_adj = self.max_cols_adj - max_rows_adj = self.max_rows_adj - - truncate_h = max_cols_adj and (len(self.columns) > max_cols_adj) - truncate_v = max_rows_adj and (len(self.frame) > max_rows_adj) - - frame = self.frame - if truncate_h: - # cast here since if truncate_h is True, max_cols_adj is not None - max_cols_adj = cast(int, max_cols_adj) - if max_cols_adj == 0: - col_num = len(frame.columns) - elif max_cols_adj == 1: - max_cols = cast(int, max_cols) - frame = frame.iloc[:, :max_cols] - col_num = max_cols - else: - col_num = max_cols_adj // 2 - frame = concat( - (frame.iloc[:, :col_num], frame.iloc[:, -col_num:]), axis=1 - ) - # truncate formatter - if isinstance(self.formatters, (list, tuple)): - truncate_fmt = self.formatters - self.formatters = [ - *truncate_fmt[:col_num], - *truncate_fmt[-col_num:], - ] - self.tr_col_num = col_num - if truncate_v: - # cast here since if truncate_v is True, max_rows_adj is not None - max_rows_adj = cast(int, max_rows_adj) - if max_rows_adj == 1: - row_num = max_rows - frame = frame.iloc[:max_rows, :] - else: - row_num = max_rows_adj // 2 - frame = concat((frame.iloc[:row_num, :], frame.iloc[-row_num:, :])) - self.tr_row_num = row_num - else: - self.tr_row_num = None + if self.is_truncated_horizontally: + self._truncate_horizontally() - self.tr_frame = frame - self.truncate_h = truncate_h - self.truncate_v = truncate_v - self.is_truncated = bool(self.truncate_h or self.truncate_v) + if self.is_truncated_vertically: + self._truncate_vertically() - def _to_str_columns(self) -> List[List[str]]: - """ - Render a DataFrame to a list of columns (as lists of strings). + def _truncate_horizontally(self) -> None: + """Remove columns, which are not to be displayed and adjust formatters. 
+ + Attributes affected: + - tr_frame + - formatters + - tr_col_num """ - # this method is not used by to_html where self.col_space - # could be a string so safe to cast - col_space = {k: cast(int, v) for k, v in self.col_space.items()} + assert self.max_cols_fitted is not None + col_num = self.max_cols_fitted // 2 + if col_num >= 1: + cols_to_keep = [ + x + for x in range(self.frame.shape[1]) + if x < col_num or x >= len(self.frame.columns) - col_num + ] + self.tr_frame = self.tr_frame.iloc[:, cols_to_keep] - frame = self.tr_frame - # may include levels names also + # truncate formatter + if isinstance(self.formatters, (list, tuple)): + slicer = itemgetter(*cols_to_keep) + self.formatters = slicer(self.formatters) + else: + col_num = cast(int, self.max_cols) + self.tr_frame = self.tr_frame.iloc[:, :col_num] + self.tr_col_num = col_num - str_index = self._get_formatted_index(frame) + def _truncate_vertically(self) -> None: + """Remove rows, which are not to be displayed. + + Attributes affected: + - tr_frame + - tr_row_num + """ + assert self.max_rows_fitted is not None + row_num = self.max_rows_fitted // 2 + if row_num >= 1: + rows_to_keep = [ + x + for x in range(self.frame.shape[0]) + if x < row_num or x >= len(self.frame) - row_num + ] + self.tr_frame = self.tr_frame.iloc[rows_to_keep, :] + else: + row_num = cast(int, self.max_rows) + self.tr_frame = self.tr_frame.iloc[:row_num, :] + self.tr_row_num = row_num + + def _get_strcols_without_index(self) -> List[List[str]]: + strcols: List[List[str]] = [] if not is_list_like(self.header) and not self.header: - stringified = [] - for i, c in enumerate(frame): + for i, c in enumerate(self.tr_frame): fmt_values = self._format_col(i) fmt_values = _make_fixed_width( - fmt_values, self.justify, minimum=col_space.get(c, 0), adj=self.adj + strings=fmt_values, + justify=self.justify, + minimum=int(self.col_space.get(c, 0)), + adj=self.adj, + ) + strcols.append(fmt_values) + return strcols + + if is_list_like(self.header): + # cast here since can't be bool if is_list_like + self.header = cast(List[str], self.header) + if len(self.header) != len(self.columns): + raise ValueError( + f"Writing {len(self.columns)} cols " + f"but got {len(self.header)} aliases" ) - stringified.append(fmt_values) + str_columns = [[label] for label in self.header] else: - if is_list_like(self.header): - # cast here since can't be bool if is_list_like - self.header = cast(List[str], self.header) - if len(self.header) != len(self.columns): - raise ValueError( - f"Writing {len(self.columns)} cols " - f"but got {len(self.header)} aliases" - ) - str_columns = [[label] for label in self.header] - else: - str_columns = self._get_formatted_column_labels(frame) + str_columns = self._get_formatted_column_labels(self.tr_frame) - if self.show_row_idx_names: - for x in str_columns: - x.append("") + if self.show_row_idx_names: + for x in str_columns: + x.append("") - stringified = [] - for i, c in enumerate(frame): - cheader = str_columns[i] - header_colwidth = max( - col_space.get(c, 0), *(self.adj.len(x) for x in cheader) - ) - fmt_values = self._format_col(i) - fmt_values = _make_fixed_width( - fmt_values, self.justify, minimum=header_colwidth, adj=self.adj - ) + for i, c in enumerate(self.tr_frame): + cheader = str_columns[i] + header_colwidth = max( + int(self.col_space.get(c, 0)), *(self.adj.len(x) for x in cheader) + ) + fmt_values = self._format_col(i) + fmt_values = _make_fixed_width( + fmt_values, self.justify, minimum=header_colwidth, adj=self.adj + ) + + max_len = 
max(max(self.adj.len(x) for x in fmt_values), header_colwidth) + cheader = self.adj.justify(cheader, max_len, mode=self.justify) + strcols.append(cheader + fmt_values) + + return strcols - max_len = max(max(self.adj.len(x) for x in fmt_values), header_colwidth) - cheader = self.adj.justify(cheader, max_len, mode=self.justify) - stringified.append(cheader + fmt_values) + def _get_strcols(self) -> List[List[str]]: + strcols = self._get_strcols_without_index() - strcols = stringified + str_index = self._get_formatted_index(self.tr_frame) if self.index: strcols.insert(0, str_index) - # Add ... to signal truncated - truncate_h = self.truncate_h - truncate_v = self.truncate_v + return strcols - if truncate_h: - col_num = self.tr_col_num - strcols.insert(self.tr_col_num + 1, [" ..."] * (len(str_index))) - if truncate_v: - n_header_rows = len(str_index) - len(frame) - row_num = self.tr_row_num - # cast here since if truncate_v is True, self.tr_row_num is not None - row_num = cast(int, row_num) - for ix, col in enumerate(strcols): - # infer from above row - cwidth = self.adj.len(strcols[ix][row_num]) + def _to_str_columns(self) -> List[List[str]]: + """ + Render a DataFrame to a list of columns (as lists of strings). + """ + strcols = self._get_strcols() + + if self.is_truncated: + strcols = self._insert_dot_separators(strcols) + + return strcols + + def _insert_dot_separators(self, strcols: List[List[str]]) -> List[List[str]]: + str_index = self._get_formatted_index(self.tr_frame) + index_length = len(str_index) + + if self.is_truncated_horizontally: + strcols = self._insert_dot_separator_horizontal(strcols, index_length) + + if self.is_truncated_vertically: + strcols = self._insert_dot_separator_vertical(strcols, index_length) + + return strcols + + def _insert_dot_separator_horizontal( + self, strcols: List[List[str]], index_length: int + ) -> List[List[str]]: + strcols.insert(self.tr_col_num + 1, [" ..."] * index_length) + return strcols + + def _insert_dot_separator_vertical( + self, strcols: List[List[str]], index_length: int + ) -> List[List[str]]: + n_header_rows = index_length - len(self.tr_frame) + row_num = self.tr_row_num + for ix, col in enumerate(strcols): + cwidth = self.adj.len(col[row_num]) + + if self.is_truncated_horizontally: + is_dot_col = ix == self.tr_col_num + 1 + else: is_dot_col = False - if truncate_h: - is_dot_col = ix == col_num + 1 - if cwidth > 3 or is_dot_col: - my_str = "..." - else: - my_str = ".." - if ix == 0: - dot_mode = "left" - elif is_dot_col: - cwidth = 4 - dot_mode = "right" - else: - dot_mode = "right" - dot_str = self.adj.justify([my_str], cwidth, mode=dot_mode)[0] - strcols[ix].insert(row_num + n_header_rows, dot_str) + if cwidth > 3 or is_dot_col: + dots = "..." + else: + dots = ".." + + if ix == 0: + dot_mode = "left" + elif is_dot_col: + cwidth = 4 + dot_mode = "right" + else: + dot_mode = "right" + + dot_str = self.adj.justify([dots], cwidth, mode=dot_mode)[0] + col.insert(row_num + n_header_rows, dot_str) return strcols def write_result(self, buf: IO[str]) -> None: """ Render a DataFrame to a console-friendly tabular output. 
""" - from pandas import Series + text = self._get_string_representation() + + buf.writelines(text) + + if self.should_show_dimensions: + buf.write(self._dimensions_info) - frame = self.frame + @property + def _dimensions_info(self) -> str: + return f"\n\n[{len(self.frame)} rows x {len(self.frame.columns)} columns]" - if len(frame.columns) == 0 or len(frame.index) == 0: + def _get_string_representation(self) -> str: + if self.frame.empty: info_line = ( f"Empty {type(self.frame).__name__}\n" - f"Columns: {pprint_thing(frame.columns)}\n" - f"Index: {pprint_thing(frame.index)}" + f"Columns: {pprint_thing(self.frame.columns)}\n" + f"Index: {pprint_thing(self.frame.index)}" ) - text = info_line - else: + return info_line - strcols = self._to_str_columns() - if self.line_width is None: # no need to wrap around just print - # the whole frame - text = self.adj.adjoin(1, *strcols) - elif ( - not isinstance(self.max_cols, int) or self.max_cols > 0 - ): # need to wrap around - text = self._join_multiline(*strcols) - else: # max_cols == 0. Try to fit frame to terminal - lines = self.adj.adjoin(1, *strcols).split("\n") - max_len = Series(lines).str.len().max() - # plus truncate dot col - dif = max_len - self.w - # '+ 1' to avoid too wide repr (GH PR #17023) - adj_dif = dif + 1 - col_lens = Series([Series(ele).apply(len).max() for ele in strcols]) - n_cols = len(col_lens) - counter = 0 - while adj_dif > 0 and n_cols > 1: - counter += 1 - mid = int(round(n_cols / 2.0)) - mid_ix = col_lens.index[mid] - col_len = col_lens[mid_ix] - # adjoin adds one - adj_dif -= col_len + 1 - col_lens = col_lens.drop(mid_ix) - n_cols = len(col_lens) - # subtract index column - max_cols_adj = n_cols - self.index - # GH-21180. Ensure that we print at least two. - max_cols_adj = max(max_cols_adj, 2) - self.max_cols_adj = max_cols_adj - - # Call again _chk_truncate to cut frame appropriately - # and then generate string representation - self._chk_truncate() - strcols = self._to_str_columns() - text = self.adj.adjoin(1, *strcols) - buf.writelines(text) + strcols = self._to_str_columns() - if self.should_show_dimensions: - buf.write(f"\n\n[{len(frame)} rows x {len(frame.columns)} columns]") + if self.line_width is None: + # no need to wrap around just print the whole frame + return self.adj.adjoin(1, *strcols) + + if self.max_cols is None or self.max_cols > 0: + # need to wrap around + return self._join_multiline(*strcols) + + # max_cols == 0. Try to fit frame to terminal + return self._fit_strcols_to_terminal_width(strcols) + + def _fit_strcols_to_terminal_width(self, strcols) -> str: + from pandas import Series + + lines = self.adj.adjoin(1, *strcols).split("\n") + max_len = Series(lines).str.len().max() + # plus truncate dot col + width, _ = get_terminal_size() + dif = max_len - width + # '+ 1' to avoid too wide repr (GH PR #17023) + adj_dif = dif + 1 + col_lens = Series([Series(ele).apply(len).max() for ele in strcols]) + n_cols = len(col_lens) + counter = 0 + while adj_dif > 0 and n_cols > 1: + counter += 1 + mid = int(round(n_cols / 2.0)) + mid_ix = col_lens.index[mid] + col_len = col_lens[mid_ix] + # adjoin adds one + adj_dif -= col_len + 1 + col_lens = col_lens.drop(mid_ix) + n_cols = len(col_lens) + + # subtract index column + max_cols_fitted = n_cols - self.index + # GH-21180. Ensure that we print at least two. 
+        max_cols_fitted = max(max_cols_fitted, 2)
+        self.max_cols_fitted = max_cols_fitted
+
+        # Call again _truncate to cut frame appropriately
+        # and then generate string representation
+        self._truncate()
+        strcols = self._to_str_columns()
+        return self.adj.adjoin(1, *strcols)
 
     def _join_multiline(self, *args) -> str:
         lwidth = self.line_width
@@ -892,26 +990,25 @@ def _join_multiline(self, *args) -> str:
         col_bins = _binify(col_widths, lwidth)
         nbins = len(col_bins)
 
-        if self.truncate_v:
-            # cast here since if truncate_v is True, max_rows_adj is not None
-            self.max_rows_adj = cast(int, self.max_rows_adj)
-            nrows = self.max_rows_adj + 1
+        if self.is_truncated_vertically:
+            assert self.max_rows_fitted is not None
+            nrows = self.max_rows_fitted + 1
         else:
             nrows = len(self.frame)
 
         str_lst = []
-        st = 0
-        for i, ed in enumerate(col_bins):
-            row = strcols[st:ed]
+        start = 0
+        for i, end in enumerate(col_bins):
+            row = strcols[start:end]
             if self.index:
                 row.insert(0, idx)
             if nbins > 1:
-                if ed <= len(strcols) and i < nbins - 1:
+                if end <= len(strcols) and i < nbins - 1:
                     row.append([" \\"] + [" "] * (nrows - 1))
                 else:
                     row.append([" "] * nrows)
             str_lst.append(self.adj.adjoin(adjoin_width, *row))
-            st = ed
+            start = end
         return "\n\n".join(str_lst)
 
     def to_string(
diff --git a/pandas/io/formats/html.py b/pandas/io/formats/html.py
index c89189f1e679a..c8eb89afdd849 100644
--- a/pandas/io/formats/html.py
+++ b/pandas/io/formats/html.py
@@ -85,10 +85,8 @@ def row_levels(self) -> int:
     def _get_columns_formatted_values(self) -> Iterable:
         return self.columns
 
-    # https://github.com/python/mypy/issues/1237
-    # error: Signature of "is_truncated" incompatible with supertype "TableFormatter"
     @property
-    def is_truncated(self) -> bool:  # type: ignore[override]
+    def is_truncated(self) -> bool:
         return self.fmt.is_truncated
 
     @property
@@ -236,7 +234,7 @@ def _write_table(self, indent: int = 0) -> None:
         self.write("
</table>
    ", indent) def _write_col_header(self, indent: int) -> None: - truncate_h = self.fmt.truncate_h + is_truncated_horizontally = self.fmt.is_truncated_horizontally if isinstance(self.columns, MultiIndex): template = 'colspan="{span:d}" halign="left"' @@ -249,7 +247,7 @@ def _write_col_header(self, indent: int) -> None: level_lengths = get_level_lengths(levels, sentinel) inner_lvl = len(level_lengths) - 1 for lnum, (records, values) in enumerate(zip(level_lengths, levels)): - if truncate_h: + if is_truncated_horizontally: # modify the header lines ins_col = self.fmt.tr_col_num if self.fmt.sparsify: @@ -346,16 +344,16 @@ def _write_col_header(self, indent: int) -> None: row.extend(self._get_columns_formatted_values()) align = self.fmt.justify - if truncate_h: + if is_truncated_horizontally: ins_col = self.row_levels + self.fmt.tr_col_num row.insert(ins_col, "...") self.write_tr(row, indent, self.indent_delta, header=True, align=align) def _write_row_header(self, indent: int) -> None: - truncate_h = self.fmt.truncate_h + is_truncated_horizontally = self.fmt.is_truncated_horizontally row = [x if x is not None else "" for x in self.frame.index.names] + [""] * ( - self.ncols + (1 if truncate_h else 0) + self.ncols + (1 if is_truncated_horizontally else 0) ) self.write_tr(row, indent, self.indent_delta, header=True) @@ -390,8 +388,8 @@ def _write_body(self, indent: int) -> None: def _write_regular_rows( self, fmt_values: Mapping[int, List[str]], indent: int ) -> None: - truncate_h = self.fmt.truncate_h - truncate_v = self.fmt.truncate_v + is_truncated_horizontally = self.fmt.is_truncated_horizontally + is_truncated_vertically = self.fmt.is_truncated_vertically nrows = len(self.fmt.tr_frame) @@ -405,7 +403,7 @@ def _write_regular_rows( row: List[str] = [] for i in range(nrows): - if truncate_v and i == (self.fmt.tr_row_num): + if is_truncated_vertically and i == (self.fmt.tr_row_num): str_sep_row = ["..."] * len(row) self.write_tr( str_sep_row, @@ -426,7 +424,7 @@ def _write_regular_rows( row.append("") row.extend(fmt_values[j][i] for j in range(self.ncols)) - if truncate_h: + if is_truncated_horizontally: dot_col_ix = self.fmt.tr_col_num + self.row_levels row.insert(dot_col_ix, "...") self.write_tr( @@ -438,8 +436,8 @@ def _write_hierarchical_rows( ) -> None: template = 'rowspan="{span}" valign="top"' - truncate_h = self.fmt.truncate_h - truncate_v = self.fmt.truncate_v + is_truncated_horizontally = self.fmt.is_truncated_horizontally + is_truncated_vertically = self.fmt.is_truncated_vertically frame = self.fmt.tr_frame nrows = len(frame) @@ -454,12 +452,10 @@ def _write_hierarchical_rows( level_lengths = get_level_lengths(levels, sentinel) inner_lvl = len(level_lengths) - 1 - if truncate_v: + if is_truncated_vertically: # Insert ... row and adjust idx_values and # level_lengths to take this into account. ins_row = self.fmt.tr_row_num - # cast here since if truncate_v is True, self.fmt.tr_row_num is not None - ins_row = cast(int, ins_row) inserted = False for lnum, records in enumerate(level_lengths): rec_new = {} @@ -520,7 +516,7 @@ def _write_hierarchical_rows( row.append(v) row.extend(fmt_values[j][i] for j in range(self.ncols)) - if truncate_h: + if is_truncated_horizontally: row.insert( self.row_levels - sparse_offset + self.fmt.tr_col_num, "..." 
) @@ -534,7 +530,7 @@ def _write_hierarchical_rows( else: row = [] for i in range(len(frame)): - if truncate_v and i == (self.fmt.tr_row_num): + if is_truncated_vertically and i == (self.fmt.tr_row_num): str_sep_row = ["..."] * len(row) self.write_tr( str_sep_row, @@ -550,7 +546,7 @@ def _write_hierarchical_rows( row = [] row.extend(idx_values[i]) row.extend(fmt_values[j][i] for j in range(self.ncols)) - if truncate_h: + if is_truncated_horizontally: row.insert(self.row_levels + self.fmt.tr_col_num, "...") self.write_tr( row, From c33c3c03c227d53139d6b9a2b6bc1dc1f9400542 Mon Sep 17 00:00:00 2001 From: Yanxian Lin Date: Sat, 19 Sep 2020 15:07:24 -0700 Subject: [PATCH 626/632] TST: #31922 assert no segmentation fault with numpy.array.__contains__ (#36283) --- pandas/tests/scalar/test_na_scalar.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/pandas/tests/scalar/test_na_scalar.py b/pandas/tests/scalar/test_na_scalar.py index 5c4d7e191d1bb..10d366fe485da 100644 --- a/pandas/tests/scalar/test_na_scalar.py +++ b/pandas/tests/scalar/test_na_scalar.py @@ -305,3 +305,11 @@ def test_pickle_roundtrip_containers(as_frame, values, dtype): s = s.to_frame(name="A") result = tm.round_trip_pickle(s) tm.assert_equal(result, s) + + +@pytest.mark.parametrize("array", [np.array(["a"], dtype=object), ["a"]]) +def test_array_contains_na(array): + # GH 31922 + msg = "boolean value of NA is ambiguous" + with pytest.raises(TypeError, match=msg): + NA in array From d38dc0652fd97b2ea56cff6696261bd628eb2f96 Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Sat, 19 Sep 2020 15:53:29 -0700 Subject: [PATCH 627/632] REF: de-duplicate IntervalArray._validate_foo (#36483) --- pandas/core/arrays/interval.py | 62 +++++++++---------- pandas/tests/arrays/interval/test_interval.py | 4 ++ .../tests/indexes/interval/test_interval.py | 7 ++- 3 files changed, 39 insertions(+), 34 deletions(-) diff --git a/pandas/core/arrays/interval.py b/pandas/core/arrays/interval.py index f9f68004bcc23..ebabc7edcbf43 100644 --- a/pandas/core/arrays/interval.py +++ b/pandas/core/arrays/interval.py @@ -20,7 +20,6 @@ is_datetime64_any_dtype, is_float_dtype, is_integer_dtype, - is_interval, is_interval_dtype, is_list_like, is_object_dtype, @@ -813,7 +812,9 @@ def take(self, indices, allow_fill=False, fill_value=None, axis=None, **kwargs): fill_left = fill_right = fill_value if allow_fill: - fill_left, fill_right = self._validate_fill_value(fill_value) + if (np.asarray(indices) == -1).any(): + # We have excel tests that pass fill_value=True, xref GH#36466 + fill_left, fill_right = self._validate_fill_value(fill_value) left_take = take( self.left, indices, allow_fill=allow_fill, fill_value=fill_left @@ -824,20 +825,33 @@ def take(self, indices, allow_fill=False, fill_value=None, axis=None, **kwargs): return self._shallow_copy(left_take, right_take) - def _validate_fill_value(self, value): - if is_interval(value): - self._check_closed_matches(value, name="fill_value") - fill_left, fill_right = value.left, value.right - elif not is_scalar(value) and notna(value): - msg = ( - "'IntervalArray.fillna' only supports filling with a " - "'scalar pandas.Interval or NA'. " - f"Got a '{type(value).__name__}' instead." 
- ) - raise ValueError(msg) + def _validate_listlike(self, value): + # list-like of intervals + try: + array = IntervalArray(value) + # TODO: self._check_closed_matches(array, name="value") + value_left, value_right = array.left, array.right + except TypeError as err: + # wrong type: not interval or NA + msg = f"'value' should be an interval type, got {type(value)} instead." + raise TypeError(msg) from err + return value_left, value_right + + def _validate_scalar(self, value): + if isinstance(value, Interval): + self._check_closed_matches(value, name="value") + left, right = value.left, value.right + elif is_valid_nat_for_dtype(value, self.left.dtype): + # GH#18295 + left = right = value else: - fill_left = fill_right = self.left._na_value - return fill_left, fill_right + raise ValueError( + "can only insert Interval objects and NA into an IntervalArray" + ) + return left, right + + def _validate_fill_value(self, value): + return self._validate_scalar(value) def _validate_fillna_value(self, value): if not isinstance(value, Interval): @@ -851,26 +865,12 @@ def _validate_fillna_value(self, value): return value.left, value.right def _validate_insert_value(self, value): - if isinstance(value, Interval): - if value.closed != self.closed: - raise ValueError( - "inserted item must be closed on the same side as the index" - ) - left_insert = value.left - right_insert = value.right - elif is_valid_nat_for_dtype(value, self.left.dtype): - # GH#18295 - left_insert = right_insert = value - else: - raise ValueError( - "can only insert Interval objects and NA into an IntervalIndex" - ) - return left_insert, right_insert + return self._validate_scalar(value) def _validate_setitem_value(self, value): needs_float_conversion = False - if is_scalar(value) and isna(value): + if is_valid_nat_for_dtype(value, self.left.dtype): # na value: need special casing to set directly on numpy arrays if is_integer_dtype(self.dtype.subtype): # can't set NaN on a numpy integer array diff --git a/pandas/tests/arrays/interval/test_interval.py b/pandas/tests/arrays/interval/test_interval.py index 0176755b54dd1..e5ccb51ce36f5 100644 --- a/pandas/tests/arrays/interval/test_interval.py +++ b/pandas/tests/arrays/interval/test_interval.py @@ -105,6 +105,10 @@ def test_set_na(self, left_right_dtypes): left, right = left_right_dtypes result = IntervalArray.from_arrays(left, right) + if result.dtype.subtype.kind not in ["m", "M"]: + msg = "'value' should be an interval type, got <.*NaTType'> instead." + with pytest.raises(TypeError, match=msg): + result[0] = pd.NaT if result.dtype.subtype.kind in ["i", "u"]: msg = "Cannot set float NaN to integer-backed IntervalArray" with pytest.raises(ValueError, match=msg): diff --git a/pandas/tests/indexes/interval/test_interval.py b/pandas/tests/indexes/interval/test_interval.py index 734c98af3d058..b81f0f27e60ad 100644 --- a/pandas/tests/indexes/interval/test_interval.py +++ b/pandas/tests/indexes/interval/test_interval.py @@ -191,13 +191,14 @@ def test_insert(self, data): tm.assert_index_equal(result, expected) # invalid type - msg = "can only insert Interval objects and NA into an IntervalIndex" + msg = "can only insert Interval objects and NA into an IntervalArray" with pytest.raises(ValueError, match=msg): data.insert(1, "foo") # invalid closed - msg = "inserted item must be closed on the same side as the index" + msg = "'value.closed' is 'left', expected 'right'." 
for closed in {"left", "right", "both", "neither"} - {item.closed}: + msg = f"'value.closed' is '{closed}', expected '{item.closed}'." with pytest.raises(ValueError, match=msg): bad_item = Interval(item.left, item.right, closed=closed) data.insert(1, bad_item) @@ -211,7 +212,7 @@ def test_insert(self, data): if data.left.dtype.kind not in ["m", "M"]: # trying to insert pd.NaT into a numeric-dtyped Index should cast/raise - msg = "can only insert Interval objects and NA into an IntervalIndex" + msg = "can only insert Interval objects and NA into an IntervalArray" with pytest.raises(ValueError, match=msg): result = data.insert(1, pd.NaT) else: From 7e13b19712ce39424c8c75880e80375141575b1b Mon Sep 17 00:00:00 2001 From: jbrockmendel Date: Sat, 19 Sep 2020 19:13:12 -0700 Subject: [PATCH 628/632] TYP: core.missing; PERF for needs_i8_conversion (#36485) --- pandas/core/arrays/datetimelike.py | 14 ++------------ pandas/core/dtypes/common.py | 4 ++++ pandas/core/missing.py | 31 +++++++++++++++--------------- 3 files changed, 21 insertions(+), 28 deletions(-) diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index 026aad5ad6eb7..45cabe8f0b498 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -1005,19 +1005,9 @@ def fillna(self, value=None, method=None, limit=None): else: func = missing.backfill_1d - values = self._ndarray - if not is_period_dtype(self.dtype): - # For PeriodArray self._ndarray is i8, which gets copied - # by `func`. Otherwise we need to make a copy manually - # to avoid modifying `self` in-place. - values = values.copy() - + values = self.copy() new_values = func(values, limit=limit, mask=mask) - if is_datetime64tz_dtype(self.dtype): - # we need to pass int64 values to the constructor to avoid - # re-localizing incorrectly - new_values = new_values.view("i8") - new_values = type(self)(new_values, dtype=self.dtype) + new_values = self._from_backing_data(new_values) else: # fill with value new_values = self.copy() diff --git a/pandas/core/dtypes/common.py b/pandas/core/dtypes/common.py index 5987fdabf78bb..acbdbfd7707e3 100644 --- a/pandas/core/dtypes/common.py +++ b/pandas/core/dtypes/common.py @@ -1215,6 +1215,10 @@ def needs_i8_conversion(arr_or_dtype) -> bool: """ if arr_or_dtype is None: return False + if isinstance(arr_or_dtype, (np.dtype, ExtensionDtype)): + # fastpath + dtype = arr_or_dtype + return dtype.kind in ["m", "M"] or dtype.type is Period return ( is_datetime_or_timedelta_dtype(arr_or_dtype) or is_datetime64tz_dtype(arr_or_dtype) diff --git a/pandas/core/missing.py b/pandas/core/missing.py index be66b19d10064..9b96c8f01153b 100644 --- a/pandas/core/missing.py +++ b/pandas/core/missing.py @@ -7,17 +7,15 @@ import numpy as np from pandas._libs import algos, lib +from pandas._typing import DtypeObj from pandas.compat._optional import import_optional_dependency from pandas.core.dtypes.cast import infer_dtype_from_array from pandas.core.dtypes.common import ( ensure_float64, - is_datetime64_dtype, - is_datetime64tz_dtype, is_integer_dtype, is_numeric_v_string_like, is_scalar, - is_timedelta64_dtype, needs_i8_conversion, ) from pandas.core.dtypes.missing import isna @@ -72,7 +70,7 @@ def mask_missing(arr, values_to_mask): return mask -def clean_fill_method(method, allow_nearest=False): +def clean_fill_method(method, allow_nearest: bool = False): # asfreq is compat for resampling if method in [None, "asfreq"]: return None @@ -543,7 +541,12 @@ def _cubicspline_interpolate(xi, yi, x, axis=0, 
bc_type="not-a-knot", extrapolat def interpolate_2d( - values, method="pad", axis=0, limit=None, fill_value=None, dtype=None + values, + method="pad", + axis=0, + limit=None, + fill_value=None, + dtype: Optional[DtypeObj] = None, ): """ Perform an actual interpolation of values, values will be make 2-d if @@ -584,18 +587,14 @@ def interpolate_2d( return values -def _cast_values_for_fillna(values, dtype): +def _cast_values_for_fillna(values, dtype: DtypeObj): """ Cast values to a dtype that algos.pad and algos.backfill can handle. """ # TODO: for int-dtypes we make a copy, but for everything else this # alters the values in-place. Is this intentional? - if ( - is_datetime64_dtype(dtype) - or is_datetime64tz_dtype(dtype) - or is_timedelta64_dtype(dtype) - ): + if needs_i8_conversion(dtype): values = values.view(np.int64) elif is_integer_dtype(values): @@ -605,7 +604,7 @@ def _cast_values_for_fillna(values, dtype): return values -def _fillna_prep(values, mask=None, dtype=None): +def _fillna_prep(values, mask=None, dtype: Optional[DtypeObj] = None): # boilerplate for pad_1d, backfill_1d, pad_2d, backfill_2d if dtype is None: dtype = values.dtype @@ -620,19 +619,19 @@ def _fillna_prep(values, mask=None, dtype=None): return values, mask -def pad_1d(values, limit=None, mask=None, dtype=None): +def pad_1d(values, limit=None, mask=None, dtype: Optional[DtypeObj] = None): values, mask = _fillna_prep(values, mask, dtype) algos.pad_inplace(values, mask, limit=limit) return values -def backfill_1d(values, limit=None, mask=None, dtype=None): +def backfill_1d(values, limit=None, mask=None, dtype: Optional[DtypeObj] = None): values, mask = _fillna_prep(values, mask, dtype) algos.backfill_inplace(values, mask, limit=limit) return values -def pad_2d(values, limit=None, mask=None, dtype=None): +def pad_2d(values, limit=None, mask=None, dtype: Optional[DtypeObj] = None): values, mask = _fillna_prep(values, mask, dtype) if np.all(values.shape): @@ -643,7 +642,7 @@ def pad_2d(values, limit=None, mask=None, dtype=None): return values -def backfill_2d(values, limit=None, mask=None, dtype=None): +def backfill_2d(values, limit=None, mask=None, dtype: Optional[DtypeObj] = None): values, mask = _fillna_prep(values, mask, dtype) if np.all(values.shape): From 15539fa6217039c2e5cb70b1d942caec9c5a2de3 Mon Sep 17 00:00:00 2001 From: Daniel Saxton <2658661+dsaxton@users.noreply.github.com> Date: Sat, 19 Sep 2020 21:13:54 -0500 Subject: [PATCH 629/632] Don't unlabel stale PR on update (#36487) --- .github/workflows/stale-pr.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/stale-pr.yml b/.github/workflows/stale-pr.yml index e3b8d9336a5a6..e77bf2b81fc86 100644 --- a/.github/workflows/stale-pr.yml +++ b/.github/workflows/stale-pr.yml @@ -17,5 +17,5 @@ jobs: exempt-pr-labels: "Needs Review,Blocked,Needs Discussion" days-before-stale: 30 days-before-close: -1 - remove-stale-when-updated: true + remove-stale-when-updated: false debug-only: false From a0d6d061de42c54b8071d28a4bf60cd69853c388 Mon Sep 17 00:00:00 2001 From: Daniel Saxton <2658661+dsaxton@users.noreply.github.com> Date: Mon, 21 Sep 2020 01:47:18 -0500 Subject: [PATCH 630/632] BUG: Fix astype from float32 to string (#36464) --- doc/source/whatsnew/v1.1.3.rst | 1 + pandas/_libs/lib.pyx | 3 ++- pandas/core/arrays/string_.py | 3 +-- pandas/tests/arrays/string_/test_string.py | 9 +++++++++ pandas/tests/series/methods/test_astype.py | 9 +++++++++ 5 files changed, 22 insertions(+), 3 deletions(-) diff --git 
a/doc/source/whatsnew/v1.1.3.rst b/doc/source/whatsnew/v1.1.3.rst
index 7d658215d7b76..72937141c2870 100644
--- a/doc/source/whatsnew/v1.1.3.rst
+++ b/doc/source/whatsnew/v1.1.3.rst
@@ -47,6 +47,7 @@ Bug fixes
 - Bug in :class:`Series` constructor where integer overflow would occur for sufficiently large scalar inputs when an index was provided (:issue:`36291`)
 - Bug in :meth:`DataFrame.sort_values` raising an ``AttributeError`` when sorting on a key that casts column to categorical dtype (:issue:`36383`)
 - Bug in :meth:`DataFrame.stack` raising a ``ValueError`` when stacking :class:`MultiIndex` columns based on position when the levels had duplicate names (:issue:`36353`)
+- Bug in :meth:`Series.astype` showing too much precision when casting from ``np.float32`` to string dtype (:issue:`36451`)
 - Bug in :meth:`Series.isin` and :meth:`DataFrame.isin` when using ``NaN`` and a row length above 1,000,000 (:issue:`22205`)
 
 .. ---------------------------------------------------------------------------
diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx
index a57cf3b523985..61a9634b00211 100644
--- a/pandas/_libs/lib.pyx
+++ b/pandas/_libs/lib.pyx
@@ -659,11 +659,12 @@ cpdef ndarray[object] ensure_string_array(
         Py_ssize_t i = 0, n = len(arr)
 
     result = np.asarray(arr, dtype="object")
+
     if copy and result is arr:
         result = result.copy()
 
     for i in range(n):
-        val = result[i]
+        val = arr[i]
 
         if isinstance(val, str):
             continue
diff --git a/pandas/core/arrays/string_.py b/pandas/core/arrays/string_.py
index cef35f2b1137c..cb1144c18e49c 100644
--- a/pandas/core/arrays/string_.py
+++ b/pandas/core/arrays/string_.py
@@ -198,10 +198,9 @@ def _from_sequence(cls, scalars, dtype=None, copy=False):
         if dtype:
             assert dtype == "string"
 
-        result = np.asarray(scalars, dtype="object")
         # convert non-na-likes to str, and nan-likes to StringDtype.na_value
         result = lib.ensure_string_array(
-            result, na_value=StringDtype.na_value, copy=copy
+            scalars, na_value=StringDtype.na_value, copy=copy
         )
 
         # Manually creating new array avoids the validation step in the __init__, so is
diff --git a/pandas/tests/arrays/string_/test_string.py b/pandas/tests/arrays/string_/test_string.py
index efd5d29ae0717..56a8e21edd004 100644
--- a/pandas/tests/arrays/string_/test_string.py
+++ b/pandas/tests/arrays/string_/test_string.py
@@ -336,3 +336,12 @@ def test_memory_usage():
     series = pd.Series(["a", "b", "c"], dtype="string")
 
     assert 0 < series.nbytes <= series.memory_usage() < series.memory_usage(deep=True)
+
+
+@pytest.mark.parametrize("dtype", [np.float16, np.float32, np.float64])
+def test_astype_from_float_dtype(dtype):
+    # https://github.com/pandas-dev/pandas/issues/36451
+    s = pd.Series([0.1], dtype=dtype)
+    result = s.astype("string")
+    expected = pd.Series(["0.1"], dtype="string")
+    tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/series/methods/test_astype.py b/pandas/tests/series/methods/test_astype.py
index b9d90a9fc63dd..7449d8d65ef96 100644
--- a/pandas/tests/series/methods/test_astype.py
+++ b/pandas/tests/series/methods/test_astype.py
@@ -1,3 +1,4 @@
+import numpy as np
 import pytest
 
 from pandas import Interval, Series, Timestamp, date_range
@@ -46,3 +47,11 @@ def test_astype_ignores_errors_for_extension_dtypes(self, values, errors):
         msg = "(Cannot cast)|(could not convert)"
         with pytest.raises((ValueError, TypeError), match=msg):
             values.astype(float, errors=errors)
+
+    @pytest.mark.parametrize("dtype", [np.float16, np.float32, np.float64])
+    def test_astype_from_float_to_str(self, dtype):
+        # https://github.com/pandas-dev/pandas/issues/36451
+        s = Series([0.1], dtype=dtype)
+        result = s.astype(str)
+        expected = Series(["0.1"])
+        tm.assert_series_equal(result, expected)
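A minimal sketch of the user-visible change (a hypothetical snippet, assuming a pandas build that includes #36464):

    import numpy as np
    import pandas as pd

    s = pd.Series([0.1], dtype=np.float32)

    # Reading the value back out of the intermediate object array widened
    # the float32 to a Python float, so str() produced
    # "0.10000000149011612"; taking the original float32 scalar instead
    # renders it as "0.1".
    assert s.astype(str)[0] == "0.1"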
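For reference, a minimal sketch of the fixed-width round trip that the resolved test above exercises (a hypothetical standalone snippet; it assumes a pandas build where read_fwf accepts the widths keyword, as kept by this resolution):

    from io import BytesIO

    import pandas as pd

    # Two fixed-width columns of two characters each; the first row is
    # consumed as the header, leaving a single data row.
    data = BytesIO("שלום\nשלום".encode("utf8"))
    result = pd.read_fwf(data, widths=[2, 2], encoding="utf8")

    assert list(result.columns) == ["של", "ום"]
    assert result.iloc[0].tolist() == ["של", "ום"]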