diff --git a/pandas/core/arrays/interval.py b/pandas/core/arrays/interval.py
index 757cea2c710b2..53a98fc43becc 100644
--- a/pandas/core/arrays/interval.py
+++ b/pandas/core/arrays/interval.py
@@ -57,9 +57,11 @@
 
 _interval_shared_docs = {}
 
-_shared_docs_kwargs = dict(
-    klass="IntervalArray", qualname="arrays.IntervalArray", name=""
-)
+_shared_docs_kwargs = {
+    "klass": "IntervalArray",
+    "qualname": "arrays.IntervalArray",
+    "name": "",
+}
 
 
 _interval_shared_docs[
@@ -127,14 +129,14 @@
 
 @Appender(
     _interval_shared_docs["class"]
-    % dict(
-        klass="IntervalArray",
-        summary="Pandas array for interval data that are closed on the same side.",
-        versionadded="0.24.0",
-        name="",
-        extra_attributes="",
-        extra_methods="",
-        examples=textwrap.dedent(
+    % {
+        "klass": "IntervalArray",
+        "summary": "Pandas array for interval data that are closed on the same side.",
+        "versionadded": "0.24.0",
+        "name": "",
+        "extra_attributes": "",
+        "extra_methods": "",
+        "examples": textwrap.dedent(
             """\
     Examples
     --------
     >>> pd.arrays.IntervalArray([pd.Interval(0, 1), pd.Interval(1, 5)])
     <IntervalArray>
     [(0, 1], (1, 5]]
     Length: 2, closed: right, dtype: interval[int64]
@@ -151,7 +153,7 @@
     :meth:`IntervalArray.from_breaks`, and :meth:`IntervalArray.from_tuples`.
     """
         ),
-    )
+    }
 )
 class IntervalArray(IntervalMixin, ExtensionArray):
     ndim = 1
@@ -319,9 +321,9 @@ def _from_factorized(cls, values, original):
     @classmethod
     @Appender(
         _interval_shared_docs["from_breaks"]
-        % dict(
-            klass="IntervalArray",
-            examples=textwrap.dedent(
+        % {
+            "klass": "IntervalArray",
+            "examples": textwrap.dedent(
                 """\
         Examples
         --------
         >>> pd.arrays.IntervalArray.from_breaks([0, 1, 2, 3])
         <IntervalArray>
         [(0, 1], (1, 2], (2, 3]]
@@ -331,7 +333,7 @@ def _from_factorized(cls, values, original):
         Length: 3, closed: right, dtype: interval[int64]
         """
             ),
-        )
+        }
     )
     def from_breaks(cls, breaks, closed="right", copy=False, dtype=None):
         breaks = maybe_convert_platform_interval(breaks)
@@ -390,9 +392,9 @@ def from_breaks(cls, breaks, closed="right", copy=False, dtype=None):
     @classmethod
     @Appender(
         _interval_shared_docs["from_arrays"]
-        % dict(
-            klass="IntervalArray",
-            examples=textwrap.dedent(
+        % {
+            "klass": "IntervalArray",
+            "examples": textwrap.dedent(
                 """\
         >>> pd.arrays.IntervalArray.from_arrays([0, 1, 2], [1, 2, 3])
         <IntervalArray>
@@ -400,7 +402,7 @@ def from_breaks(cls, breaks, closed="right", copy=False, dtype=None):
         Length: 3, closed: right, dtype: interval[int64]
         """
             ),
-        )
+        }
     )
     def from_arrays(cls, left, right, closed="right", copy=False, dtype=None):
         left = maybe_convert_platform_interval(left)
@@ -445,9 +447,9 @@ def from_arrays(cls, left, right, closed="right", copy=False, dtype=None):
     @classmethod
     @Appender(
         _interval_shared_docs["from_tuples"]
-        % dict(
-            klass="IntervalArray",
-            examples=textwrap.dedent(
+        % {
+            "klass": "IntervalArray",
+            "examples": textwrap.dedent(
                 """\
         Examples
         --------
         >>> pd.arrays.IntervalArray.from_tuples([(0, 1), (1, 2)])
         <IntervalArray>
         [(0, 1], (1, 2]]
@@ -457,7 +459,7 @@ def from_arrays(cls, left, right, closed="right", copy=False, dtype=None):
         Length: 2, closed: right, dtype: interval[int64]
         """
             ),
-        )
+        }
     )
     def from_tuples(cls, data, closed="right", copy=False, dtype=None):
         if len(data):
@@ -904,7 +906,7 @@ def take(self, indices, *, allow_fill=False, fill_value=None, axis=None, **kwarg
             When `indices` contains negative values other than ``-1``
             and `allow_fill` is True.
         """
-        nv.validate_take(tuple(), kwargs)
+        nv.validate_take((), kwargs)
 
         fill_left = fill_right = fill_value
         if allow_fill:
@@ -1144,9 +1146,9 @@ def mid(self):
 
     @Appender(
         _interval_shared_docs["overlaps"]
-        % dict(
-            klass="IntervalArray",
-            examples=textwrap.dedent(
+        % {
+            "klass": "IntervalArray",
+            "examples": textwrap.dedent(
                 """\
         >>> data = [(0, 1), (1, 3), (2, 4)]
         >>> intervals = pd.arrays.IntervalArray.from_tuples(data)
         >>> intervals
         <IntervalArray>
         [(0, 1], (1, 3], (2, 4]]
@@ -1156,7 +1158,7 @@ def mid(self):
         Length: 3, closed: right, dtype: interval[int64]
         """
             ),
-        )
+        }
     )
     def overlaps(self, other):
         if isinstance(other, (IntervalArray, ABCIntervalIndex)):
@@ -1207,9 +1209,9 @@ def closed(self):
 
     @Appender(
         _interval_shared_docs["set_closed"]
-        % dict(
-            klass="IntervalArray",
-            examples=textwrap.dedent(
+        % {
+            "klass": "IntervalArray",
+            "examples": textwrap.dedent(
                 """\
         Examples
         --------
         >>> index = pd.arrays.IntervalArray.from_breaks(range(4))
         >>> index
         <IntervalArray>
         [(0, 1], (1, 2], (2, 3]]
         Length: 3, closed: right, dtype: interval[int64]
         >>> index.set_closed('both')
         <IntervalArray>
         [[0, 1], [1, 2], [2, 3]]
@@ -1224,7 +1226,7 @@ def closed(self):
         Length: 3, closed: both, dtype: interval[int64]
         """
             ),
-        )
+        }
     )
     def set_closed(self, closed):
         if closed not in VALID_CLOSED:
@@ -1360,7 +1362,7 @@ def __arrow_array__(self, type=None):
     """
 
     @Appender(
-        _interval_shared_docs["to_tuples"] % dict(return_type="ndarray", examples="")
+        _interval_shared_docs["to_tuples"] % {"return_type": "ndarray", "examples": ""}
     )
     def to_tuples(self, na_tuple=True):
         tuples = com.asarray_tuplesafe(zip(self._left, self._right))
@@ -1373,7 +1375,7 @@ def to_tuples(self, na_tuple=True):
 
     @Appender(_extension_array_shared_docs["repeat"] % _shared_docs_kwargs)
     def repeat(self, repeats, axis=None):
-        nv.validate_repeat(tuple(), dict(axis=axis))
+        nv.validate_repeat((), {"axis": axis})
         left_repeat = self.left.repeat(repeats)
         right_repeat = self.right.repeat(repeats)
         return self._shallow_copy(left=left_repeat, right=right_repeat)
@@ -1412,9 +1414,9 @@ def repeat(self, repeats, axis=None):
 
     @Appender(
         _interval_shared_docs["contains"]
-        % dict(
-            klass="IntervalArray",
-            examples=textwrap.dedent(
+        % {
+            "klass": "IntervalArray",
+            "examples": textwrap.dedent(
                 """\
         >>> intervals = pd.arrays.IntervalArray.from_tuples([(0, 1), (1, 3), (2, 4)])
         >>> intervals
         <IntervalArray>
         [(0, 1], (1, 3], (2, 4]]
@@ -1423,7 +1425,7 @@ def repeat(self, repeats, axis=None):
         Length: 3, closed: right, dtype: interval[int64]
         """
             ),
-        )
+        }
     )
     def contains(self, other):
         if isinstance(other, Interval):
diff --git a/pandas/tests/io/pytables/test_store.py b/pandas/tests/io/pytables/test_store.py
index c20955227d05e..f8d283f622d4d 100644
--- a/pandas/tests/io/pytables/test_store.py
+++ b/pandas/tests/io/pytables/test_store.py
@@ -112,7 +112,7 @@ def roundtrip(key, obj, **kwargs):
             tm.assert_frame_equal(o, roundtrip("frame", o))
 
             # table
-            df = DataFrame(dict(A=range(5), B=range(5)))
+            df = DataFrame({"A": range(5), "B": range(5)})
             df.to_hdf(path, "table", append=True)
             result = read_hdf(path, "table", where=["index>2"])
             tm.assert_frame_equal(df[df.index > 2], result)
@@ -370,7 +370,7 @@ def test_keys_ignore_hdf_softlink(self, setup_path):
 
         with ensure_clean_store(setup_path) as store:
 
-            df = DataFrame(dict(A=range(5), B=range(5)))
+            df = DataFrame({"A": range(5), "B": range(5)})
             store.put("df", df)
 
             assert store.keys() == ["/df"]
@@ -1081,7 +1081,7 @@ def check(format, index):
     def test_encoding(self, setup_path):
 
         with ensure_clean_store(setup_path) as store:
-            df = DataFrame(dict(A="foo", B="bar"), index=range(5))
+            df = DataFrame({"A": "foo", "B": "bar"}, index=range(5))
             df.loc[2, "A"] = np.nan
             df.loc[3, "B"] = np.nan
             _maybe_remove(store, "df")
@@ -1458,7 +1458,7 @@ def check_col(key, name, size):
                     store.get_storer(key).table.description, name
                 ).itemsize, size
 
-            df = DataFrame(dict(A="foo", B="bar"), index=range(10))
+            df = DataFrame({"A": "foo", "B": "bar"}, index=range(10))
 
             # a min_itemsize that creates a data_column
             _maybe_remove(store, "df")
@@ -2188,13 +2188,13 @@ def test_append_with_timedelta(self, setup_path):
         # append timedelta
 
         df = DataFrame(
-            dict(
-                A=Timestamp("20130101"),
-                B=[
+            {
+                "A": Timestamp("20130101"),
+                "B": [
                     Timestamp("20130101") + timedelta(days=i, seconds=10)
                     for i in range(10)
                 ],
-            )
+            }
         )
         df["C"] = df["A"] - df["B"]
         df.loc[3:5, "C"] = np.nan
@@ -2732,7 +2732,10 @@ def test_select_dtypes(self, setup_path):
         with ensure_clean_store(setup_path) as store:
             # with a Timestamp data column (GH #2637)
             df = DataFrame(
-                dict(ts=bdate_range("2012-01-01", periods=300), A=np.random.randn(300))
+                {
+                    "ts": bdate_range("2012-01-01", periods=300),
+                    "A": np.random.randn(300),
+                }
             )
             _maybe_remove(store, "df")
             store.append("df", df, data_columns=["ts", "A"])
@@ -2760,7 +2763,7 @@ def test_select_dtypes(self, setup_path):
             tm.assert_frame_equal(expected, result)
 
             # integer index
-            df = DataFrame(dict(A=np.random.rand(20), B=np.random.rand(20)))
+            df = DataFrame({"A": np.random.rand(20), "B": np.random.rand(20)})
             _maybe_remove(store, "df_int")
             store.append("df_int", df)
             result = store.select("df_int", "index<10 and columns=['A']")
@@ -2769,11 +2772,11 @@ def test_select_dtypes(self, setup_path):
 
             # float index
             df = DataFrame(
-                dict(
-                    A=np.random.rand(20),
-                    B=np.random.rand(20),
-                    index=np.arange(20, dtype="f8"),
-                )
+                {
+                    "A": np.random.rand(20),
+                    "B": np.random.rand(20),
+                    "index": np.arange(20, dtype="f8"),
+                }
             )
             _maybe_remove(store, "df_float")
             store.append("df_float", df)
@@ -2784,7 +2787,7 @@ def test_select_dtypes(self, setup_path):
         with ensure_clean_store(setup_path) as store:
 
             # floats w/o NaN
-            df = DataFrame(dict(cols=range(11), values=range(11)), dtype="float64")
+            df = DataFrame({"cols": range(11), "values": range(11)}, dtype="float64")
             df["cols"] = (df["cols"] + 10).apply(str)
 
             store.append("df1", df, data_columns=True)
@@ -2808,7 +2811,7 @@ def test_select_dtypes(self, setup_path):
             # tm.assert_frame_equal(expected, result)
 
             # not in first position float with NaN ok too
-            df = DataFrame(dict(cols=range(11), values=range(11)), dtype="float64")
+            df = DataFrame({"cols": range(11), "values": range(11)}, dtype="float64")
             df["cols"] = (df["cols"] + 10).apply(str)
 
             df.iloc[1] = np.nan
@@ -2835,15 +2838,15 @@ def test_select_with_many_inputs(self, setup_path):
 
         with ensure_clean_store(setup_path) as store:
             df = DataFrame(
-                dict(
-                    ts=bdate_range("2012-01-01", periods=300),
-                    A=np.random.randn(300),
-                    B=range(300),
-                    users=["a"] * 50
+                {
+                    "ts": bdate_range("2012-01-01", periods=300),
+                    "A": np.random.randn(300),
+                    "B": range(300),
+                    "users": ["a"] * 50
                     + ["b"] * 50
                     + ["c"] * 100
                     + [f"a{i:03d}" for i in range(100)],
-                )
+                }
             )
             _maybe_remove(store, "df")
             store.append("df", df, data_columns=["ts", "A", "B", "users"])
@@ -3139,7 +3142,7 @@ def test_retain_index_attributes(self, setup_path):
 
         # GH 3499, losing frequency info on index recreation
         df = DataFrame(
-            dict(A=Series(range(3), index=date_range("2000-1-1", periods=3, freq="H")))
+            {"A": Series(range(3), index=date_range("2000-1-1", periods=3, freq="H"))}
         )
 
         with ensure_clean_store(setup_path) as store:
@@ -3158,11 +3161,11 @@ def test_retain_index_attributes(self, setup_path):
             # try to append a table with a different frequency
             with catch_warnings(record=True):
                 df2 = DataFrame(
-                    dict(
-                        A=Series(
+                    {
+                        "A": Series(
                             range(3), index=date_range("2002-1-1", periods=3, freq="D")
                         )
-                    )
+                    }
                 )
                 store.append("data", df2)
@@ -3171,8 +3174,8 @@ def test_retain_index_attributes(self, setup_path):
             # this is ok
             _maybe_remove(store, "df2")
             df2 = DataFrame(
-                dict(
-                    A=Series(
+                {
+                    "A": Series(
                         range(3),
                         index=[
                             Timestamp("20010101"),
@@ -3180,15 +3183,15 @@ def test_retain_index_attributes(self, setup_path):
                             Timestamp("20020101"),
                         ],
                     )
-                )
+                }
             )
             store.append("df2", df2)
             df3 = DataFrame(
-                dict(
-                    A=Series(
+                {
+                    "A": Series(
                         range(3), index=date_range("2002-1-1", periods=3, freq="D")
                     )
-                )
+                }
             )
             store.append("df2", df3)
@@ -3201,25 +3204,26 @@ def test_retain_index_attributes2(self, setup_path):
 
             with catch_warnings(record=True):
 
                 df = DataFrame(
-                    dict(
-                        A=Series(
+                    {
+                        "A": Series(
                             range(3), index=date_range("2000-1-1", periods=3, freq="H")
                         )
-                    )
+                    }
                 )
                 df.to_hdf(path, "data", mode="w", append=True)
                 df2 = DataFrame(
-                    dict(
-                        A=Series(
+                    {
+                        "A": Series(
                             range(3), index=date_range("2002-1-1", periods=3, freq="D")
                         )
-                    )
+                    }
                 )
+                df2.to_hdf(path, "data", append=True)
 
                 idx = date_range("2000-1-1", periods=3, freq="H")
                 idx.name = "foo"
-                df = DataFrame(dict(A=Series(range(3), index=idx)))
+                df = DataFrame({"A": Series(range(3), index=idx)})
                 df.to_hdf(path, "data", mode="w", append=True)
 
                 assert read_hdf(path, "data").index.name == "foo"
@@ -3228,7 +3232,7 @@ def test_retain_index_attributes2(self, setup_path):
 
                 idx2 = date_range("2001-1-1", periods=3, freq="H")
                 idx2.name = "bar"
-                df2 = DataFrame(dict(A=Series(range(3), index=idx2)))
+                df2 = DataFrame({"A": Series(range(3), index=idx2)})
                 df2.to_hdf(path, "data", append=True)
 
                 assert read_hdf(path, "data").index.name is None
@@ -3529,7 +3533,7 @@ def test_coordinates(self, setup_path):
 
             # get coordinates back & test vs frame
             _maybe_remove(store, "df")
-            df = DataFrame(dict(A=range(5), B=range(5)))
+            df = DataFrame({"A": range(5), "B": range(5)})
             store.append("df", df)
             c = store.select_as_coordinates("df", ["index<3"])
             assert (c.values == np.arange(3)).all()
@@ -3791,12 +3795,12 @@ def test_nan_selection_bug_4858(self, setup_path):
 
         with ensure_clean_store(setup_path) as store:
 
-            df = DataFrame(dict(cols=range(6), values=range(6)), dtype="float64")
+            df = DataFrame({"cols": range(6), "values": range(6)}, dtype="float64")
             df["cols"] = (df["cols"] + 10).apply(str)
             df.iloc[0] = np.nan
 
             expected = DataFrame(
-                dict(cols=["13.0", "14.0", "15.0"], values=[3.0, 4.0, 5.0]),
+                {"cols": ["13.0", "14.0", "15.0"], "values": [3.0, 4.0, 5.0]},
                 index=[3, 4, 5],
             )
 
@@ -3810,7 +3814,7 @@ def test_start_stop_table(self, setup_path):
         with ensure_clean_store(setup_path) as store:
 
             # table
-            df = DataFrame(dict(A=np.random.rand(20), B=np.random.rand(20)))
+            df = DataFrame({"A": np.random.rand(20), "B": np.random.rand(20)})
             store.append("df", df)
 
             result = store.select("df", "columns=['A']", start=0, stop=5)
@@ -3845,7 +3849,7 @@ def test_start_stop_fixed(self, setup_path):
 
             # fixed, GH 8287
             df = DataFrame(
-                dict(A=np.random.rand(20), B=np.random.rand(20)),
+                {"A": np.random.rand(20), "B": np.random.rand(20)},
                 index=pd.date_range("20130101", periods=20),
             )
             store.put("df", df)
@@ -4478,7 +4482,7 @@ def test_categorical_conversion(self, setup_path):
         data = [4.3, 9.8]
 
         # Test without categories
-        df = DataFrame(dict(obsids=obsids, imgids=imgids, data=data))
+        df = DataFrame({"obsids": obsids, "imgids": imgids, "data": data})
 
         # We are expecting an empty DataFrame matching types of df
         expected = df.iloc[[], :]
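Aside (not part of the patch above): a minimal, illustrative sketch of the equivalence this cleanup relies on. Dict and tuple literals build the same objects as the dict()/tuple() constructor calls they replace, while skipping a builtin name lookup and a function call, which is why the literal spelling is preferred. The timing comparison below is a rough illustration only, not a benchmark taken from the PR.

# Illustrative only -- not taken from the pandas codebase.
import timeit

# The literal spellings are equivalent to the constructor calls they replace.
assert {"klass": "IntervalArray", "name": ""} == dict(klass="IntervalArray", name="")
assert () == tuple()

# Literals avoid the global lookup of `dict`/`tuple` plus the call overhead,
# so they are marginally faster; exact numbers vary by interpreter and machine.
print(timeit.timeit("dict(a=1, b=2)", number=1_000_000))   # constructor call
print(timeit.timeit("{'a': 1, 'b': 2}", number=1_000_000))  # literal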