From fb42c5a6723d3bf66b2f9c71b81a1d8299c5c587 Mon Sep 17 00:00:00 2001 From: Maria Ilie Date: Thu, 1 Oct 2020 23:04:58 -0700 Subject: [PATCH 1/4] ran blacken docs tool and checked output to improve formatting --- doc/source/user_guide/10min.rst | 170 +++++---- doc/source/user_guide/advanced.rst | 251 +++++++------ doc/source/user_guide/basics.rst | 564 ++++++++++++++++------------- 3 files changed, 531 insertions(+), 454 deletions(-) diff --git a/doc/source/user_guide/10min.rst b/doc/source/user_guide/10min.rst index c3746cbe777a3..4da205a2230fd 100644 --- a/doc/source/user_guide/10min.rst +++ b/doc/source/user_guide/10min.rst @@ -34,21 +34,25 @@ and labeled columns: .. ipython:: python - dates = pd.date_range('20130101', periods=6) + dates = pd.date_range("20130101", periods=6) dates - df = pd.DataFrame(np.random.randn(6, 4), index=dates, columns=list('ABCD')) + df = pd.DataFrame(np.random.randn(6, 4), index=dates, columns=list("ABCD")) df Creating a :class:`DataFrame` by passing a dict of objects that can be converted to series-like. .. ipython:: python - df2 = pd.DataFrame({'A': 1., - 'B': pd.Timestamp('20130102'), - 'C': pd.Series(1, index=list(range(4)), dtype='float32'), - 'D': np.array([3] * 4, dtype='int32'), - 'E': pd.Categorical(["test", "train", "test", "train"]), - 'F': 'foo'}) + df2 = pd.DataFrame( + { + "A": 1.0, + "B": pd.Timestamp("20130102"), + "C": pd.Series(1, index=list(range(4)), dtype="float32"), + "D": np.array([3] * 4, dtype="int32"), + "E": pd.Categorical(["test", "train", "test", "train"]), + "F": "foo", + } + ) df2 The columns of the resulting :class:`DataFrame` have different @@ -152,7 +156,7 @@ Sorting by values: .. ipython:: python - df.sort_values(by='B') + df.sort_values(by="B") Selection --------- @@ -174,14 +178,14 @@ equivalent to ``df.A``: .. ipython:: python - df['A'] + df["A"] Selecting via ``[]``, which slices the rows. .. ipython:: python df[0:3] - df['20130102':'20130104'] + df["20130102":"20130104"] Selection by label ~~~~~~~~~~~~~~~~~~ @@ -198,31 +202,31 @@ Selecting on a multi-axis by label: .. ipython:: python - df.loc[:, ['A', 'B']] + df.loc[:, ["A", "B"]] Showing label slicing, both endpoints are *included*: .. ipython:: python - df.loc['20130102':'20130104', ['A', 'B']] + df.loc["20130102":"20130104", ["A", "B"]] Reduction in the dimensions of the returned object: .. ipython:: python - df.loc['20130102', ['A', 'B']] + df.loc["20130102", ["A", "B"]] For getting a scalar value: .. ipython:: python - df.loc[dates[0], 'A'] + df.loc[dates[0], "A"] For getting fast access to a scalar (equivalent to the prior method): .. ipython:: python - df.at[dates[0], 'A'] + df.at[dates[0], "A"] Selection by position ~~~~~~~~~~~~~~~~~~~~~ @@ -278,7 +282,7 @@ Using a single column's values to select data. .. ipython:: python - df[df['A'] > 0] + df[df["A"] > 0] Selecting values from a DataFrame where a boolean condition is met. @@ -291,9 +295,9 @@ Using the :func:`~Series.isin` method for filtering: .. ipython:: python df2 = df.copy() - df2['E'] = ['one', 'one', 'two', 'three', 'four', 'three'] + df2["E"] = ["one", "one", "two", "three", "four", "three"] df2 - df2[df2['E'].isin(['two', 'four'])] + df2[df2["E"].isin(["two", "four"])] Setting ~~~~~~~ @@ -303,15 +307,15 @@ by the indexes. .. ipython:: python - s1 = pd.Series([1, 2, 3, 4, 5, 6], index=pd.date_range('20130102', periods=6)) + s1 = pd.Series([1, 2, 3, 4, 5, 6], index=pd.date_range("20130102", periods=6)) s1 - df['F'] = s1 + df["F"] = s1 Setting values by label: .. 
ipython:: python - df.at[dates[0], 'A'] = 0 + df.at[dates[0], "A"] = 0 Setting values by position: @@ -323,7 +327,7 @@ Setting by assigning with a NumPy array: .. ipython:: python - df.loc[:, 'D'] = np.array([5] * len(df)) + df.loc[:, "D"] = np.array([5] * len(df)) The result of the prior setting operations. @@ -352,15 +356,15 @@ returns a copy of the data. .. ipython:: python - df1 = df.reindex(index=dates[0:4], columns=list(df.columns) + ['E']) - df1.loc[dates[0]:dates[1], 'E'] = 1 + df1 = df.reindex(index=dates[0:4], columns=list(df.columns) + ["E"]) + df1.loc[dates[0] : dates[1], "E"] = 1 df1 To drop any rows that have missing data. .. ipython:: python - df1.dropna(how='any') + df1.dropna(how="any") Filling missing data. @@ -404,7 +408,7 @@ In addition, pandas automatically broadcasts along the specified dimension. s = pd.Series([1, 3, 5, np.nan, 6, 8], index=dates).shift(2) s - df.sub(s, axis='index') + df.sub(s, axis="index") Apply @@ -440,7 +444,7 @@ some cases always uses them). See more at :ref:`Vectorized String Methods .. ipython:: python - s = pd.Series(['A', 'B', 'C', 'Aaba', 'Baca', np.nan, 'CABA', 'dog', 'cat']) + s = pd.Series(["A", "B", "C", "Aaba", "Baca", np.nan, "CABA", "dog", "cat"]) s.str.lower() Merge @@ -482,21 +486,21 @@ SQL style merges. See the :ref:`Database style joining ` section. .. ipython:: python - left = pd.DataFrame({'key': ['foo', 'foo'], 'lval': [1, 2]}) - right = pd.DataFrame({'key': ['foo', 'foo'], 'rval': [4, 5]}) + left = pd.DataFrame({"key": ["foo", "foo"], "lval": [1, 2]}) + right = pd.DataFrame({"key": ["foo", "foo"], "rval": [4, 5]}) left right - pd.merge(left, right, on='key') + pd.merge(left, right, on="key") Another example that can be given is: .. ipython:: python - left = pd.DataFrame({'key': ['foo', 'bar'], 'lval': [1, 2]}) - right = pd.DataFrame({'key': ['foo', 'bar'], 'rval': [4, 5]}) + left = pd.DataFrame({"key": ["foo", "bar"], "lval": [1, 2]}) + right = pd.DataFrame({"key": ["foo", "bar"], "rval": [4, 5]}) left right - pd.merge(left, right, on='key') + pd.merge(left, right, on="key") Grouping -------- @@ -512,12 +516,14 @@ See the :ref:`Grouping section `. .. ipython:: python - df = pd.DataFrame({'A': ['foo', 'bar', 'foo', 'bar', - 'foo', 'bar', 'foo', 'foo'], - 'B': ['one', 'one', 'two', 'three', - 'two', 'two', 'one', 'three'], - 'C': np.random.randn(8), - 'D': np.random.randn(8)}) + df = pd.DataFrame( + { + "A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"], + "B": ["one", "one", "two", "three", "two", "two", "one", "three"], + "C": np.random.randn(8), + "D": np.random.randn(8), + } + ) df Grouping and then applying the :meth:`~pandas.core.groupby.GroupBy.sum` function to the resulting @@ -525,14 +531,14 @@ groups. .. ipython:: python - df.groupby('A').sum() + df.groupby("A").sum() Grouping by multiple columns forms a hierarchical index, and again we can apply the :meth:`~pandas.core.groupby.GroupBy.sum` function. .. ipython:: python - df.groupby(['A', 'B']).sum() + df.groupby(["A", "B"]).sum() Reshaping --------- @@ -545,12 +551,16 @@ Stack .. 
ipython:: python - tuples = list(zip(*[['bar', 'bar', 'baz', 'baz', - 'foo', 'foo', 'qux', 'qux'], - ['one', 'two', 'one', 'two', - 'one', 'two', 'one', 'two']])) - index = pd.MultiIndex.from_tuples(tuples, names=['first', 'second']) - df = pd.DataFrame(np.random.randn(8, 2), index=index, columns=['A', 'B']) + tuples = list( + zip( + *[ + ["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"], + ["one", "two", "one", "two", "one", "two", "one", "two"], + ] + ) + ) + index = pd.MultiIndex.from_tuples(tuples, names=["first", "second"]) + df = pd.DataFrame(np.random.randn(8, 2), index=index, columns=["A", "B"]) df2 = df[:4] df2 @@ -578,18 +588,22 @@ See the section on :ref:`Pivot Tables `. .. ipython:: python - df = pd.DataFrame({'A': ['one', 'one', 'two', 'three'] * 3, - 'B': ['A', 'B', 'C'] * 4, - 'C': ['foo', 'foo', 'foo', 'bar', 'bar', 'bar'] * 2, - 'D': np.random.randn(12), - 'E': np.random.randn(12)}) + df = pd.DataFrame( + { + "A": ["one", "one", "two", "three"] * 3, + "B": ["A", "B", "C"] * 4, + "C": ["foo", "foo", "foo", "bar", "bar", "bar"] * 2, + "D": np.random.randn(12), + "E": np.random.randn(12), + } + ) df We can produce pivot tables from this data very easily: .. ipython:: python - pd.pivot_table(df, values='D', index=['A', 'B'], columns=['C']) + pd.pivot_table(df, values="D", index=["A", "B"], columns=["C"]) Time series @@ -602,31 +616,31 @@ financial applications. See the :ref:`Time Series section `. .. ipython:: python - rng = pd.date_range('1/1/2012', periods=100, freq='S') + rng = pd.date_range("1/1/2012", periods=100, freq="S") ts = pd.Series(np.random.randint(0, 500, len(rng)), index=rng) - ts.resample('5Min').sum() + ts.resample("5Min").sum() Time zone representation: .. ipython:: python - rng = pd.date_range('3/6/2012 00:00', periods=5, freq='D') + rng = pd.date_range("3/6/2012 00:00", periods=5, freq="D") ts = pd.Series(np.random.randn(len(rng)), rng) ts - ts_utc = ts.tz_localize('UTC') + ts_utc = ts.tz_localize("UTC") ts_utc Converting to another time zone: .. ipython:: python - ts_utc.tz_convert('US/Eastern') + ts_utc.tz_convert("US/Eastern") Converting between time span representations: .. ipython:: python - rng = pd.date_range('1/1/2012', periods=5, freq='M') + rng = pd.date_range("1/1/2012", periods=5, freq="M") ts = pd.Series(np.random.randn(len(rng)), index=rng) ts ps = ts.to_period() @@ -640,9 +654,9 @@ the quarter end: .. ipython:: python - prng = pd.period_range('1990Q1', '2000Q4', freq='Q-NOV') + prng = pd.period_range("1990Q1", "2000Q4", freq="Q-NOV") ts = pd.Series(np.random.randn(len(prng)), prng) - ts.index = (prng.asfreq('M', 'e') + 1).asfreq('H', 's') + 9 + ts.index = (prng.asfreq("M", "e") + 1).asfreq("H", "s") + 9 ts.head() Categoricals @@ -653,8 +667,9 @@ pandas can include categorical data in a :class:`DataFrame`. For full docs, see .. ipython:: python - df = pd.DataFrame({"id": [1, 2, 3, 4, 5, 6], - "raw_grade": ['a', 'b', 'b', 'a', 'a', 'e']}) + df = pd.DataFrame( + {"id": [1, 2, 3, 4, 5, 6], "raw_grade": ["a", "b", "b", "a", "a", "e"]} + ) Convert the raw grades to a categorical data type. @@ -674,8 +689,9 @@ Reorder the categories and simultaneously add the missing categories (methods un .. ipython:: python - df["grade"] = df["grade"].cat.set_categories(["very bad", "bad", "medium", - "good", "very good"]) + df["grade"] = df["grade"].cat.set_categories( + ["very bad", "bad", "medium", "good", "very good"] + ) df["grade"] Sorting is per order in the categories, not lexical order. 
@@ -701,7 +717,8 @@ We use the standard convention for referencing the matplotlib API: .. ipython:: python import matplotlib.pyplot as plt - plt.close('all') + + plt.close("all") .. ipython:: python @@ -736,19 +753,20 @@ CSV .. ipython:: python - df.to_csv('foo.csv') + df.to_csv("foo.csv") :ref:`Reading from a csv file. ` .. ipython:: python - pd.read_csv('foo.csv') + pd.read_csv("foo.csv") .. ipython:: python :suppress: import os - os.remove('foo.csv') + + os.remove("foo.csv") HDF5 ~~~~ @@ -759,18 +777,18 @@ Writing to a HDF5 Store. .. ipython:: python - df.to_hdf('foo.h5', 'df') + df.to_hdf("foo.h5", "df") Reading from a HDF5 Store. .. ipython:: python - pd.read_hdf('foo.h5', 'df') + pd.read_hdf("foo.h5", "df") .. ipython:: python :suppress: - os.remove('foo.h5') + os.remove("foo.h5") Excel ~~~~~ @@ -781,18 +799,18 @@ Writing to an excel file. .. ipython:: python - df.to_excel('foo.xlsx', sheet_name='Sheet1') + df.to_excel("foo.xlsx", sheet_name="Sheet1") Reading from an excel file. .. ipython:: python - pd.read_excel('foo.xlsx', 'Sheet1', index_col=None, na_values=['NA']) + pd.read_excel("foo.xlsx", "Sheet1", index_col=None, na_values=["NA"]) .. ipython:: python :suppress: - os.remove('foo.xlsx') + os.remove("foo.xlsx") Gotchas ------- diff --git a/doc/source/user_guide/advanced.rst b/doc/source/user_guide/advanced.rst index 8cd35e94ae743..cec777e0f021e 100644 --- a/doc/source/user_guide/advanced.rst +++ b/doc/source/user_guide/advanced.rst @@ -62,12 +62,14 @@ demonstrate different ways to initialize MultiIndexes. .. ipython:: python - arrays = [['bar', 'bar', 'baz', 'baz', 'foo', 'foo', 'qux', 'qux'], - ['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']] + arrays = [ + ["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"], + ["one", "two", "one", "two", "one", "two", "one", "two"], + ] tuples = list(zip(*arrays)) tuples - index = pd.MultiIndex.from_tuples(tuples, names=['first', 'second']) + index = pd.MultiIndex.from_tuples(tuples, names=["first", "second"]) index s = pd.Series(np.random.randn(8), index=index) @@ -78,8 +80,8 @@ to use the :meth:`MultiIndex.from_product` method: .. ipython:: python - iterables = [['bar', 'baz', 'foo', 'qux'], ['one', 'two']] - pd.MultiIndex.from_product(iterables, names=['first', 'second']) + iterables = [["bar", "baz", "foo", "qux"], ["one", "two"]] + pd.MultiIndex.from_product(iterables, names=["first", "second"]) You can also construct a ``MultiIndex`` from a ``DataFrame`` directly, using the method :meth:`MultiIndex.from_frame`. This is a complementary method to @@ -89,9 +91,10 @@ the method :meth:`MultiIndex.from_frame`. This is a complementary method to .. ipython:: python - df = pd.DataFrame([['bar', 'one'], ['bar', 'two'], - ['foo', 'one'], ['foo', 'two']], - columns=['first', 'second']) + df = pd.DataFrame( + [["bar", "one"], ["bar", "two"], ["foo", "one"], ["foo", "two"]], + columns=["first", "second"], + ) pd.MultiIndex.from_frame(df) As a convenience, you can pass a list of arrays directly into ``Series`` or @@ -99,8 +102,10 @@ As a convenience, you can pass a list of arrays directly into ``Series`` or .. 
ipython:: python

-    arrays = [np.array(['bar', 'bar', 'baz', 'baz', 'foo', 'foo', 'qux', 'qux']),
-              np.array(['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two'])]
+    arrays = [
+        np.array(["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"]),
+        np.array(["one", "two", "one", "two", "one", "two", "one", "two"]),
+    ]
     s = pd.Series(np.random.randn(8), index=arrays)
     s
     df = pd.DataFrame(np.random.randn(8, 4), index=arrays)
@@ -119,7 +124,7 @@ of the index is up to you:

 .. ipython:: python

-    df = pd.DataFrame(np.random.randn(3, 8), index=['A', 'B', 'C'], columns=index)
+    df = pd.DataFrame(np.random.randn(3, 8), index=["A", "B", "C"], columns=index)
     df
     pd.DataFrame(np.random.randn(6, 6), index=index[:6], columns=index[:6])

@@ -129,7 +134,7 @@ bit easier on the eyes. Note that how the index is displayed can be controlled u

 .. ipython:: python

-    with pd.option_context('display.multi_sparse', False):
+    with pd.option_context("display.multi_sparse", False):
         df

 It's worth keeping in mind that there's nothing preventing you from using
@@ -157,7 +162,7 @@ location at a particular level:
 .. ipython:: python

     index.get_level_values(0)
-    index.get_level_values('second')
+    index.get_level_values("second")

 Basic indexing on axis with MultiIndex
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -169,10 +174,10 @@ completely analogous way to selecting a column in a regular DataFrame:

 .. ipython:: python

-    df['bar']
-    df['bar', 'one']
-    df['bar']['one']
-    s['qux']
+    df["bar"]
+    df["bar", "one"]
+    df["bar"]["one"]
+    s["qux"]

 See :ref:`Cross-section with hierarchical index <advanced.xs>` for how to select
 on a deeper level.

@@ -190,7 +195,7 @@ For example:

    df.columns.levels # original MultiIndex

-   df[['foo','qux']].columns.levels # sliced
+   df[["foo", "qux"]].columns.levels  # sliced

 This is done to avoid a recomputation of the levels in order to make slicing
 highly performant. If you want to see only the used levels, you can use the
@@ -198,17 +203,17 @@ highly performant. If you want to see only the used levels, you can use the

 .. ipython:: python

-    df[['foo', 'qux']].columns.to_numpy()
+    df[["foo", "qux"]].columns.to_numpy()

     # for a specific level
-    df[['foo', 'qux']].columns.get_level_values(0)
+    df[["foo", "qux"]].columns.get_level_values(0)

 To reconstruct the ``MultiIndex`` with only the used levels, the
 :meth:`~MultiIndex.remove_unused_levels` method may be used.

 .. ipython:: python

-    new_mi = df[['foo', 'qux']].columns.remove_unused_levels()
+    new_mi = df[["foo", "qux"]].columns.remove_unused_levels()
     new_mi.levels

 Data alignment and using ``reindex``
@@ -229,7 +234,7 @@ called with another ``MultiIndex``, or even a list or array of tuples:
 .. ipython:: python

     s.reindex(index[:3])
-    s.reindex([('foo', 'two'), ('bar', 'one'), ('qux', 'one'), ('baz', 'one')])
+    s.reindex([("foo", "two"), ("bar", "one"), ("qux", "one"), ("baz", "one")])

 .. _advanced.advanced_hierarchical:

@@ -244,7 +249,7 @@ keys take the form of tuples. For example, the following works as you would expe
     df = df.T
     df
-    df.loc[('bar', 'two')]
+    df.loc[("bar", "two")]

 Note that ``df.loc['bar', 'two']`` would also work in this example, but this
 shorthand notation can lead to ambiguity in general.
@@ -254,7 +259,7 @@ like this:

 .. ipython:: python

-    df.loc[('bar', 'two'), 'A']
+    df.loc[("bar", "two"), "A"]

 You don't have to specify all levels of the ``MultiIndex`` by passing only the
 first elements of the tuple. For example, you can use "partial" indexing to
 get all elements with ``bar`` in the first level as follows:

..
ipython:: python - df.loc['bar'] + df.loc["bar"] This is a shortcut for the slightly more verbose notation ``df.loc[('bar',),]`` (equivalent to ``df.loc['bar',]`` in this example). @@ -271,20 +276,20 @@ to ``df.loc['bar',]`` in this example). .. ipython:: python - df.loc['baz':'foo'] + df.loc["baz":"foo"] You can slice with a 'range' of values, by providing a slice of tuples. .. ipython:: python - df.loc[('baz', 'two'):('qux', 'one')] - df.loc[('baz', 'two'):'foo'] + df.loc[("baz", "two"):("qux", "one")] + df.loc[("baz", "two"):"foo"] Passing a list of labels or tuples works similar to reindexing: .. ipython:: python - df.loc[[('bar', 'two'), ('qux', 'one')]] + df.loc[[("bar", "two"), ("qux", "one")]] .. note:: @@ -298,8 +303,9 @@ whereas a tuple of lists refer to several values within a level: .. ipython:: python - s = pd.Series([1, 2, 3, 4, 5, 6], - index=pd.MultiIndex.from_product([["A", "B"], ["c", "d", "e"]])) + s = pd.Series( + [1, 2, 3, 4, 5, 6], index=pd.MultiIndex.from_product([["A", "B"], ["c", "d", "e"]]) + ) s.loc[[("A", "c"), ("B", "d")]] # list of tuples s.loc[(["A", "B"], ["c", "d"])] # tuple of lists @@ -329,37 +335,44 @@ As usual, **both sides** of the slicers are included as this is label indexing. .. code-block:: python - df.loc[(slice('A1', 'A3'), ...), :] # noqa: E999 + df.loc[(slice("A1", "A3"), ...), :] # noqa: E999   You should **not** do this:   .. code-block:: python - df.loc[(slice('A1', 'A3'), ...)] # noqa: E999 + df.loc[(slice("A1", "A3"), ...)] # noqa: E999 .. ipython:: python def mklbl(prefix, n): return ["%s%s" % (prefix, i) for i in range(n)] - miindex = pd.MultiIndex.from_product([mklbl('A', 4), - mklbl('B', 2), - mklbl('C', 4), - mklbl('D', 2)]) - micolumns = pd.MultiIndex.from_tuples([('a', 'foo'), ('a', 'bar'), - ('b', 'foo'), ('b', 'bah')], - names=['lvl0', 'lvl1']) - dfmi = pd.DataFrame(np.arange(len(miindex) * len(micolumns)) - .reshape((len(miindex), len(micolumns))), - index=miindex, - columns=micolumns).sort_index().sort_index(axis=1) + + miindex = pd.MultiIndex.from_product( + [mklbl("A", 4), mklbl("B", 2), mklbl("C", 4), mklbl("D", 2)] + ) + micolumns = pd.MultiIndex.from_tuples( + [("a", "foo"), ("a", "bar"), ("b", "foo"), ("b", "bah")], names=["lvl0", "lvl1"] + ) + dfmi = ( + pd.DataFrame( + np.arange(len(miindex) * len(micolumns)).reshape( + (len(miindex), len(micolumns)) + ), + index=miindex, + columns=micolumns, + ) + .sort_index() + .sort_index(axis=1) + ) dfmi Basic MultiIndex slicing using slices, lists, and labels. .. ipython:: python - dfmi.loc[(slice('A1', 'A3'), slice(None), ['C1', 'C3']), :] + dfmi.loc[(slice("A1", "A3"), slice(None), ["C1", "C3"]), :] You can use :class:`pandas.IndexSlice` to facilitate a more natural syntax @@ -368,36 +381,36 @@ using ``:``, rather than using ``slice(None)``. .. ipython:: python idx = pd.IndexSlice - dfmi.loc[idx[:, :, ['C1', 'C3']], idx[:, 'foo']] + dfmi.loc[idx[:, :, ["C1", "C3"]], idx[:, "foo"]] It is possible to perform quite complicated selections using this method on multiple axes at the same time. .. ipython:: python - dfmi.loc['A1', (slice(None), 'foo')] - dfmi.loc[idx[:, :, ['C1', 'C3']], idx[:, 'foo']] + dfmi.loc["A1", (slice(None), "foo")] + dfmi.loc[idx[:, :, ["C1", "C3"]], idx[:, "foo"]] Using a boolean indexer you can provide selection related to the *values*. .. 
ipython:: python - mask = dfmi[('a', 'foo')] > 200 - dfmi.loc[idx[mask, :, ['C1', 'C3']], idx[:, 'foo']] + mask = dfmi[("a", "foo")] > 200 + dfmi.loc[idx[mask, :, ["C1", "C3"]], idx[:, "foo"]] You can also specify the ``axis`` argument to ``.loc`` to interpret the passed slicers on a single axis. .. ipython:: python - dfmi.loc(axis=0)[:, :, ['C1', 'C3']] + dfmi.loc(axis=0)[:, :, ["C1", "C3"]] Furthermore, you can *set* the values using the following methods. .. ipython:: python df2 = dfmi.copy() - df2.loc(axis=0)[:, :, ['C1', 'C3']] = -10 + df2.loc(axis=0)[:, :, ["C1", "C3"]] = -10 df2 You can use a right-hand-side of an alignable object as well. @@ -405,7 +418,7 @@ You can use a right-hand-side of an alignable object as well. .. ipython:: python df2 = dfmi.copy() - df2.loc[idx[:, :, ['C1', 'C3']], :] = df2 * 1000 + df2.loc[idx[:, :, ["C1", "C3"]], :] = df2 * 1000 df2 .. _advanced.xs: @@ -419,12 +432,12 @@ selecting data at a particular level of a ``MultiIndex`` easier. .. ipython:: python df - df.xs('one', level='second') + df.xs("one", level="second") .. ipython:: python # using the slicers - df.loc[(slice(None), 'one'), :] + df.loc[(slice(None), "one"), :] You can also select on the columns with ``xs``, by providing the axis argument. @@ -432,36 +445,36 @@ providing the axis argument. .. ipython:: python df = df.T - df.xs('one', level='second', axis=1) + df.xs("one", level="second", axis=1) .. ipython:: python # using the slicers - df.loc[:, (slice(None), 'one')] + df.loc[:, (slice(None), "one")] ``xs`` also allows selection with multiple keys. .. ipython:: python - df.xs(('one', 'bar'), level=('second', 'first'), axis=1) + df.xs(("one", "bar"), level=("second", "first"), axis=1) .. ipython:: python # using the slicers - df.loc[:, ('bar', 'one')] + df.loc[:, ("bar", "one")] You can pass ``drop_level=False`` to ``xs`` to retain the level that was selected. .. ipython:: python - df.xs('one', level='second', axis=1, drop_level=False) + df.xs("one", level="second", axis=1, drop_level=False) Compare the above with the result using ``drop_level=True`` (the default value). .. ipython:: python - df.xs('one', level='second', axis=1, drop_level=True) + df.xs("one", level="second", axis=1, drop_level=True) .. ipython:: python :suppress: @@ -479,8 +492,9 @@ values across a level. For instance: .. ipython:: python - midx = pd.MultiIndex(levels=[['zero', 'one'], ['x', 'y']], - codes=[[1, 1, 0, 0], [1, 0, 1, 0]]) + midx = pd.MultiIndex( + levels=[["zero", "one"], ["x", "y"]], codes=[[1, 1, 0, 0], [1, 0, 1, 0]] + ) df = pd.DataFrame(np.random.randn(4, 2), index=midx) df df2 = df.mean(level=0) @@ -543,7 +557,7 @@ used to move the values from the ``MultiIndex`` to a column. .. ipython:: python - df.rename_axis(index=['abc', 'def']) + df.rename_axis(index=["abc", "def"]) Note that the columns of a ``DataFrame`` are an index, so that using ``rename_axis`` with the ``columns`` argument will change the name of that @@ -561,7 +575,7 @@ When working with an ``Index`` object directly, rather than via a ``DataFrame``, .. ipython:: python - mi = pd.MultiIndex.from_product([[1, 2], ['a', 'b']], names=['x', 'y']) + mi = pd.MultiIndex.from_product([[1, 2], ["a", "b"]], names=["x", "y"]) mi.names mi2 = mi.rename("new name", level=0) @@ -586,6 +600,7 @@ they need to be sorted. As with any index, you can use :meth:`~DataFrame.sort_in .. ipython:: python import random + random.shuffle(tuples) s = pd.Series(np.random.randn(8), index=pd.MultiIndex.from_tuples(tuples)) s @@ -600,9 +615,9 @@ are named. .. 
ipython:: python - s.index.set_names(['L1', 'L2'], inplace=True) - s.sort_index(level='L1') - s.sort_index(level='L2') + s.index.set_names(["L1", "L2"], inplace=True) + s.sort_index(level="L1") + s.sort_index(level="L2") On higher dimensional objects, you can sort any of the other axes by level if they have a ``MultiIndex``: @@ -617,10 +632,10 @@ return a copy of the data rather than a view: .. ipython:: python - dfm = pd.DataFrame({'jim': [0, 0, 1, 1], - 'joe': ['x', 'x', 'z', 'y'], - 'jolie': np.random.rand(4)}) - dfm = dfm.set_index(['jim', 'joe']) + dfm = pd.DataFrame( + {"jim": [0, 0, 1, 1], "joe": ["x", "x", "z", "y"], "jolie": np.random.rand(4)} + ) + dfm = dfm.set_index(["jim", "joe"]) dfm .. code-block:: ipython @@ -661,7 +676,7 @@ And now selection works as expected. .. ipython:: python - dfm.loc[(0, 'y'):(1, 'z')] + dfm.loc[(0, "y"):(1, "z")] Take methods ------------ @@ -754,18 +769,18 @@ and allows efficient indexing and storage of an index with a large number of dup .. ipython:: python from pandas.api.types import CategoricalDtype - df = pd.DataFrame({'A': np.arange(6), - 'B': list('aabbca')}) - df['B'] = df['B'].astype(CategoricalDtype(list('cab'))) + + df = pd.DataFrame({"A": np.arange(6), "B": list("aabbca")}) + df["B"] = df["B"].astype(CategoricalDtype(list("cab"))) df df.dtypes - df['B'].cat.categories + df["B"].cat.categories Setting the index will create a ``CategoricalIndex``. .. ipython:: python - df2 = df.set_index('B') + df2 = df.set_index("B") df2.index Indexing with ``__getitem__/.iloc/.loc`` works similarly to an ``Index`` with duplicates. @@ -773,13 +788,13 @@ The indexers **must** be in the category or the operation will raise a ``KeyErro .. ipython:: python - df2.loc['a'] + df2.loc["a"] The ``CategoricalIndex`` is **preserved** after indexing: .. ipython:: python - df2.loc['a'].index + df2.loc["a"].index Sorting the index will sort by the order of the categories (recall that we created the index with ``CategoricalDtype(list('cab'))``, so the sorted @@ -804,17 +819,16 @@ values **not** in the categories, similarly to how you can reindex **any** panda .. ipython:: python - df3 = pd.DataFrame({'A': np.arange(3), - 'B': pd.Series(list('abc')).astype('category')}) - df3 = df3.set_index('B') + df3 = pd.DataFrame({"A": np.arange(3), "B": pd.Series(list("abc")).astype("category")}) + df3 = df3.set_index("B") df3 .. ipython:: python - df3.reindex(['a', 'e']) - df3.reindex(['a', 'e']).index - df3.reindex(pd.Categorical(['a', 'e'], categories=list('abe'))) - df3.reindex(pd.Categorical(['a', 'e'], categories=list('abe'))).index + df3.reindex(["a", "e"]) + df3.reindex(["a", "e"]).index + df3.reindex(pd.Categorical(["a", "e"], categories=list("abe"))) + df3.reindex(pd.Categorical(["a", "e"], categories=list("abe"))).index .. warning:: @@ -823,16 +837,14 @@ values **not** in the categories, similarly to how you can reindex **any** panda .. ipython:: python - df4 = pd.DataFrame({'A': np.arange(2), - 'B': list('ba')}) - df4['B'] = df4['B'].astype(CategoricalDtype(list('ab'))) - df4 = df4.set_index('B') + df4 = pd.DataFrame({"A": np.arange(2), "B": list("ba")}) + df4["B"] = df4["B"].astype(CategoricalDtype(list("ab"))) + df4 = df4.set_index("B") df4.index - df5 = pd.DataFrame({'A': np.arange(2), - 'B': list('bc')}) - df5['B'] = df5['B'].astype(CategoricalDtype(list('bc'))) - df5 = df5.set_index('B') + df5 = pd.DataFrame({"A": np.arange(2), "B": list("bc")}) + df5["B"] = df5["B"].astype(CategoricalDtype(list("bc"))) + df5 = df5.set_index("B") df5.index .. 
code-block:: ipython @@ -916,12 +928,16 @@ example, be millisecond offsets. .. ipython:: python - dfir = pd.concat([pd.DataFrame(np.random.randn(5, 2), - index=np.arange(5) * 250.0, - columns=list('AB')), - pd.DataFrame(np.random.randn(6, 2), - index=np.arange(4, 10) * 250.1, - columns=list('AB'))]) + dfir = pd.concat( + [ + pd.DataFrame( + np.random.randn(5, 2), index=np.arange(5) * 250.0, columns=list("AB") + ), + pd.DataFrame( + np.random.randn(6, 2), index=np.arange(4, 10) * 250.1, columns=list("AB") + ), + ] + ) dfir Selection operations then will always work on a value basis, for all selection operators. @@ -929,7 +945,7 @@ Selection operations then will always work on a value basis, for all selection o .. ipython:: python dfir[0:1000.4] - dfir.loc[0:1001, 'A'] + dfir.loc[0:1001, "A"] dfir.loc[1000.4] You could retrieve the first 1 second (1000 ms) of data as such: @@ -963,8 +979,9 @@ An ``IntervalIndex`` can be used in ``Series`` and in ``DataFrame`` as the index .. ipython:: python - df = pd.DataFrame({'A': [1, 2, 3, 4]}, - index=pd.IntervalIndex.from_breaks([0, 1, 2, 3, 4])) + df = pd.DataFrame( + {"A": [1, 2, 3, 4]}, index=pd.IntervalIndex.from_breaks([0, 1, 2, 3, 4]) + ) df Label based indexing via ``.loc`` along the edges of an interval works as you would expect, @@ -1041,9 +1058,9 @@ datetime-like intervals: pd.interval_range(start=0, end=5) - pd.interval_range(start=pd.Timestamp('2017-01-01'), periods=4) + pd.interval_range(start=pd.Timestamp("2017-01-01"), periods=4) - pd.interval_range(end=pd.Timedelta('3 days'), periods=3) + pd.interval_range(end=pd.Timedelta("3 days"), periods=3) The ``freq`` parameter can used to specify non-default frequencies, and can utilize a variety of :ref:`frequency aliases ` with datetime-like intervals: @@ -1052,18 +1069,18 @@ of :ref:`frequency aliases ` with datetime-like inter pd.interval_range(start=0, periods=5, freq=1.5) - pd.interval_range(start=pd.Timestamp('2017-01-01'), periods=4, freq='W') + pd.interval_range(start=pd.Timestamp("2017-01-01"), periods=4, freq="W") - pd.interval_range(start=pd.Timedelta('0 days'), periods=3, freq='9H') + pd.interval_range(start=pd.Timedelta("0 days"), periods=3, freq="9H") Additionally, the ``closed`` parameter can be used to specify which side(s) the intervals are closed on. Intervals are closed on the right side by default. .. ipython:: python - pd.interval_range(start=0, end=4, closed='both') + pd.interval_range(start=0, end=4, closed="both") - pd.interval_range(start=0, end=4, closed='neither') + pd.interval_range(start=0, end=4, closed="neither") Specifying ``start``, ``end``, and ``periods`` will generate a range of evenly spaced intervals from ``start`` to ``end`` inclusively, with ``periods`` number of elements @@ -1073,8 +1090,7 @@ in the resulting ``IntervalIndex``: pd.interval_range(start=0, end=6, periods=4) - pd.interval_range(pd.Timestamp('2018-01-01'), - pd.Timestamp('2018-02-28'), periods=3) + pd.interval_range(pd.Timestamp("2018-01-01"), pd.Timestamp("2018-02-28"), periods=3) Miscellaneous indexing FAQ -------------------------- @@ -1112,7 +1128,7 @@ normal Python ``list``. Monotonicity of an index can be tested with the :meth:`~ .. 
ipython:: python - df = pd.DataFrame(index=[2, 3, 3, 4, 5], columns=['data'], data=list(range(5))) + df = pd.DataFrame(index=[2, 3, 3, 4, 5], columns=["data"], data=list(range(5))) df.index.is_monotonic_increasing # no rows 0 or 1, but still returns rows 2, 3 (both of them), and 4: @@ -1126,8 +1142,7 @@ On the other hand, if the index is not monotonic, then both slice bounds must be .. ipython:: python - df = pd.DataFrame(index=[2, 3, 1, 4, 3, 5], - columns=['data'], data=list(range(6))) + df = pd.DataFrame(index=[2, 3, 1, 4, 3, 5], columns=["data"], data=list(range(6))) df.index.is_monotonic_increasing # OK because 2 and 4 are in the index @@ -1149,7 +1164,7 @@ the :meth:`~Index.is_unique` attribute. .. ipython:: python - weakly_monotonic = pd.Index(['a', 'b', 'c', 'c']) + weakly_monotonic = pd.Index(["a", "b", "c", "c"]) weakly_monotonic weakly_monotonic.is_monotonic_increasing weakly_monotonic.is_monotonic_increasing & weakly_monotonic.is_unique @@ -1167,7 +1182,7 @@ consider the following ``Series``: .. ipython:: python - s = pd.Series(np.random.randn(6), index=list('abcdef')) + s = pd.Series(np.random.randn(6), index=list("abcdef")) s Suppose we wished to slice from ``c`` to ``e``, using integers this would be @@ -1190,7 +1205,7 @@ slicing include both endpoints: .. ipython:: python - s.loc['c':'e'] + s.loc["c":"e"] This is most definitely a "practicality beats purity" sort of thing, but it is something to watch out for if you expect label-based slicing to behave exactly diff --git a/doc/source/user_guide/basics.rst b/doc/source/user_guide/basics.rst index e348111fe7881..2a403035d1706 100644 --- a/doc/source/user_guide/basics.rst +++ b/doc/source/user_guide/basics.rst @@ -12,10 +12,9 @@ the :ref:`10 minutes to pandas <10min>` section: .. ipython:: python - index = pd.date_range('1/1/2000', periods=8) - s = pd.Series(np.random.randn(5), index=['a', 'b', 'c', 'd', 'e']) - df = pd.DataFrame(np.random.randn(8, 3), index=index, - columns=['A', 'B', 'C']) + index = pd.date_range("1/1/2000", periods=8) + s = pd.Series(np.random.randn(5), index=["a", "b", "c", "d", "e"]) + df = pd.DataFrame(np.random.randn(8, 3), index=index, columns=["A", "B", "C"]) .. _basics.head_tail: @@ -97,7 +96,7 @@ Timezones may be preserved with ``dtype=object`` .. ipython:: python - ser = pd.Series(pd.date_range('2000', periods=2, tz="CET")) + ser = pd.Series(pd.date_range("2000", periods=2, tz="CET")) ser.to_numpy(dtype=object) Or thrown away with ``dtype='datetime64[ns]'`` @@ -174,8 +173,8 @@ These are both enabled to be used by default, you can control this by setting th .. code-block:: python - pd.set_option('compute.use_bottleneck', False) - pd.set_option('compute.use_numexpr', False) + pd.set_option("compute.use_bottleneck", False) + pd.set_option("compute.use_numexpr", False) .. _basics.binop: @@ -204,18 +203,21 @@ either match on the *index* or *columns* via the **axis** keyword: .. 
ipython:: python - df = pd.DataFrame({ - 'one': pd.Series(np.random.randn(3), index=['a', 'b', 'c']), - 'two': pd.Series(np.random.randn(4), index=['a', 'b', 'c', 'd']), - 'three': pd.Series(np.random.randn(3), index=['b', 'c', 'd'])}) + df = pd.DataFrame( + { + "one": pd.Series(np.random.randn(3), index=["a", "b", "c"]), + "two": pd.Series(np.random.randn(4), index=["a", "b", "c", "d"]), + "three": pd.Series(np.random.randn(3), index=["b", "c", "d"]), + } + ) df row = df.iloc[1] - column = df['two'] + column = df["two"] - df.sub(row, axis='columns') + df.sub(row, axis="columns") df.sub(row, axis=1) - df.sub(column, axis='index') + df.sub(column, axis="index") df.sub(column, axis=0) .. ipython:: python @@ -228,10 +230,10 @@ Furthermore you can align a level of a MultiIndexed DataFrame with a Series. .. ipython:: python dfmi = df.copy() - dfmi.index = pd.MultiIndex.from_tuples([(1, 'a'), (1, 'b'), - (1, 'c'), (2, 'a')], - names=['first', 'second']) - dfmi.sub(column, axis=0, level='second') + dfmi.index = pd.MultiIndex.from_tuples( + [(1, "a"), (1, "b"), (1, "c"), (2, "a")], names=["first", "second"] + ) + dfmi.sub(column, axis=0, level="second") Series and Index also support the :func:`divmod` builtin. This function takes the floor division and modulo operation at the same time returning a two-tuple @@ -273,7 +275,7 @@ using ``fillna`` if you wish). :suppress: df2 = df.copy() - df2['three']['a'] = 1. + df2["three"]["a"] = 1.0 .. ipython:: python @@ -325,7 +327,7 @@ You can test if a pandas object is empty, via the :attr:`~DataFrame.empty` prope .. ipython:: python df.empty - pd.DataFrame(columns=list('ABC')).empty + pd.DataFrame(columns=list("ABC")).empty To evaluate single-element pandas objects in a boolean context, use the method :meth:`~DataFrame.bool`: @@ -394,8 +396,8 @@ equality to be True: .. ipython:: python - df1 = pd.DataFrame({'col': ['foo', 0, np.nan]}) - df2 = pd.DataFrame({'col': [np.nan, 0, 'foo']}, index=[2, 1, 0]) + df1 = pd.DataFrame({"col": ["foo", 0, np.nan]}) + df2 = pd.DataFrame({"col": [np.nan, 0, "foo"]}, index=[2, 1, 0]) df1.equals(df2) df1.equals(df2.sort_index()) @@ -407,16 +409,16 @@ data structure with a scalar value: .. ipython:: python - pd.Series(['foo', 'bar', 'baz']) == 'foo' - pd.Index(['foo', 'bar', 'baz']) == 'foo' + pd.Series(["foo", "bar", "baz"]) == "foo" + pd.Index(["foo", "bar", "baz"]) == "foo" Pandas also handles element-wise comparisons between different array-like objects of the same length: .. ipython:: python - pd.Series(['foo', 'bar', 'baz']) == pd.Index(['foo', 'bar', 'qux']) - pd.Series(['foo', 'bar', 'baz']) == np.array(['foo', 'bar', 'qux']) + pd.Series(["foo", "bar", "baz"]) == pd.Index(["foo", "bar", "qux"]) + pd.Series(["foo", "bar", "baz"]) == np.array(["foo", "bar", "qux"]) Trying to compare ``Index`` or ``Series`` objects of different lengths will raise a ValueError: @@ -458,10 +460,12 @@ which we illustrate: .. 
ipython:: python - df1 = pd.DataFrame({'A': [1., np.nan, 3., 5., np.nan], - 'B': [np.nan, 2., 3., np.nan, 6.]}) - df2 = pd.DataFrame({'A': [5., 2., 4., np.nan, 3., 7.], - 'B': [np.nan, np.nan, 3., 4., 6., 8.]}) + df1 = pd.DataFrame( + {"A": [1.0, np.nan, 3.0, 5.0, np.nan], "B": [np.nan, 2.0, 3.0, np.nan, 6.0]} + ) + df2 = pd.DataFrame( + {"A": [5.0, 2.0, 4.0, np.nan, 3.0, 7.0], "B": [np.nan, np.nan, 3.0, 4.0, 6.0, 8.0]} + ) df1 df2 df1.combine_first(df2) @@ -480,6 +484,8 @@ So, for instance, to reproduce :meth:`~DataFrame.combine_first` as above: def combiner(x, y): return np.where(pd.isna(x), y, x) + + df1.combine(df2, combiner) .. _basics.stats: @@ -570,8 +576,8 @@ will exclude NAs on Series input by default: .. ipython:: python - np.mean(df['one']) - np.mean(df['one'].to_numpy()) + np.mean(df["one"]) + np.mean(df["one"].to_numpy()) :meth:`Series.nunique` will return the number of unique non-NA values in a Series: @@ -597,8 +603,7 @@ course): series = pd.Series(np.random.randn(1000)) series[::2] = np.nan series.describe() - frame = pd.DataFrame(np.random.randn(1000, 5), - columns=['a', 'b', 'c', 'd', 'e']) + frame = pd.DataFrame(np.random.randn(1000, 5), columns=["a", "b", "c", "d", "e"]) frame.iloc[::2] = np.nan frame.describe() @@ -606,7 +611,7 @@ You can select specific percentiles to include in the output: .. ipython:: python - series.describe(percentiles=[.05, .25, .75, .95]) + series.describe(percentiles=[0.05, 0.25, 0.75, 0.95]) By default, the median is always included. @@ -615,7 +620,7 @@ summary of the number of unique values and most frequently occurring values: .. ipython:: python - s = pd.Series(['a', 'a', 'b', 'b', 'a', 'a', np.nan, 'c', 'd', 'a']) + s = pd.Series(["a", "a", "b", "b", "a", "a", np.nan, "c", "d", "a"]) s.describe() Note that on a mixed-type DataFrame object, :meth:`~DataFrame.describe` will @@ -624,7 +629,7 @@ categorical columns: .. ipython:: python - frame = pd.DataFrame({'a': ['Yes', 'Yes', 'No', 'No'], 'b': range(4)}) + frame = pd.DataFrame({"a": ["Yes", "Yes", "No", "No"], "b": range(4)}) frame.describe() This behavior can be controlled by providing a list of types as ``include``/``exclude`` @@ -632,9 +637,9 @@ arguments. The special value ``all`` can also be used: .. ipython:: python - frame.describe(include=['object']) - frame.describe(include=['number']) - frame.describe(include='all') + frame.describe(include=["object"]) + frame.describe(include=["number"]) + frame.describe(include="all") That feature relies on :ref:`select_dtypes `. Refer to there for details about accepted inputs. @@ -654,7 +659,7 @@ corresponding values: s1 s1.idxmin(), s1.idxmax() - df1 = pd.DataFrame(np.random.randn(5, 3), columns=['A', 'B', 'C']) + df1 = pd.DataFrame(np.random.randn(5, 3), columns=["A", "B", "C"]) df1 df1.idxmin(axis=0) df1.idxmax(axis=1) @@ -665,9 +670,9 @@ matching index: .. ipython:: python - df3 = pd.DataFrame([2, 1, 1, 3, np.nan], columns=['A'], index=list('edcba')) + df3 = pd.DataFrame([2, 1, 1, 3, np.nan], columns=["A"], index=list("edcba")) df3 - df3['A'].idxmin() + df3["A"].idxmin() .. note:: @@ -706,8 +711,9 @@ Similarly, you can get the most frequently occurring value(s), i.e. 
the mode, of s5 = pd.Series([1, 1, 3, 3, 3, 5, 5, 7, 7, 7]) s5.mode() - df5 = pd.DataFrame({"A": np.random.randint(0, 7, size=50), - "B": np.random.randint(-10, 15, size=50)}) + df5 = pd.DataFrame( + {"A": np.random.randint(0, 7, size=50), "B": np.random.randint(-10, 15, size=50)} + ) df5.mode() @@ -732,7 +738,7 @@ normally distributed data into equal-size quartiles like so: .. ipython:: python arr = np.random.randn(30) - factor = pd.qcut(arr, [0, .25, .5, .75, 1]) + factor = pd.qcut(arr, [0, 0.25, 0.5, 0.75, 1]) factor pd.value_counts(factor) @@ -775,18 +781,20 @@ First some setup: """ Chicago, IL -> Chicago for city_name column """ - df['city_name'] = df['city_and_code'].str.split(",").str.get(0) + df["city_name"] = df["city_and_code"].str.split(",").str.get(0) return df + def add_country_name(df, country_name=None): """ Chicago -> Chicago-US for city_name column """ - col = 'city_name' - df['city_and_country'] = df[col] + country_name + col = "city_name" + df["city_and_country"] = df[col] + country_name return df - df_p = pd.DataFrame({'city_and_code': ['Chicago, IL']}) + + df_p = pd.DataFrame({"city_and_code": ["Chicago, IL"]}) ``extract_city_name`` and ``add_country_name`` are functions taking and returning ``DataFrames``. @@ -795,14 +803,13 @@ Now compare the following: .. ipython:: python - add_country_name(extract_city_name(df_p), country_name='US') + add_country_name(extract_city_name(df_p), country_name="US") Is equivalent to: .. ipython:: python - (df_p.pipe(extract_city_name) - .pipe(add_country_name, country_name="US")) + (df_p.pipe(extract_city_name).pipe(add_country_name, country_name="US")) Pandas encourages the second style, which is known as method chaining. ``pipe`` makes it easy to use your own or another library's functions @@ -820,14 +827,15 @@ For example, we can fit a regression using statsmodels. Their API expects a form import statsmodels.formula.api as sm - bb = pd.read_csv('data/baseball.csv', index_col='id') + bb = pd.read_csv("data/baseball.csv", index_col="id") - (bb.query('h > 0') - .assign(ln_h=lambda df: np.log(df.h)) - .pipe((sm.ols, 'data'), 'hr ~ ln_h + year + g + C(lg)') - .fit() - .summary() - ) + ( + bb.query("h > 0") + .assign(ln_h=lambda df: np.log(df.h)) + .pipe((sm.ols, "data"), "hr ~ ln_h + year + g + C(lg)") + .fit() + .summary() + ) The pipe method is inspired by unix pipes and more recently dplyr_ and magrittr_, which have introduced the popular ``(%>%)`` (read pipe) operator for R_. @@ -858,8 +866,8 @@ The :meth:`~DataFrame.apply` method will also dispatch on a string method name. .. ipython:: python - df.apply('mean') - df.apply('mean', axis=1) + df.apply("mean") + df.apply("mean", axis=1) The return type of the function passed to :meth:`~DataFrame.apply` affects the type of the final output from ``DataFrame.apply`` for the default behaviour: @@ -878,8 +886,11 @@ maximum value for each column occurred: .. ipython:: python - tsdf = pd.DataFrame(np.random.randn(1000, 3), columns=['A', 'B', 'C'], - index=pd.date_range('1/1/2000', periods=1000)) + tsdf = pd.DataFrame( + np.random.randn(1000, 3), + columns=["A", "B", "C"], + index=pd.date_range("1/1/2000", periods=1000), + ) tsdf.apply(lambda x: x.idxmax()) You may also pass additional arguments and keyword arguments to the :meth:`~DataFrame.apply` @@ -902,8 +913,11 @@ Series operation on each column or row: .. 
ipython:: python
   :suppress:

-    tsdf = pd.DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'],
-                        index=pd.date_range('1/1/2000', periods=10))
+    tsdf = pd.DataFrame(
+        np.random.randn(10, 3),
+        columns=["A", "B", "C"],
+        index=pd.date_range("1/1/2000", periods=10),
+    )
     tsdf.iloc[3:7] = np.nan

 .. ipython:: python

@@ -933,8 +947,11 @@ We will use a similar starting frame from above:

 .. ipython:: python

-    tsdf = pd.DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'],
-                        index=pd.date_range('1/1/2000', periods=10))
+    tsdf = pd.DataFrame(
+        np.random.randn(10, 3),
+        columns=["A", "B", "C"],
+        index=pd.date_range("1/1/2000", periods=10),
+    )
     tsdf.iloc[3:7] = np.nan
     tsdf

@@ -946,7 +963,7 @@ output:

     tsdf.agg(np.sum)

-    tsdf.agg('sum')
+    tsdf.agg("sum")

     # these are equivalent to a ``.sum()`` because we are aggregating
     # on a single function

 Single aggregations on a ``Series`` this will return a scalar value:

 .. ipython:: python

-    tsdf['A'].agg('sum')
+    tsdf["A"].agg("sum")

 Aggregating with multiple functions
 +++++++++++++++++++++++++++++++++++

@@ -968,25 +985,25 @@ These are naturally named from the aggregation function.

 .. ipython:: python

-    tsdf.agg(['sum'])
+    tsdf.agg(["sum"])

 Multiple functions yield multiple rows:

 .. ipython:: python

-    tsdf.agg(['sum', 'mean'])
+    tsdf.agg(["sum", "mean"])

 On a ``Series``, multiple functions return a ``Series``, indexed by the function names:

 .. ipython:: python

-    tsdf['A'].agg(['sum', 'mean'])
+    tsdf["A"].agg(["sum", "mean"])

 Passing a ``lambda`` function will yield a ``<lambda>`` named row:

 .. ipython:: python

-    tsdf['A'].agg(['sum', lambda x: x.mean()])
+    tsdf["A"].agg(["sum", lambda x: x.mean()])

 Passing a named function will yield that name for the row:

@@ -995,7 +1012,8 @@ Passing a named function will yield that name for the row:
     def mymean(x):
         return x.mean()

-    tsdf['A'].agg(['sum', mymean])
+
+    tsdf["A"].agg(["sum", mymean])

 Aggregating with a dict
 +++++++++++++++++++++++

@@ -1006,7 +1024,7 @@ are not in any particular order, you can use an ``OrderedDict`` instead to guara

 .. ipython:: python

-    tsdf.agg({'A': 'mean', 'B': 'sum'})
+    tsdf.agg({"A": "mean", "B": "sum"})

 Passing a list-like will generate a ``DataFrame`` output. You will get a matrix-like output
 of all of the aggregators. The output will consist of all unique functions. Those that are
 not noted for a particular column will be ``NaN``:

 .. ipython:: python

-    tsdf.agg({'A': ['mean', 'min'], 'B': 'sum'})
+    tsdf.agg({"A": ["mean", "min"], "B": "sum"})

 .. _basics.aggregation.mixed_string:

@@ -1026,15 +1044,19 @@ aggregations. This is similar to how ``.groupby.agg`` works.

 .. ipython:: python

-    mdf = pd.DataFrame({'A': [1, 2, 3],
-                        'B': [1., 2., 3.],
-                        'C': ['foo', 'bar', 'baz'],
-                        'D': pd.date_range('20130101', periods=3)})
+    mdf = pd.DataFrame(
+        {
+            "A": [1, 2, 3],
+            "B": [1.0, 2.0, 3.0],
+            "C": ["foo", "bar", "baz"],
+            "D": pd.date_range("20130101", periods=3),
+        }
+    )
     mdf.dtypes

 .. ipython:: python

-    mdf.agg(['min', 'sum'])
+    mdf.agg(["min", "sum"])

 .. _basics.aggregation.custom_describe:

@@ -1049,11 +1071,11 @@ to the built in :ref:`describe function <basics.describe>`.

 .. ipython:: python

     from functools import partial

     q_25 = partial(pd.Series.quantile, q=0.25)
-    q_25.__name__ = '25%'
+    q_25.__name__ = "25%"
     q_75 = partial(pd.Series.quantile, q=0.75)
-    q_75.__name__ = '75%'
+    q_75.__name__ = "75%"

-    tsdf.agg(['count', 'mean', 'std', 'min', q_25, 'median', q_75, 'max'])
+    tsdf.agg(["count", "mean", "std", "min", q_25, "median", q_75, "max"])

..
_basics.transform: @@ -1068,8 +1090,11 @@ We create a frame similar to the one used in the above sections. .. ipython:: python - tsdf = pd.DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'], - index=pd.date_range('1/1/2000', periods=10)) + tsdf = pd.DataFrame( + np.random.randn(10, 3), + columns=["A", "B", "C"], + index=pd.date_range("1/1/2000", periods=10), + ) tsdf.iloc[3:7] = np.nan tsdf @@ -1080,7 +1105,7 @@ function name or a user defined function. :okwarning: tsdf.transform(np.abs) - tsdf.transform('abs') + tsdf.transform("abs") tsdf.transform(lambda x: x.abs()) Here :meth:`~DataFrame.transform` received a single function; this is equivalent to a `ufunc @@ -1094,7 +1119,7 @@ Passing a single function to ``.transform()`` with a ``Series`` will yield a sin .. ipython:: python - tsdf['A'].transform(np.abs) + tsdf["A"].transform(np.abs) Transform with multiple functions @@ -1113,7 +1138,7 @@ resulting column names will be the transforming functions. .. ipython:: python - tsdf['A'].transform([np.abs, lambda x: x + 1]) + tsdf["A"].transform([np.abs, lambda x: x + 1]) Transforming with a dict @@ -1124,7 +1149,7 @@ Passing a dict of functions will allow selective transforming per column. .. ipython:: python - tsdf.transform({'A': np.abs, 'B': lambda x: x + 1}) + tsdf.transform({"A": np.abs, "B": lambda x: x + 1}) Passing a dict of lists will generate a MultiIndexed DataFrame with these selective transforms. @@ -1132,7 +1157,7 @@ selective transforms. .. ipython:: python :okwarning: - tsdf.transform({'A': np.abs, 'B': [lambda x: x + 1, 'sqrt']}) + tsdf.transform({"A": np.abs, "B": [lambda x: x + 1, "sqrt"]}) .. _basics.elementwise: @@ -1153,10 +1178,12 @@ a single value and returning a single value. For example: df4 + def f(x): return len(str(x)) - df4['one'].map(f) + + df4["one"].map(f) df4.applymap(f) :meth:`Series.map` has an additional feature; it can be used to easily @@ -1165,9 +1192,8 @@ to :ref:`merging/joining functionality `: .. ipython:: python - s = pd.Series(['six', 'seven', 'six', 'seven', 'six'], - index=['a', 'b', 'c', 'd', 'e']) - t = pd.Series({'six': 6., 'seven': 7.}) + s = pd.Series(["six", "seven", "six", "seven", "six"], index=["a", "b", "c", "d", "e"]) + t = pd.Series({"six": 6.0, "seven": 7.0}) s s.map(t) @@ -1192,9 +1218,9 @@ Here is a simple example: .. ipython:: python - s = pd.Series(np.random.randn(5), index=['a', 'b', 'c', 'd', 'e']) + s = pd.Series(np.random.randn(5), index=["a", "b", "c", "d", "e"]) s - s.reindex(['e', 'b', 'f', 'd']) + s.reindex(["e", "b", "f", "d"]) Here, the ``f`` label was not contained in the Series and hence appears as ``NaN`` in the result. @@ -1204,13 +1230,13 @@ With a DataFrame, you can simultaneously reindex the index and columns: .. ipython:: python df - df.reindex(index=['c', 'f', 'b'], columns=['three', 'two', 'one']) + df.reindex(index=["c", "f", "b"], columns=["three", "two", "one"]) You may also use ``reindex`` with an ``axis`` keyword: .. ipython:: python - df.reindex(['c', 'f', 'b'], axis='index') + df.reindex(["c", "f", "b"], axis="index") Note that the ``Index`` objects containing the actual axis labels can be **shared** between objects. So if we have a Series and a DataFrame, the @@ -1230,8 +1256,8 @@ where you specify a single ``labels`` argument and the ``axis`` it applies to. .. ipython:: python - df.reindex(['c', 'f', 'b'], axis='index') - df.reindex(['three', 'two', 'one'], axis='columns') + df.reindex(["c", "f", "b"], axis="index") + df.reindex(["three", "two", "one"], axis="columns") .. 
seealso:: @@ -1261,7 +1287,7 @@ available to make this simpler: .. ipython:: python :suppress: - df2 = df.reindex(['a', 'b', 'c'], columns=['one', 'two']) + df2 = df.reindex(["a", "b", "c"], columns=["one", "two"]) df3 = df2 - df2.mean() @@ -1288,12 +1314,12 @@ It returns a tuple with both of the reindexed Series: .. ipython:: python - s = pd.Series(np.random.randn(5), index=['a', 'b', 'c', 'd', 'e']) + s = pd.Series(np.random.randn(5), index=["a", "b", "c", "d", "e"]) s1 = s[:4] s2 = s[1:] s1.align(s2) - s1.align(s2, join='inner') - s1.align(s2, join='left') + s1.align(s2, join="inner") + s1.align(s2, join="left") .. _basics.df_join: @@ -1302,13 +1328,13 @@ columns by default: .. ipython:: python - df.align(df2, join='inner') + df.align(df2, join="inner") You can also pass an ``axis`` option to only align on the specified axis: .. ipython:: python - df.align(df2, join='inner', axis=0) + df.align(df2, join="inner", axis=0) .. _basics.align.frame.series: @@ -1339,16 +1365,16 @@ We illustrate these fill methods on a simple Series: .. ipython:: python - rng = pd.date_range('1/3/2000', periods=8) + rng = pd.date_range("1/3/2000", periods=8) ts = pd.Series(np.random.randn(8), index=rng) ts2 = ts[[0, 3, 6]] ts ts2 ts2.reindex(ts.index) - ts2.reindex(ts.index, method='ffill') - ts2.reindex(ts.index, method='bfill') - ts2.reindex(ts.index, method='nearest') + ts2.reindex(ts.index, method="ffill") + ts2.reindex(ts.index, method="bfill") + ts2.reindex(ts.index, method="nearest") These methods require that the indexes are **ordered** increasing or decreasing. @@ -1359,7 +1385,7 @@ Note that the same result could have been achieved using .. ipython:: python - ts2.reindex(ts.index).fillna(method='ffill') + ts2.reindex(ts.index).fillna(method="ffill") :meth:`~Series.reindex` will raise a ValueError if the index is not monotonically increasing or decreasing. :meth:`~Series.fillna` and :meth:`~Series.interpolate` @@ -1376,14 +1402,14 @@ matches: .. ipython:: python - ts2.reindex(ts.index, method='ffill', limit=1) + ts2.reindex(ts.index, method="ffill", limit=1) In contrast, tolerance specifies the maximum distance between the index and indexer values: .. ipython:: python - ts2.reindex(ts.index, method='ffill', tolerance='1 day') + ts2.reindex(ts.index, method="ffill", tolerance="1 day") Notice that when used on a ``DatetimeIndex``, ``TimedeltaIndex`` or ``PeriodIndex``, ``tolerance`` will coerced into a ``Timedelta`` if possible. @@ -1400,14 +1426,14 @@ It removes a set of labels from an axis: .. ipython:: python df - df.drop(['a', 'd'], axis=0) - df.drop(['one'], axis=1) + df.drop(["a", "d"], axis=0) + df.drop(["one"], axis=1) Note that the following also works, but is a bit less obvious / clean: .. ipython:: python - df.reindex(df.index.difference(['a', 'd'])) + df.reindex(df.index.difference(["a", "d"])) .. _basics.rename: @@ -1428,8 +1454,10 @@ Series can also be used: .. ipython:: python - df.rename(columns={'one': 'foo', 'two': 'bar'}, - index={'a': 'apple', 'b': 'banana', 'd': 'durian'}) + df.rename( + columns={"one": "foo", "two": "bar"}, + index={"a": "apple", "b": "banana", "d": "durian"}, + ) If the mapping doesn't include a column/index label, it isn't renamed. Note that extra labels in the mapping don't throw an error. @@ -1439,8 +1467,8 @@ you specify a single ``mapper`` and the ``axis`` to apply that mapping to. .. 
ipython:: python - df.rename({'one': 'foo', 'two': 'bar'}, axis='columns') - df.rename({'a': 'apple', 'b': 'banana', 'd': 'durian'}, axis='index') + df.rename({"one": "foo", "two": "bar"}, axis="columns") + df.rename({"a": "apple", "b": "banana", "d": "durian"}, axis="index") The :meth:`~DataFrame.rename` method also provides an ``inplace`` named @@ -1464,12 +1492,12 @@ labels). .. ipython:: python - df = pd.DataFrame({'x': [1, 2, 3, 4, 5, 6], - 'y': [10, 20, 30, 40, 50, 60]}, - index=pd.MultiIndex.from_product([['a', 'b', 'c'], [1, 2]], - names=['let', 'num'])) + df = pd.DataFrame( + {"x": [1, 2, 3, 4, 5, 6], "y": [10, 20, 30, 40, 50, 60]}, + index=pd.MultiIndex.from_product([["a", "b", "c"], [1, 2]], names=["let", "num"]), + ) df - df.rename_axis(index={'let': 'abc'}) + df.rename_axis(index={"let": "abc"}) df.rename_axis(index=str.upper) .. _basics.iteration: @@ -1491,8 +1519,9 @@ Thus, for example, iterating over a DataFrame gives you the column names: .. ipython:: python - df = pd.DataFrame({'col1': np.random.randn(3), - 'col2': np.random.randn(3)}, index=['a', 'b', 'c']) + df = pd.DataFrame( + {"col1": np.random.randn(3), "col2": np.random.randn(3)}, index=["a", "b", "c"] + ) for col in df: print(col) @@ -1540,10 +1569,10 @@ To iterate over the rows of a DataFrame, you can use the following methods: .. ipython:: python - df = pd.DataFrame({'a': [1, 2, 3], 'b': ['a', 'b', 'c']}) + df = pd.DataFrame({"a": [1, 2, 3], "b": ["a", "b", "c"]}) for index, row in df.iterrows(): - row['a'] = 10 + row["a"] = 10 df @@ -1576,7 +1605,7 @@ index value along with a Series containing the data in each row: .. ipython:: python for row_index, row in df.iterrows(): - print(row_index, row, sep='\n') + print(row_index, row, sep="\n") .. note:: @@ -1586,7 +1615,7 @@ index value along with a Series containing the data in each row: .. ipython:: python - df_orig = pd.DataFrame([[1, 1.5]], columns=['int', 'float']) + df_orig = pd.DataFrame([[1, 1.5]], columns=["int", "float"]) df_orig.dtypes row = next(df_orig.iterrows())[1] row @@ -1596,8 +1625,8 @@ index value along with a Series containing the data in each row: .. ipython:: python - row['int'].dtype - df_orig['int'].dtype + row["int"].dtype + df_orig["int"].dtype To preserve dtypes while iterating over the rows, it is better to use :meth:`~DataFrame.itertuples` which returns namedtuples of the values @@ -1607,7 +1636,7 @@ For instance, a contrived way to transpose the DataFrame would be: .. ipython:: python - df2 = pd.DataFrame({'x': [1, 2, 3], 'y': [4, 5, 6]}) + df2 = pd.DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]}) print(df2) print(df2.T) @@ -1652,7 +1681,7 @@ This will return a Series, indexed like the existing Series. .. ipython:: python # datetime - s = pd.Series(pd.date_range('20130101 09:10:12', periods=4)) + s = pd.Series(pd.date_range("20130101 09:10:12", periods=4)) s s.dt.hour s.dt.second @@ -1668,7 +1697,7 @@ You can easily produces tz aware transformations: .. ipython:: python - stz = s.dt.tz_localize('US/Eastern') + stz = s.dt.tz_localize("US/Eastern") stz stz.dt.tz @@ -1676,7 +1705,7 @@ You can also chain these types of operations: .. ipython:: python - s.dt.tz_localize('UTC').dt.tz_convert('US/Eastern') + s.dt.tz_localize("UTC").dt.tz_convert("US/Eastern") You can also format datetime values as strings with :meth:`Series.dt.strftime` which supports the same format as the standard :meth:`~datetime.datetime.strftime`. @@ -1684,23 +1713,23 @@ supports the same format as the standard :meth:`~datetime.datetime.strftime`. .. 
ipython:: python # DatetimeIndex - s = pd.Series(pd.date_range('20130101', periods=4)) + s = pd.Series(pd.date_range("20130101", periods=4)) s - s.dt.strftime('%Y/%m/%d') + s.dt.strftime("%Y/%m/%d") .. ipython:: python # PeriodIndex - s = pd.Series(pd.period_range('20130101', periods=4)) + s = pd.Series(pd.period_range("20130101", periods=4)) s - s.dt.strftime('%Y/%m/%d') + s.dt.strftime("%Y/%m/%d") The ``.dt`` accessor works for period and timedelta dtypes. .. ipython:: python # period - s = pd.Series(pd.period_range('20130101', periods=4, freq='D')) + s = pd.Series(pd.period_range("20130101", periods=4, freq="D")) s s.dt.year s.dt.day @@ -1708,7 +1737,7 @@ The ``.dt`` accessor works for period and timedelta dtypes. .. ipython:: python # timedelta - s = pd.Series(pd.timedelta_range('1 day 00:00:05', periods=4, freq='s')) + s = pd.Series(pd.timedelta_range("1 day 00:00:05", periods=4, freq="s")) s s.dt.days s.dt.seconds @@ -1729,8 +1758,9 @@ built-in string methods. For example: .. ipython:: python - s = pd.Series(['A', 'B', 'C', 'Aaba', 'Baca', np.nan, 'CABA', 'dog', 'cat'], - dtype="string") + s = pd.Series( + ["A", "B", "C", "Aaba", "Baca", np.nan, "CABA", "dog", "cat"], dtype="string" + ) s.str.lower() Powerful pattern-matching methods are provided as well, but note that @@ -1765,13 +1795,15 @@ used to sort a pandas object by its index levels. .. ipython:: python - df = pd.DataFrame({ - 'one': pd.Series(np.random.randn(3), index=['a', 'b', 'c']), - 'two': pd.Series(np.random.randn(4), index=['a', 'b', 'c', 'd']), - 'three': pd.Series(np.random.randn(3), index=['b', 'c', 'd'])}) + df = pd.DataFrame( + { + "one": pd.Series(np.random.randn(3), index=["a", "b", "c"]), + "two": pd.Series(np.random.randn(4), index=["a", "b", "c", "d"]), + "three": pd.Series(np.random.randn(3), index=["b", "c", "d"]), + } + ) - unsorted_df = df.reindex(index=['a', 'd', 'c', 'b'], - columns=['three', 'two', 'one']) + unsorted_df = df.reindex(index=["a", "d", "c", "b"], columns=["three", "two", "one"]) unsorted_df # DataFrame @@ -1780,7 +1812,7 @@ used to sort a pandas object by its index levels. unsorted_df.sort_index(axis=1) # Series - unsorted_df['three'].sort_index() + unsorted_df["three"].sort_index() .. _basics.sort_index_key: @@ -1792,11 +1824,9 @@ the key is applied per-level to the levels specified by ``level``. .. ipython:: python - s1 = pd.DataFrame({ - "a": ['B', 'a', 'C'], - "b": [1, 2, 3], - "c": [2, 3, 4] - }).set_index(list("ab")) + s1 = pd.DataFrame({"a": ["B", "a", "C"], "b": [1, 2, 3], "c": [2, 3, 4]}).set_index( + list("ab") + ) s1 .. ipython:: python @@ -1819,16 +1849,14 @@ to use to determine the sorted order. .. ipython:: python - df1 = pd.DataFrame({'one': [2, 1, 1, 1], - 'two': [1, 3, 2, 4], - 'three': [5, 4, 3, 2]}) - df1.sort_values(by='two') + df1 = pd.DataFrame({"one": [2, 1, 1, 1], "two": [1, 3, 2, 4], "three": [5, 4, 3, 2]}) + df1.sort_values(by="two") The ``by`` parameter can take a list of column names, e.g.: .. ipython:: python - df1[['one', 'two', 'three']].sort_values(by=['one', 'two']) + df1[["one", "two", "three"]].sort_values(by=["one", "two"]) These methods have special treatment of NA values via the ``na_position`` argument: @@ -1837,7 +1865,7 @@ argument: s[2] = np.nan s.sort_values() - s.sort_values(na_position='first') + s.sort_values(na_position="first") .. _basics.sort_value_key: @@ -1848,7 +1876,7 @@ to apply to the values being sorted. .. ipython:: python - s1 = pd.Series(['B', 'a', 'C']) + s1 = pd.Series(["B", "a", "C"]) .. 
 .. _basics.sort_value_key:
 
@@ -1848,7 +1876,7 @@ to apply to the values being sorted.
 
 .. ipython:: python
 
-    s1 = pd.Series(['B', 'a', 'C'])
+    s1 = pd.Series(["B", "a", "C"])
 
 .. ipython:: python
@@ -1862,12 +1890,12 @@ a Series, e.g.
 
 .. ipython:: python
 
-    df = pd.DataFrame({"a": ['B', 'a', 'C'], "b": [1, 2, 3]})
+    df = pd.DataFrame({"a": ["B", "a", "C"], "b": [1, 2, 3]})
 
 .. ipython:: python
 
-    df.sort_values(by='a')
-    df.sort_values(by='a', key=lambda col: col.str.lower())
+    df.sort_values(by="a")
+    df.sort_values(by="a", key=lambda col: col.str.lower())
 
 The name or type of each column can be used to apply different functions to
 different columns.
@@ -1883,20 +1911,20 @@ refer to either columns or index level names.
 
 .. ipython:: python
 
     # Build MultiIndex
-    idx = pd.MultiIndex.from_tuples([('a', 1), ('a', 2), ('a', 2),
-                                     ('b', 2), ('b', 1), ('b', 1)])
-    idx.names = ['first', 'second']
+    idx = pd.MultiIndex.from_tuples(
+        [("a", 1), ("a", 2), ("a", 2), ("b", 2), ("b", 1), ("b", 1)]
+    )
+    idx.names = ["first", "second"]
 
     # Build DataFrame
-    df_multi = pd.DataFrame({'A': np.arange(6, 0, -1)},
-                            index=idx)
+    df_multi = pd.DataFrame({"A": np.arange(6, 0, -1)}, index=idx)
     df_multi
 
 Sort by 'second' (index) and 'A' (column)
 
 .. ipython:: python
 
-    df_multi.sort_values(by=['second', 'A'])
+    df_multi.sort_values(by=["second", "A"])
 
 .. note::
 
@@ -1917,8 +1945,8 @@ Series has the :meth:`~Series.searchsorted` method, which works similarly to
 
     ser = pd.Series([1, 2, 3])
     ser.searchsorted([0, 3])
     ser.searchsorted([0, 4])
-    ser.searchsorted([1, 3], side='right')
-    ser.searchsorted([1, 3], side='left')
+    ser.searchsorted([1, 3], side="right")
+    ser.searchsorted([1, 3], side="left")
 
     ser = pd.Series([3, 1, 2])
     ser.searchsorted([0, 3], sorter=np.argsort(ser))
 
@@ -1943,13 +1971,17 @@ faster than sorting the entire Series and calling ``head(n)`` on the result.
 
 .. ipython:: python
 
-    df = pd.DataFrame({'a': [-2, -1, 1, 10, 8, 11, -1],
-                       'b': list('abdceff'),
-                       'c': [1.0, 2.0, 4.0, 3.2, np.nan, 3.0, 4.0]})
-    df.nlargest(3, 'a')
-    df.nlargest(5, ['a', 'c'])
-    df.nsmallest(3, 'a')
-    df.nsmallest(5, ['a', 'c'])
+    df = pd.DataFrame(
+        {
+            "a": [-2, -1, 1, 10, 8, 11, -1],
+            "b": list("abdceff"),
+            "c": [1.0, 2.0, 4.0, 3.2, np.nan, 3.0, 4.0],
+        }
+    )
+    df.nlargest(3, "a")
+    df.nlargest(5, ["a", "c"])
+    df.nsmallest(3, "a")
+    df.nsmallest(5, ["a", "c"])
 
 .. _basics.multiindex_sorting:
 
@@ -1962,10 +1994,8 @@ all levels to ``by``.
 
 .. ipython:: python
 
-    df1.columns = pd.MultiIndex.from_tuples([('a', 'one'),
-                                             ('a', 'two'),
-                                             ('b', 'three')])
-    df1.sort_values(by=('a', 'two'))
+    df1.columns = pd.MultiIndex.from_tuples([("a", "one"), ("a", "two"), ("b", "three")])
+    df1.sort_values(by=("a", "two"))
 
 
 Copying
@@ -2048,13 +2078,17 @@ with the data type of each column.
 
 .. ipython:: python
 
-    dft = pd.DataFrame({'A': np.random.rand(3),
-                        'B': 1,
-                        'C': 'foo',
-                        'D': pd.Timestamp('20010102'),
-                        'E': pd.Series([1.0] * 3).astype('float32'),
-                        'F': False,
-                        'G': pd.Series([1] * 3, dtype='int8')})
+    dft = pd.DataFrame(
+        {
+            "A": np.random.rand(3),
+            "B": 1,
+            "C": "foo",
+            "D": pd.Timestamp("20010102"),
+            "E": pd.Series([1.0] * 3).astype("float32"),
+            "F": False,
+            "G": pd.Series([1] * 3, dtype="int8"),
+        }
+    )
     dft
     dft.dtypes
 
@@ -2062,7 +2096,7 @@ On a ``Series`` object, use the :attr:`~Series.dtype` attribute.
 
 .. ipython:: python
 
-    dft['A'].dtype
+    dft["A"].dtype
 
 If a pandas object contains data with multiple dtypes *in a single column*, the
 dtype of the column will be chosen to accommodate all of the data types
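The dtype hunks above reformat a frame that deliberately mixes several dtypes; ``DataFrame.dtypes.value_counts()`` (mentioned just below) summarizes such frames at a glance. A trimmed-down, hedged sketch with an invented frame:

.. code-block:: python

    import numpy as np
    import pandas as pd

    # count how many columns hold each dtype
    dft = pd.DataFrame(
        {"A": np.random.rand(3), "B": 1, "G": pd.Series([1] * 3, dtype="int8")}
    )
    dft.dtypes.value_counts()  # one float64, one int64, one int8 column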
@@ -2071,10 +2105,10 @@ dtype of the column will be chosen to accommodate all of the data types
 
 .. ipython:: python
 
     # these ints are coerced to floats
-    pd.Series([1, 2, 3, 4, 5, 6.])
+    pd.Series([1, 2, 3, 4, 5, 6.0])
 
     # string data forces an ``object`` dtype
-    pd.Series([1, 2, 3, 6., 'foo'])
+    pd.Series([1, 2, 3, 6.0, "foo"])
 
 The number of columns of each type in a ``DataFrame`` can be found by calling
 ``DataFrame.dtypes.value_counts()``.
@@ -2090,13 +2124,16 @@ different numeric dtypes will **NOT** be combined. The following example will gi
 
 .. ipython:: python
 
-    df1 = pd.DataFrame(np.random.randn(8, 1), columns=['A'], dtype='float32')
+    df1 = pd.DataFrame(np.random.randn(8, 1), columns=["A"], dtype="float32")
     df1
     df1.dtypes
-    df2 = pd.DataFrame({'A': pd.Series(np.random.randn(8), dtype='float16'),
-                        'B': pd.Series(np.random.randn(8)),
-                        'C': pd.Series(np.array(np.random.randn(8),
-                                                dtype='uint8'))})
+    df2 = pd.DataFrame(
+        {
+            "A": pd.Series(np.random.randn(8), dtype="float16"),
+            "B": pd.Series(np.random.randn(8)),
+            "C": pd.Series(np.array(np.random.randn(8), dtype="uint8")),
+        }
+    )
     df2
     df2.dtypes
 
@@ -2109,9 +2146,9 @@ The following will all result in ``int64`` dtypes.
 
 .. ipython:: python
 
-    pd.DataFrame([1, 2], columns=['a']).dtypes
-    pd.DataFrame({'a': [1, 2]}).dtypes
-    pd.DataFrame({'a': 1}, index=list(range(2))).dtypes
+    pd.DataFrame([1, 2], columns=["a"]).dtypes
+    pd.DataFrame({"a": [1, 2]}).dtypes
+    pd.DataFrame({"a": 1}, index=list(range(2))).dtypes
 
 Note that NumPy will choose *platform-dependent* types when creating arrays.
 The following **WILL** result in ``int32`` on a 32-bit platform.
@@ -2159,15 +2196,15 @@ then the more *general* one will be used as the result of the operation.
 
     df3.dtypes
 
     # conversion of dtypes
-    df3.astype('float32').dtypes
+    df3.astype("float32").dtypes
 
 
 Convert a subset of columns to a specified type using :meth:`~DataFrame.astype`.
 
 .. ipython:: python
 
-    dft = pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6], 'c': [7, 8, 9]})
-    dft[['a', 'b']] = dft[['a', 'b']].astype(np.uint8)
+    dft = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]})
+    dft[["a", "b"]] = dft[["a", "b"]].astype(np.uint8)
     dft
     dft.dtypes
 
@@ -2175,8 +2212,8 @@ Convert certain columns to a specific dtype by passing a dict to :meth:`~DataFra
 
 .. ipython:: python
 
-    dft1 = pd.DataFrame({'a': [1, 0, 1], 'b': [4, 5, 6], 'c': [7, 8, 9]})
-    dft1 = dft1.astype({'a': np.bool, 'c': np.float64})
+    dft1 = pd.DataFrame({"a": [1, 0, 1], "b": [4, 5, 6], "c": [7, 8, 9]})
+    dft1 = dft1.astype({"a": np.bool, "c": np.float64})
     dft1
     dft1.dtypes
 
@@ -2188,9 +2225,9 @@
 
    .. ipython:: python
 
-      dft = pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6], 'c': [7, 8, 9]})
-      dft.loc[:, ['a', 'b']].astype(np.uint8).dtypes
-      dft.loc[:, ['a', 'b']] = dft.loc[:, ['a', 'b']].astype(np.uint8)
+      dft = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]})
+      dft.loc[:, ["a", "b"]].astype(np.uint8).dtypes
+      dft.loc[:, ["a", "b"]] = dft.loc[:, ["a", "b"]].astype(np.uint8)
       dft.dtypes
 
 .. _basics.object_conversion:
@@ -2206,10 +2243,10 @@ to the correct type.
 
 .. ipython:: python
 
     import datetime
-    df = pd.DataFrame([[1, 2],
-                       ['a', 'b'],
-                       [datetime.datetime(2016, 3, 2),
-                        datetime.datetime(2016, 3, 2)]])
+
+    df = pd.DataFrame(
+        [[1, 2], ["a", "b"], [datetime.datetime(2016, 3, 2), datetime.datetime(2016, 3, 2)]]
+    )
     df = df.T
     df
     df.dtypes
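A hedged footnote to the ``astype`` hunks above: string dtype aliases can stand in for the NumPy types used there, which keeps examples independent of NumPy's top-level names. A sketch with an invented frame:

.. code-block:: python

    import pandas as pd

    # per-column casts via a dict, using string dtype names
    df = pd.DataFrame({"a": [1, 0, 1], "b": [4.0, 5.0, 6.0]})
    df.astype({"a": "bool", "b": "float32"}).dtypes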
@@ -2228,7 +2265,7 @@ hard conversion of objects to a specified type:
 
 .. ipython:: python
 
-    m = ['1.1', 2, 3]
+    m = ["1.1", 2, 3]
     pd.to_numeric(m)
 
 * :meth:`~pandas.to_datetime` (conversion to datetime objects)
 
 .. ipython:: python
 
     import datetime
+
-    m = ['2016-07-09', datetime.datetime(2016, 3, 2)]
+    m = ["2016-07-09", datetime.datetime(2016, 3, 2)]
     pd.to_datetime(m)
 
 * :meth:`~pandas.to_timedelta` (conversion to timedelta objects)
 
 .. ipython:: python
 
-    m = ['5us', pd.Timedelta('1day')]
+    m = ["5us", pd.Timedelta("1day")]
     pd.to_timedelta(m)
 
 To force a conversion, we can pass in an ``errors`` argument, which specifies how pandas should deal with elements
@@ -2256,14 +2294,15 @@ non-conforming elements intermixed that you want to represent as missing:
 
 .. ipython:: python
 
     import datetime
-    m = ['apple', datetime.datetime(2016, 3, 2)]
-    pd.to_datetime(m, errors='coerce')
 
-    m = ['apple', 2, 3]
-    pd.to_numeric(m, errors='coerce')
+    m = ["apple", datetime.datetime(2016, 3, 2)]
+    pd.to_datetime(m, errors="coerce")
 
-    m = ['apple', pd.Timedelta('1day')]
-    pd.to_timedelta(m, errors='coerce')
+    m = ["apple", 2, 3]
+    pd.to_numeric(m, errors="coerce")
+
+    m = ["apple", pd.Timedelta("1day")]
+    pd.to_timedelta(m, errors="coerce")
 
 The ``errors`` parameter has a third option of ``errors='ignore'``, which will simply return the passed-in data if it
 encounters any errors with the conversion to a desired data type:
@@ -2271,25 +2310,26 @@ encounters any errors with the conversion to a desired data type:
 
 .. ipython:: python
 
     import datetime
-    m = ['apple', datetime.datetime(2016, 3, 2)]
-    pd.to_datetime(m, errors='ignore')
 
-    m = ['apple', 2, 3]
-    pd.to_numeric(m, errors='ignore')
+    m = ["apple", datetime.datetime(2016, 3, 2)]
+    pd.to_datetime(m, errors="ignore")
 
-    m = ['apple', pd.Timedelta('1day')]
-    pd.to_timedelta(m, errors='ignore')
+    m = ["apple", 2, 3]
+    pd.to_numeric(m, errors="ignore")
+
+    m = ["apple", pd.Timedelta("1day")]
+    pd.to_timedelta(m, errors="ignore")
 
 In addition to object conversion, :meth:`~pandas.to_numeric` provides another argument ``downcast``, which gives the
 option of downcasting the newly (or already) numeric data to a smaller dtype, which can conserve memory:
 
 .. ipython:: python
 
-    m = ['1', 2, 3]
-    pd.to_numeric(m, downcast='integer')   # smallest signed int dtype
-    pd.to_numeric(m, downcast='signed')    # same as 'integer'
-    pd.to_numeric(m, downcast='unsigned')  # smallest unsigned int dtype
-    pd.to_numeric(m, downcast='float')     # smallest float dtype
+    m = ["1", 2, 3]
+    pd.to_numeric(m, downcast="integer")  # smallest signed int dtype
+    pd.to_numeric(m, downcast="signed")  # same as 'integer'
+    pd.to_numeric(m, downcast="unsigned")  # smallest unsigned int dtype
+    pd.to_numeric(m, downcast="float")  # smallest float dtype
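To make the memory point behind ``downcast`` concrete, a small, hedged sketch (exact byte counts vary by platform and pandas version):

.. code-block:: python

    import pandas as pd

    s = pd.Series(range(1000))  # int64 by default
    s.memory_usage(deep=True)
    # these values fit in an unsigned 16-bit integer, so the result is much smaller
    pd.to_numeric(s, downcast="unsigned").memory_usage(deep=True)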
 As these methods apply only to one-dimensional arrays, lists, or scalars, they
 cannot be used directly on multi-dimensional objects such
 as DataFrames. However, with :meth:`~pandas.DataFrame.apply`, we can "apply" the function over each column efficiently:
@@ -2297,16 +2337,16 @@ as DataFrames. However, with :meth:`~pandas.DataFrame.apply`, we can "apply" the
 
 .. ipython:: python
 
     import datetime
+
-    df = pd.DataFrame([
-        ['2016-07-09', datetime.datetime(2016, 3, 2)]] * 2, dtype='O')
+    df = pd.DataFrame([["2016-07-09", datetime.datetime(2016, 3, 2)]] * 2, dtype="O")
     df
     df.apply(pd.to_datetime)
 
-    df = pd.DataFrame([['1.1', 2, 3]] * 2, dtype='O')
+    df = pd.DataFrame([["1.1", 2, 3]] * 2, dtype="O")
     df
     df.apply(pd.to_numeric)
 
-    df = pd.DataFrame([['5us', pd.Timedelta('1day')]] * 2, dtype='O')
+    df = pd.DataFrame([["5us", pd.Timedelta("1day")]] * 2, dtype="O")
     df
     df.apply(pd.to_timedelta)
 
@@ -2319,8 +2359,8 @@ See also :ref:`Support for integer NA <gotchas.intna>`.
 
 .. ipython:: python
 
-    dfi = df3.astype('int32')
-    dfi['E'] = 1
+    dfi = df3.astype("int32")
+    dfi["E"] = 1
     dfi
     dfi.dtypes
 
@@ -2333,7 +2373,7 @@ While float dtypes are unchanged.
 
 .. ipython:: python
 
     dfa = df3.copy()
-    dfa['A'] = dfa['A'].astype('float32')
+    dfa["A"] = dfa["A"].astype("float32")
     dfa.dtypes
 
     casted = dfa[df2 > 0]
@@ -2353,18 +2393,22 @@ dtypes:
 
 .. ipython:: python
 
-    df = pd.DataFrame({'string': list('abc'),
-                       'int64': list(range(1, 4)),
-                       'uint8': np.arange(3, 6).astype('u1'),
-                       'float64': np.arange(4.0, 7.0),
-                       'bool1': [True, False, True],
-                       'bool2': [False, True, False],
-                       'dates': pd.date_range('now', periods=3),
-                       'category': pd.Series(list("ABC")).astype('category')})
-    df['tdeltas'] = df.dates.diff()
-    df['uint64'] = np.arange(3, 6).astype('u8')
-    df['other_dates'] = pd.date_range('20130101', periods=3)
-    df['tz_aware_dates'] = pd.date_range('20130101', periods=3, tz='US/Eastern')
+    df = pd.DataFrame(
+        {
+            "string": list("abc"),
+            "int64": list(range(1, 4)),
+            "uint8": np.arange(3, 6).astype("u1"),
+            "float64": np.arange(4.0, 7.0),
+            "bool1": [True, False, True],
+            "bool2": [False, True, False],
+            "dates": pd.date_range("now", periods=3),
+            "category": pd.Series(list("ABC")).astype("category"),
+        }
+    )
+    df["tdeltas"] = df.dates.diff()
+    df["uint64"] = np.arange(3, 6).astype("u8")
+    df["other_dates"] = pd.date_range("20130101", periods=3)
+    df["tz_aware_dates"] = pd.date_range("20130101", periods=3, tz="US/Eastern")
     df
 
 And the dtypes:
@@ -2388,7 +2432,7 @@ You can also pass the name of a dtype in the `NumPy dtype hierarchy
 
 .. ipython:: python
 
-    df.select_dtypes(include=['bool'])
+    df.select_dtypes(include=["bool"])
 
 :meth:`~pandas.DataFrame.select_dtypes` also works with generic dtypes.
@@ -2397,13 +2441,13 @@ integers:
 
 .. ipython:: python
 
-    df.select_dtypes(include=['number', 'bool'], exclude=['unsignedinteger'])
+    df.select_dtypes(include=["number", "bool"], exclude=["unsignedinteger"])
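One wrinkle worth a hedged example next to the ``select_dtypes`` hunks above: tz-aware datetimes are a distinct dtype from naive ones, so each needs its own ``include`` entry (frame invented for illustration):

.. code-block:: python

    import pandas as pd

    df = pd.DataFrame(
        {
            "naive": pd.date_range("20130101", periods=2),
            "aware": pd.date_range("20130101", periods=2, tz="US/Eastern"),
        }
    )
    df.select_dtypes(include=["datetime64"]).columns  # naive only
    df.select_dtypes(include=["datetimetz"]).columns  # tz-aware only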
 To select string columns you must use the ``object`` dtype:
 
 .. ipython:: python
 
-    df.select_dtypes(include=['object'])
+    df.select_dtypes(include=["object"])
 
 To see all the child dtypes of a generic ``dtype`` like ``numpy.number``, you
 can define a function that returns a tree of child dtypes:

From 7031ad397959cdb71390568cd6ed04c6c16c4f8a Mon Sep 17 00:00:00 2001
From: Maria Ilie
Date: Fri, 2 Oct 2020 14:11:31 -0700
Subject: [PATCH 2/4] fixing setup.cfg

---
 setup.cfg | 1 +
 1 file changed, 1 insertion(+)

diff --git a/setup.cfg b/setup.cfg
index 73986f692b6cd..211e8ebede8b6 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -41,6 +41,7 @@ bootstrap = pd
 # (in some cases we want to do it to show users)
 ignore = E402,  # module level import not at top of file
          W503,  # line break before binary operator
+         E203,  # space before : (needed for how black formats slicing)
          # Classes/functions in different blocks can generate those errors
          E302,  # expected 2 blank lines, found 0
          E305,  # expected 2 blank lines after class or function definition, found 0

From 9616e061cbc3f2035a9c1944d4f638f40a10dae1 Mon Sep 17 00:00:00 2001
From: Maria Ilie
Date: Sun, 4 Oct 2020 13:34:53 -0700
Subject: [PATCH 3/4] fixed indentation

---
 doc/source/user_guide/10min.rst | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/doc/source/user_guide/10min.rst b/doc/source/user_guide/10min.rst
index f7864b4a5b9cc..08f83a4674ada 100644
--- a/doc/source/user_guide/10min.rst
+++ b/doc/source/user_guide/10min.rst
@@ -670,7 +670,7 @@ pandas can include categorical data in a :class:`DataFrame`. For full docs, see
     df = pd.DataFrame(
         {"id": [1, 2, 3, 4, 5, 6], "raw_grade": ["a", "b", "b", "a", "a", "e"]}
     )
-    
+
 Convert the raw grades to a categorical data type.

From 9d00e5efa991409619fb86a862fdc9ff96110e87 Mon Sep 17 00:00:00 2001
From: Maria-Alexandra Ilie <30919494+maria-ilie@users.noreply.github.com>
Date: Sun, 4 Oct 2020 18:35:20 -0700
Subject: [PATCH 4/4] Update doc/source/user_guide/basics.rst

Co-authored-by: Daniel Saxton <2658661+dsaxton@users.noreply.github.com>
---
 doc/source/user_guide/basics.rst | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/doc/source/user_guide/basics.rst b/doc/source/user_guide/basics.rst
index 2a403035d1706..9ef91a9604d39 100644
--- a/doc/source/user_guide/basics.rst
+++ b/doc/source/user_guide/basics.rst
@@ -809,7 +809,7 @@ Is equivalent to:
 
 .. ipython:: python
 
-    (df_p.pipe(extract_city_name).pipe(add_country_name, country_name="US"))
+    df_p.pipe(extract_city_name).pipe(add_country_name, country_name="US")
 
 Pandas encourages the second style, which is known as method chaining. ``pipe``
 makes it easy to use your own or another library's functions
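Since the final hunk only drops redundant parentheses, here is a runnable, hedged sketch of the method chain it touches; the helper bodies below are plausible reconstructions written for this example, not the doc's exact code:

.. code-block:: python

    import pandas as pd

    def extract_city_name(df):
        # "Chicago, IL" -> "Chicago"; reconstruction of the doc's helper
        df = df.copy()
        df["city_name"] = df["city_and_code"].str.split(",").str.get(0)
        return df

    def add_country_name(df, country_name=None):
        # append a country suffix; reconstruction of the doc's helper
        df = df.copy()
        df["city_and_country"] = df["city_name"] + country_name
        return df

    df_p = pd.DataFrame({"city_and_code": ["Chicago, IL"]})
    df_p.pipe(extract_city_name).pipe(add_country_name, country_name="US")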