diff --git a/doc/source/whatsnew/v0.10.0.rst b/doc/source/whatsnew/v0.10.0.rst
index 443250592a4a7..aa2749c85a232 100644
--- a/doc/source/whatsnew/v0.10.0.rst
+++ b/doc/source/whatsnew/v0.10.0.rst
@@ -49,8 +49,8 @@ talking about:
    :okwarning:

    import pandas as pd
-   df = pd.DataFrame(np.random.randn(6, 4),
-                     index=pd.date_range('1/1/2000', periods=6))
+
+   df = pd.DataFrame(np.random.randn(6, 4), index=pd.date_range("1/1/2000", periods=6))
    df
    # deprecated now
    df - df[0]
@@ -184,12 +184,14 @@ labeled the aggregated group with the end of the interval: the next day).

    import io
-   data = ('a,b,c\n'
-           '1,Yes,2\n'
-           '3,No,4')
+   data = """
+   a,b,c
+   1,Yes,2
+   3,No,4
+   """
    print(data)
    pd.read_csv(io.StringIO(data), header=None)
-   pd.read_csv(io.StringIO(data), header=None, prefix='X')
+   pd.read_csv(io.StringIO(data), header=None, prefix="X")

 - Values like ``'Yes'`` and ``'No'`` are not interpreted as boolean by default,
   though this can be controlled by new ``true_values`` and ``false_values``
@@ -199,7 +201,7 @@ labeled the aggregated group with the end of the interval: the next day).

    print(data)
    pd.read_csv(io.StringIO(data))
-   pd.read_csv(io.StringIO(data), true_values=['Yes'], false_values=['No'])
+   pd.read_csv(io.StringIO(data), true_values=["Yes"], false_values=["No"])

 - The file parsers will not recognize non-string values arising from a
   converter function as NA if passed in the ``na_values`` argument. It's better
@@ -210,10 +212,10 @@ labeled the aggregated group with the end of the interval: the next day).

 .. ipython:: python

-   s = pd.Series([np.nan, 1., 2., np.nan, 4])
+   s = pd.Series([np.nan, 1.0, 2.0, np.nan, 4])
    s
    s.fillna(0)
-   s.fillna(method='pad')
+   s.fillna(method="pad")

 Convenience methods ``ffill`` and ``bfill`` have been added:
@@ -229,7 +231,8 @@ Convenience methods ``ffill`` and ``bfill`` have been added:
 .. ipython:: python

    def f(x):
-       return pd.Series([x, x**2], index=['x', 'x^2'])
+       return pd.Series([x, x ** 2], index=["x", "x^2"])
+
    s = pd.Series(np.random.rand(5))
    s
@@ -272,20 +275,20 @@ The old behavior of printing out summary information can be achieved via the

 .. ipython:: python

-   pd.set_option('expand_frame_repr', False)
+   pd.set_option("expand_frame_repr", False)
    wide_frame

 .. ipython:: python
    :suppress:

-   pd.reset_option('expand_frame_repr')
+   pd.reset_option("expand_frame_repr")

 The width of each line can be changed via 'line_width' (80 by default):

 .. code-block:: python

-   pd.set_option('line_width', 40)
+   pd.set_option("line_width", 40)
    wide_frame
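A quick aside on the ``fillna`` hunk above: the ``ffill``/``bfill`` convenience methods it mentions are shorthand for ``fillna(method=...)``, so the re-quoting is purely cosmetic. A minimal sketch of that equivalence (illustrative, not part of the patch):

.. code-block:: python

    import numpy as np
    import pandas as pd

    s = pd.Series([np.nan, 1.0, 2.0, np.nan, 4])

    # ffill() is shorthand for fillna(method="ffill"); "pad" is an alias
    assert s.ffill().equals(s.fillna(method="pad"))
    # bfill() is shorthand for fillna(method="bfill")
    assert s.bfill().equals(s.fillna(method="bfill"))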
diff --git a/doc/source/whatsnew/v0.10.1.rst b/doc/source/whatsnew/v0.10.1.rst
index 3dc680c46a4d9..d71a0d5ca68cd 100644
--- a/doc/source/whatsnew/v0.10.1.rst
+++ b/doc/source/whatsnew/v0.10.1.rst
@@ -45,29 +45,31 @@ You may need to upgrade your existing data files. Please visit the

    import os
-   os.remove('store.h5')
+   os.remove("store.h5")

 You can designate (and index) certain columns that you want to be able to
 perform queries on a table, by passing a list to ``data_columns``

 .. ipython:: python

-   store = pd.HDFStore('store.h5')
-   df = pd.DataFrame(np.random.randn(8, 3),
-                     index=pd.date_range('1/1/2000', periods=8),
-                     columns=['A', 'B', 'C'])
-   df['string'] = 'foo'
-   df.loc[df.index[4:6], 'string'] = np.nan
-   df.loc[df.index[7:9], 'string'] = 'bar'
-   df['string2'] = 'cool'
+   store = pd.HDFStore("store.h5")
+   df = pd.DataFrame(
+       np.random.randn(8, 3),
+       index=pd.date_range("1/1/2000", periods=8),
+       columns=["A", "B", "C"],
+   )
+   df["string"] = "foo"
+   df.loc[df.index[4:6], "string"] = np.nan
+   df.loc[df.index[7:9], "string"] = "bar"
+   df["string2"] = "cool"
    df

    # on-disk operations
-   store.append('df', df, data_columns=['B', 'C', 'string', 'string2'])
-   store.select('df', "B>0 and string=='foo'")
+   store.append("df", df, data_columns=["B", "C", "string", "string2"])
+   store.select("df", "B>0 and string=='foo'")

    # this is in-memory version of this type of selection
-   df[(df.B > 0) & (df.string == 'foo')]
+   df[(df.B > 0) & (df.string == "foo")]

 Retrieving unique values in an indexable or data column.
@@ -75,19 +77,19 @@ Retrieving unique values in an indexable or data column.

    # note that this is deprecated as of 0.14.0
    # can be replicated by: store.select_column('df','index').unique()
-   store.unique('df', 'index')
-   store.unique('df', 'string')
+   store.unique("df", "index")
+   store.unique("df", "string")

 You can now store ``datetime64`` in data columns

 .. ipython:: python

    df_mixed = df.copy()
-   df_mixed['datetime64'] = pd.Timestamp('20010102')
-   df_mixed.loc[df_mixed.index[3:4], ['A', 'B']] = np.nan
+   df_mixed["datetime64"] = pd.Timestamp("20010102")
+   df_mixed.loc[df_mixed.index[3:4], ["A", "B"]] = np.nan

-   store.append('df_mixed', df_mixed)
-   df_mixed1 = store.select('df_mixed')
+   store.append("df_mixed", df_mixed)
+   df_mixed1 = store.select("df_mixed")
    df_mixed1
    df_mixed1.dtypes.value_counts()
@@ -97,7 +99,7 @@ columns, this is equivalent to passing a

 .. ipython:: python

-   store.select('df', columns=['A', 'B'])
+   store.select("df", columns=["A", "B"])

 ``HDFStore`` now serializes MultiIndex dataframes when appending tables.
@@ -160,29 +162,31 @@ combined result, by using ``where`` on a selector table.

 .. ipython:: python

-   df_mt = pd.DataFrame(np.random.randn(8, 6),
-                        index=pd.date_range('1/1/2000', periods=8),
-                        columns=['A', 'B', 'C', 'D', 'E', 'F'])
-   df_mt['foo'] = 'bar'
+   df_mt = pd.DataFrame(
+       np.random.randn(8, 6),
+       index=pd.date_range("1/1/2000", periods=8),
+       columns=["A", "B", "C", "D", "E", "F"],
+   )
+   df_mt["foo"] = "bar"

    # you can also create the tables individually
-   store.append_to_multiple({'df1_mt': ['A', 'B'], 'df2_mt': None},
-                            df_mt, selector='df1_mt')
+   store.append_to_multiple(
+       {"df1_mt": ["A", "B"], "df2_mt": None}, df_mt, selector="df1_mt"
+   )
    store

    # individual tables were created
-   store.select('df1_mt')
-   store.select('df2_mt')
+   store.select("df1_mt")
+   store.select("df2_mt")

    # as a multiple
-   store.select_as_multiple(['df1_mt', 'df2_mt'], where=['A>0', 'B>0'],
-                            selector='df1_mt')
+   store.select_as_multiple(["df1_mt", "df2_mt"], where=["A>0", "B>0"], selector="df1_mt")

 .. ipython:: python
    :suppress:

    store.close()
-   os.remove('store.h5')
+   os.remove("store.h5")

 **Enhancements**
diff --git a/doc/source/whatsnew/v0.12.0.rst b/doc/source/whatsnew/v0.12.0.rst
index 9971ae22822f6..4de76510c6bc1 100644
--- a/doc/source/whatsnew/v0.12.0.rst
+++ b/doc/source/whatsnew/v0.12.0.rst
@@ -47,7 +47,7 @@ API changes
 .. ipython:: python

-   p = pd.DataFrame({'first': [4, 5, 8], 'second': [0, 0, 3]})
+   p = pd.DataFrame({"first": [4, 5, 8], "second": [0, 0, 3]})
    p % 0
    p % p
    p / p
@@ -95,8 +95,8 @@ API changes

 .. ipython:: python

-   df = pd.DataFrame(range(5), index=list('ABCDE'), columns=['a'])
-   mask = (df.a % 2 == 0)
+   df = pd.DataFrame(range(5), index=list("ABCDE"), columns=["a"])
+   mask = df.a % 2 == 0
    mask

    # this is what you should use
@@ -141,21 +141,24 @@ API changes

   .. code-block:: python

      from pandas.io.parsers import ExcelFile
-     xls = ExcelFile('path_to_file.xls')
-     xls.parse('Sheet1', index_col=None, na_values=['NA'])
+
+     xls = ExcelFile("path_to_file.xls")
+     xls.parse("Sheet1", index_col=None, na_values=["NA"])

   With

   .. code-block:: python

      import pandas as pd
-     pd.read_excel('path_to_file.xls', 'Sheet1', index_col=None, na_values=['NA'])
+
+     pd.read_excel("path_to_file.xls", "Sheet1", index_col=None, na_values=["NA"])

 - added top-level function ``read_sql`` that is equivalent to the following

   .. code-block:: python

      from pandas.io.sql import read_frame
+
      read_frame(...)

 - ``DataFrame.to_html`` and ``DataFrame.to_latex`` now accept a path for
@@ -200,7 +203,7 @@ IO enhancements

 .. ipython:: python
    :okwarning:

-   df = pd.DataFrame({'a': range(3), 'b': list('abc')})
+   df = pd.DataFrame({"a": range(3), "b": list("abc")})
    print(df)
    html = df.to_html()
    alist = pd.read_html(html, index_col=0)
@@ -248,16 +251,18 @@ IO enhancements

   .. ipython:: python

      from pandas._testing import makeCustomDataframe as mkdf
+
      df = mkdf(5, 3, r_idx_nlevels=2, c_idx_nlevels=4)
-     df.to_csv('mi.csv')
-     print(open('mi.csv').read())
-     pd.read_csv('mi.csv', header=[0, 1, 2, 3], index_col=[0, 1])
+     df.to_csv("mi.csv")
+     print(open("mi.csv").read())
+     pd.read_csv("mi.csv", header=[0, 1, 2, 3], index_col=[0, 1])

   .. ipython:: python
      :suppress:

      import os
-     os.remove('mi.csv')
+
+     os.remove("mi.csv")

 - Support for ``HDFStore`` (via ``PyTables 3.0.0``) on Python3
@@ -304,8 +309,8 @@ Other enhancements

   .. ipython:: python

-     df = pd.DataFrame({'a': list('ab..'), 'b': [1, 2, 3, 4]})
-     df.replace(regex=r'\s*\.\s*', value=np.nan)
+     df = pd.DataFrame({"a": list("ab.."), "b": [1, 2, 3, 4]})
+     df.replace(regex=r"\s*\.\s*", value=np.nan)

   to replace all occurrences of the string ``'.'`` with zero or more
   instances of surrounding white space with ``NaN``.
@@ -314,7 +319,7 @@ Other enhancements

   .. ipython:: python

-     df.replace('.', np.nan)
+     df.replace(".", np.nan)

   to replace all occurrences of the string ``'.'`` with ``NaN``.
@@ -359,8 +364,8 @@ Other enhancements

   .. ipython:: python

-     dff = pd.DataFrame({'A': np.arange(8), 'B': list('aabbbbcc')})
-     dff.groupby('B').filter(lambda x: len(x) > 2)
+     dff = pd.DataFrame({"A": np.arange(8), "B": list("aabbbbcc")})
+     dff.groupby("B").filter(lambda x: len(x) > 2)

   Alternatively, instead of dropping the offending groups, we can return a
   like-indexed objects where the groups that do not pass the filter are
@@ -368,7 +373,7 @@ Other enhancements

   .. ipython:: python

-     dff.groupby('B').filter(lambda x: len(x) > 2, dropna=False)
+     dff.groupby("B").filter(lambda x: len(x) > 2, dropna=False)

 - Series and DataFrame hist methods now take a ``figsize`` argument (:issue:`3834`)
@@ -397,17 +402,18 @@ Experimental features

    from pandas.tseries.offsets import CustomBusinessDay
    from datetime import datetime
+
    # As an interesting example, let's look at Egypt where
    # a Friday-Saturday weekend is observed.
-   weekmask_egypt = 'Sun Mon Tue Wed Thu'
+   weekmask_egypt = "Sun Mon Tue Wed Thu"
    # They also observe International Workers' Day so let's
    # add that for a couple of years
-   holidays = ['2012-05-01', datetime(2013, 5, 1), np.datetime64('2014-05-01')]
+   holidays = ["2012-05-01", datetime(2013, 5, 1), np.datetime64("2014-05-01")]
    bday_egypt = CustomBusinessDay(holidays=holidays, weekmask=weekmask_egypt)
    dt = datetime(2013, 4, 30)
    print(dt + 2 * bday_egypt)
    dts = pd.date_range(dt, periods=5, freq=bday_egypt)
-   print(pd.Series(dts.weekday, dts).map(pd.Series('Mon Tue Wed Thu Fri Sat Sun'.split())))
+   print(pd.Series(dts.weekday, dts).map(pd.Series("Mon Tue Wed Thu Fri Sat Sun".split())))

 Bug fixes
 ~~~~~~~~~
@@ -430,14 +436,14 @@ Bug fixes
 .. ipython:: python
    :okwarning:

-   strs = 'go', 'bow', 'joe', 'slow'
+   strs = "go", "bow", "joe", "slow"
    ds = pd.Series(strs)

    for s in ds.str:
        print(s)

    s
-   s.dropna().values.item() == 'w'
+   s.dropna().values.item() == "w"

 The last element yielded by the iterator will be a ``Series`` containing
 the last element of the longest string in the ``Series`` with all other
diff --git a/doc/source/whatsnew/v0.13.1.rst b/doc/source/whatsnew/v0.13.1.rst
index 9e416f8eeb3f1..1215786b4cccc 100644
--- a/doc/source/whatsnew/v0.13.1.rst
+++ b/doc/source/whatsnew/v0.13.1.rst
@@ -31,16 +31,16 @@ Highlights include:

 .. ipython:: python

-   df = pd.DataFrame({'A': np.array(['foo', 'bar', 'bah', 'foo', 'bar'])})
-   df['A'].iloc[0] = np.nan
+   df = pd.DataFrame({"A": np.array(["foo", "bar", "bah", "foo", "bar"])})
+   df["A"].iloc[0] = np.nan
    df

 The recommended way to do this type of assignment is:

 .. ipython:: python

-   df = pd.DataFrame({'A': np.array(['foo', 'bar', 'bah', 'foo', 'bar'])})
-   df.loc[0, 'A'] = np.nan
+   df = pd.DataFrame({"A": np.array(["foo", "bar", "bah", "foo", "bar"])})
+   df.loc[0, "A"] = np.nan
    df

 Output formatting enhancements
@@ -52,24 +52,27 @@ Output formatting enhancements

   .. ipython:: python

-     max_info_rows = pd.get_option('max_info_rows')
+     max_info_rows = pd.get_option("max_info_rows")

-     df = pd.DataFrame({'A': np.random.randn(10),
-                        'B': np.random.randn(10),
-                        'C': pd.date_range('20130101', periods=10)
-                        })
+     df = pd.DataFrame(
+         {
+             "A": np.random.randn(10),
+             "B": np.random.randn(10),
+             "C": pd.date_range("20130101", periods=10),
+         }
+     )
      df.iloc[3:6, [0, 2]] = np.nan

   .. ipython:: python

      # set to not display the null counts
-     pd.set_option('max_info_rows', 0)
+     pd.set_option("max_info_rows", 0)
      df.info()

   .. ipython:: python

      # this is the default (same as in 0.13.0)
-     pd.set_option('max_info_rows', max_info_rows)
+     pd.set_option("max_info_rows", max_info_rows)
      df.info()

 - Add ``show_dimensions`` display option for the new DataFrame repr to control
   whether the dimensions print.
@@ -77,10 +80,10 @@ Output formatting enhancements
   .. ipython:: python

      df = pd.DataFrame([[1, 2], [3, 4]])
-     pd.set_option('show_dimensions', False)
+     pd.set_option("show_dimensions", False)
      df

-     pd.set_option('show_dimensions', True)
+     pd.set_option("show_dimensions", True)
      df

 - The ``ArrayFormatter`` for ``datetime`` and ``timedelta64`` now intelligently
@@ -98,10 +101,9 @@ Output formatting enhancements
 .. ipython:: python

-   df = pd.DataFrame([pd.Timestamp('20010101'),
-                      pd.Timestamp('20040601')], columns=['age'])
-   df['today'] = pd.Timestamp('20130419')
-   df['diff'] = df['today'] - df['age']
+   df = pd.DataFrame([pd.Timestamp("20010101"), pd.Timestamp("20040601")], columns=["age"])
+   df["today"] = pd.Timestamp("20130419")
+   df["diff"] = df["today"] - df["age"]
    df

 API changes
@@ -115,8 +117,8 @@ API changes

   .. ipython:: python

-     s = pd.Series(['a', 'a|b', np.nan, 'a|c'])
-     s.str.get_dummies(sep='|')
+     s = pd.Series(["a", "a|b", np.nan, "a|c"])
+     s.str.get_dummies(sep="|")

 - Added the ``NDFrame.equals()`` method to compare if two NDFrames are equal
   have equal axes, dtypes, and values. Added the
@@ -126,8 +128,8 @@ API changes

   .. code-block:: python

-     df = pd.DataFrame({'col': ['foo', 0, np.nan]})
-     df2 = pd.DataFrame({'col': [np.nan, 0, 'foo']}, index=[2, 1, 0])
+     df = pd.DataFrame({"col": ["foo", 0, np.nan]})
+     df2 = pd.DataFrame({"col": [np.nan, 0, "foo"]}, index=[2, 1, 0])
      df.equals(df2)
      df.equals(df2.sort_index())
@@ -204,8 +206,7 @@ Enhancements

   .. code-block:: python

      # Try to infer the format for the index column
-     df = pd.read_csv('foo.csv', index_col=0, parse_dates=True,
-                      infer_datetime_format=True)
+     df = pd.read_csv("foo.csv", index_col=0, parse_dates=True, infer_datetime_format=True)

 - ``date_format`` and ``datetime_format`` keywords can now be specified when
   writing to ``excel`` files (:issue:`4133`)
@@ -215,10 +216,10 @@ Enhancements

   .. ipython:: python

-     shades = ['light', 'dark']
-     colors = ['red', 'green', 'blue']
+     shades = ["light", "dark"]
+     colors = ["red", "green", "blue"]

-     pd.MultiIndex.from_product([shades, colors], names=['shade', 'color'])
+     pd.MultiIndex.from_product([shades, colors], names=["shade", "color"])

 - Panel :meth:`~pandas.Panel.apply` will work on non-ufuncs. See :ref:`the docs`.
diff --git a/doc/source/whatsnew/v0.14.1.rst b/doc/source/whatsnew/v0.14.1.rst
index 354d67a525d0e..78fd182ea86c3 100644
--- a/doc/source/whatsnew/v0.14.1.rst
+++ b/doc/source/whatsnew/v0.14.1.rst
@@ -68,7 +68,8 @@ API changes
    :suppress:

    import pandas.tseries.offsets as offsets
-   d = pd.Timestamp('2014-01-01 09:00')
+
+   d = pd.Timestamp("2014-01-01 09:00")

 .. ipython:: python
@@ -100,10 +101,10 @@ Enhancements

      import pandas.tseries.offsets as offsets

      day = offsets.Day()
-     day.apply(pd.Timestamp('2014-01-01 09:00'))
+     day.apply(pd.Timestamp("2014-01-01 09:00"))

      day = offsets.Day(normalize=True)
-     day.apply(pd.Timestamp('2014-01-01 09:00'))
+     day.apply(pd.Timestamp("2014-01-01 09:00"))

 - ``PeriodIndex`` is represented as the same format as ``DatetimeIndex`` (:issue:`7601`)
 - ``StringMethods`` now work on empty Series (:issue:`7242`)
@@ -123,8 +124,7 @@ Enhancements

   .. ipython:: python

-     rng = pd.date_range('3/6/2012 00:00', periods=10, freq='D',
-                         tz='dateutil/Europe/London')
+     rng = pd.date_range("3/6/2012 00:00", periods=10, freq="D", tz="dateutil/Europe/London")
      rng.tz

   See :ref:`the docs `.
diff --git a/doc/source/whatsnew/v0.15.1.rst b/doc/source/whatsnew/v0.15.1.rst
index da56f07e84d9f..a1d4f9d14a905 100644
--- a/doc/source/whatsnew/v0.15.1.rst
+++ b/doc/source/whatsnew/v0.15.1.rst
@@ -23,7 +23,7 @@ API changes

 .. ipython:: python

-   s = pd.Series(pd.date_range('20130101', periods=5, freq='D'))
+   s = pd.Series(pd.date_range("20130101", periods=5, freq="D"))
    s.iloc[2] = np.nan
    s
@@ -52,8 +52,7 @@ API changes
 .. ipython:: python

    np.random.seed(2718281)
-   df = pd.DataFrame(np.random.randint(0, 100, (10, 2)),
-                     columns=['jim', 'joe'])
+   df = pd.DataFrame(np.random.randint(0, 100, (10, 2)), columns=["jim", "joe"])
    df.head()

    ts = pd.Series(5 * np.random.randint(0, 3, 10))
@@ -80,9 +79,9 @@ API changes

 .. ipython:: python

-   df = pd.DataFrame({'jim': range(5), 'joe': range(5, 10)})
+   df = pd.DataFrame({"jim": range(5), "joe": range(5, 10)})
    df
-   gr = df.groupby(df['jim'] < 2)
+   gr = df.groupby(df["jim"] < 2)

 previous behavior (excludes 1st column from output):
@@ -106,7 +105,7 @@ API changes

 .. ipython:: python

-   s = pd.Series(['a', 'b', 'c', 'd'], [4, 3, 2, 1])
+   s = pd.Series(["a", "b", "c", "d"], [4, 3, 2, 1])
    s

 previous behavior:
@@ -208,6 +207,7 @@ Enhancements
   .. ipython:: python

      from collections import deque
+
      df1 = pd.DataFrame([1, 2, 3])
      df2 = pd.DataFrame([4, 5, 6])
@@ -228,8 +228,9 @@ Enhancements

   .. ipython:: python

-     dfi = pd.DataFrame(1, index=pd.MultiIndex.from_product([['a'],
-                        range(1000)]), columns=['A'])
+     dfi = pd.DataFrame(
+         1, index=pd.MultiIndex.from_product([["a"], range(1000)]), columns=["A"]
+     )

 previous behavior:
diff --git a/doc/source/whatsnew/v0.16.1.rst b/doc/source/whatsnew/v0.16.1.rst
index a89ede8f024a0..39767684c01d0 100644
--- a/doc/source/whatsnew/v0.16.1.rst
+++ b/doc/source/whatsnew/v0.16.1.rst
@@ -209,9 +209,8 @@ when sampling from rows.

 .. ipython:: python

-   df = pd.DataFrame({'col1': [9, 8, 7, 6],
-                      'weight_column': [0.5, 0.4, 0.1, 0]})
-   df.sample(n=3, weights='weight_column')
+   df = pd.DataFrame({"col1": [9, 8, 7, 6], "weight_column": [0.5, 0.4, 0.1, 0]})
+   df.sample(n=3, weights="weight_column")

 .. _whatsnew_0161.enhancements.string:
@@ -229,7 +228,7 @@ enhancements make string operations easier and more consistent with standard pyt

   .. ipython:: python

-     idx = pd.Index([' jack', 'jill ', ' jesse ', 'frank'])
+     idx = pd.Index([" jack", "jill ", " jesse ", "frank"])
      idx.str.strip()

   One special case for the ``.str`` accessor on ``Index`` is that if a string method returns ``bool``, the ``.str`` accessor
@@ -238,11 +237,11 @@ enhancements make string operations easier and more consistent with standard pyt

   .. ipython:: python

-     idx = pd.Index(['a1', 'a2', 'b1', 'b2'])
+     idx = pd.Index(["a1", "a2", "b1", "b2"])
      s = pd.Series(range(4), index=idx)
      s
-     idx.str.startswith('a')
-     s[s.index.str.startswith('a')]
+     idx.str.startswith("a")
+     s[s.index.str.startswith("a")]

 - The following new methods are accessible via ``.str`` accessor to apply the function to each values. (:issue:`9766`, :issue:`9773`, :issue:`10031`, :issue:`10045`, :issue:`10052`)
@@ -257,21 +256,21 @@ enhancements make string operations easier and more consistent with standard pyt

   .. ipython:: python

-     s = pd.Series(['a,b', 'a,c', 'b,c'])
+     s = pd.Series(["a,b", "a,c", "b,c"])

      # return Series
-     s.str.split(',')
+     s.str.split(",")

      # return DataFrame
-     s.str.split(',', expand=True)
+     s.str.split(",", expand=True)

-     idx = pd.Index(['a,b', 'a,c', 'b,c'])
+     idx = pd.Index(["a,b", "a,c", "b,c"])

      # return Index
-     idx.str.split(',')
+     idx.str.split(",")

      # return MultiIndex
-     idx.str.split(',', expand=True)
+     idx.str.split(",", expand=True)

 - Improved ``extract`` and ``get_dummies`` methods for ``Index.str`` (:issue:`9980`)
@@ -286,9 +285,9 @@ Other enhancements
   .. ipython:: python

-     pd.Timestamp('2014-08-01 09:00') + pd.tseries.offsets.BusinessHour()
-     pd.Timestamp('2014-08-01 07:00') + pd.tseries.offsets.BusinessHour()
-     pd.Timestamp('2014-08-01 16:30') + pd.tseries.offsets.BusinessHour()
+     pd.Timestamp("2014-08-01 09:00") + pd.tseries.offsets.BusinessHour()
+     pd.Timestamp("2014-08-01 07:00") + pd.tseries.offsets.BusinessHour()
+     pd.Timestamp("2014-08-01 16:30") + pd.tseries.offsets.BusinessHour()

 - ``DataFrame.diff`` now takes an ``axis`` parameter that determines the direction of differencing (:issue:`9727`)
@@ -300,8 +299,8 @@ Other enhancements

   .. ipython:: python

-     df = pd.DataFrame(np.random.randn(3, 3), columns=['A', 'B', 'C'])
-     df.drop(['A', 'X'], axis=1, errors='ignore')
+     df = pd.DataFrame(np.random.randn(3, 3), columns=["A", "B", "C"])
+     df.drop(["A", "X"], axis=1, errors="ignore")

 - Add support for separating years and quarters using dashes, for
   example 2014-Q1. (:issue:`9688`)
@@ -382,19 +381,16 @@ New behavior

 .. ipython:: python

-   pd.set_option('display.width', 80)
-   pd.Index(range(4), name='foo')
-   pd.Index(range(30), name='foo')
-   pd.Index(range(104), name='foo')
-   pd.CategoricalIndex(['a', 'bb', 'ccc', 'dddd'],
-                       ordered=True, name='foobar')
-   pd.CategoricalIndex(['a', 'bb', 'ccc', 'dddd'] * 10,
-                       ordered=True, name='foobar')
-   pd.CategoricalIndex(['a', 'bb', 'ccc', 'dddd'] * 100,
-                       ordered=True, name='foobar')
-   pd.date_range('20130101', periods=4, name='foo', tz='US/Eastern')
-   pd.date_range('20130101', periods=25, freq='D')
-   pd.date_range('20130101', periods=104, name='foo', tz='US/Eastern')
+   pd.set_option("display.width", 80)
+   pd.Index(range(4), name="foo")
+   pd.Index(range(30), name="foo")
+   pd.Index(range(104), name="foo")
+   pd.CategoricalIndex(["a", "bb", "ccc", "dddd"], ordered=True, name="foobar")
+   pd.CategoricalIndex(["a", "bb", "ccc", "dddd"] * 10, ordered=True, name="foobar")
+   pd.CategoricalIndex(["a", "bb", "ccc", "dddd"] * 100, ordered=True, name="foobar")
+   pd.date_range("20130101", periods=4, name="foo", tz="US/Eastern")
+   pd.date_range("20130101", periods=25, freq="D")
+   pd.date_range("20130101", periods=104, name="foo", tz="US/Eastern")

 .. _whatsnew_0161.performance:
diff --git a/doc/source/whatsnew/v0.16.2.rst b/doc/source/whatsnew/v0.16.2.rst
index 2cb0cbec68eff..bb2aa166419b4 100644
--- a/doc/source/whatsnew/v0.16.2.rst
+++ b/doc/source/whatsnew/v0.16.2.rst
@@ -48,9 +48,10 @@ This can be rewritten as

 .. code-block:: python

-   (df.pipe(h)  # noqa F821
-      .pipe(g, arg1=1)  # noqa F821
-      .pipe(f, arg2=2, arg3=3)  # noqa F821
+   (
+       df.pipe(h)  # noqa F821
+       .pipe(g, arg1=1)  # noqa F821
+       .pipe(f, arg2=2, arg3=3)  # noqa F821
    )

 Now both the code and the logic flow from top to bottom. Keyword arguments are next to
@@ -64,15 +65,16 @@ of ``(function, keyword)`` indicating where the DataFrame should flow. For examp

    import statsmodels.formula.api as sm

-   bb = pd.read_csv('data/baseball.csv', index_col='id')
+   bb = pd.read_csv("data/baseball.csv", index_col="id")

    # sm.ols takes (formula, data)
-   (bb.query('h > 0')
-      .assign(ln_h=lambda df: np.log(df.h))
-      .pipe((sm.ols, 'data'), 'hr ~ ln_h + year + g + C(lg)')
-      .fit()
-      .summary()
-    )
+   (
+       bb.query("h > 0")
+       .assign(ln_h=lambda df: np.log(df.h))
+       .pipe((sm.ols, "data"), "hr ~ ln_h + year + g + C(lg)")
+       .fit()
+       .summary()
+   )

 The pipe method is inspired by unix pipes, which stream text through
 processes. More recently dplyr_ and magrittr_ have introduced the
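Before the next file, a note on why the re-indented ``pipe`` chains above behave identically: ``df.pipe(f, *args, **kwargs)`` is simply ``f(df, *args, **kwargs)``, so black changes only the layout. A small self-contained sketch (``f``/``g``/``h`` here are stand-ins, as in the docs being patched):

.. code-block:: python

    import pandas as pd

    def h(df):
        return df + 1

    def g(df, arg1):
        return df * arg1

    def f(df, arg2, arg3):
        return df - arg2 + arg3

    df = pd.DataFrame({"a": [1, 2, 3]})

    # the chained form reads top to bottom ...
    chained = df.pipe(h).pipe(g, arg1=1).pipe(f, arg2=2, arg3=3)
    # ... and is equivalent to the nested calls
    nested = f(g(h(df), arg1=1), arg2=2, arg3=3)
    assert chained.equals(nested)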
diff --git a/doc/source/whatsnew/v0.17.0.rst b/doc/source/whatsnew/v0.17.0.rst
index e8f37a72f6417..9f700dacf38c7 100644
--- a/doc/source/whatsnew/v0.17.0.rst
+++ b/doc/source/whatsnew/v0.17.0.rst
@@ -80,9 +80,13 @@ The new implementation allows for having a single-timezone across all rows, with

 .. ipython:: python

-   df = pd.DataFrame({'A': pd.date_range('20130101', periods=3),
-                      'B': pd.date_range('20130101', periods=3, tz='US/Eastern'),
-                      'C': pd.date_range('20130101', periods=3, tz='CET')})
+   df = pd.DataFrame(
+       {
+           "A": pd.date_range("20130101", periods=3),
+           "B": pd.date_range("20130101", periods=3, tz="US/Eastern"),
+           "C": pd.date_range("20130101", periods=3, tz="CET"),
+       }
+   )
    df
    df.dtypes
@@ -95,8 +99,8 @@ This uses a new-dtype representation as well, that is very similar in look-and-f

 .. ipython:: python

-   df['B'].dtype
-   type(df['B'].dtype)
+   df["B"].dtype
+   type(df["B"].dtype)

 .. note::
@@ -119,8 +123,8 @@ This uses a new-dtype representation as well, that is very similar in look-and-f

   .. ipython:: python

-     pd.date_range('20130101', periods=3, tz='US/Eastern')
-     pd.date_range('20130101', periods=3, tz='US/Eastern').dtype
+     pd.date_range("20130101", periods=3, tz="US/Eastern")
+     pd.date_range("20130101", periods=3, tz="US/Eastern").dtype

 .. _whatsnew_0170.gil:
@@ -138,9 +142,10 @@ as well as the ``.sum()`` operation.

    N = 1000000
    ngroups = 10
-   df = DataFrame({'key': np.random.randint(0, ngroups, size=N),
-                   'data': np.random.randn(N)})
-   df.groupby('key')['data'].sum()
+   df = DataFrame(
+       {"key": np.random.randint(0, ngroups, size=N), "data": np.random.randn(N)}
+   )
+   df.groupby("key")["data"].sum()

 Releasing of the GIL could benefit an application that uses threads for user interactions (e.g. QT_), or performing multi-threaded computations. A nice example of a library that can handle these types of computation-in-parallel is the dask_ library.
@@ -189,16 +194,16 @@ We are now supporting a ``Series.dt.strftime`` method for datetime-likes to gene

 .. ipython:: python

    # DatetimeIndex
-   s = pd.Series(pd.date_range('20130101', periods=4))
+   s = pd.Series(pd.date_range("20130101", periods=4))
    s
-   s.dt.strftime('%Y/%m/%d')
+   s.dt.strftime("%Y/%m/%d")

 .. ipython:: python

    # PeriodIndex
-   s = pd.Series(pd.period_range('20130101', periods=4))
+   s = pd.Series(pd.period_range("20130101", periods=4))
    s
-   s.dt.strftime('%Y/%m/%d')
+   s.dt.strftime("%Y/%m/%d")

 The string format is as the python standard library and details can be found `here `_
@@ -210,7 +215,7 @@ Series.dt.total_seconds

    # TimedeltaIndex
-   s = pd.Series(pd.timedelta_range('1 minutes', periods=4))
+   s = pd.Series(pd.timedelta_range("1 minutes", periods=4))
    s
    s.dt.total_seconds()
@@ -225,18 +230,18 @@ A multiplied freq represents a span of corresponding length. The example below c

 .. ipython:: python

-   p = pd.Period('2015-08-01', freq='3D')
+   p = pd.Period("2015-08-01", freq="3D")
    p
    p + 1
    p - 2
    p.to_timestamp()
-   p.to_timestamp(how='E')
+   p.to_timestamp(how="E")

 You can use the multiplied freq in ``PeriodIndex`` and ``period_range``.

 .. ipython:: python

-   idx = pd.period_range('2015-08-01', periods=4, freq='2D')
+   idx = pd.period_range("2015-08-01", periods=4, freq="2D")
    idx
    idx + 1
@@ -249,14 +254,14 @@ Support for SAS XPORT files

 .. code-block:: python

-   df = pd.read_sas('sas_xport.xpt')
+   df = pd.read_sas("sas_xport.xpt")

 It is also possible to obtain an iterator and read an XPORT file
 incrementally.
 .. code-block:: python

-   for df in pd.read_sas('sas_xport.xpt', chunksize=10000):
+   for df in pd.read_sas("sas_xport.xpt", chunksize=10000):
        do_something(df)

 See the :ref:`docs ` for more details.
@@ -270,7 +275,7 @@ Support for math functions in .eval()

 .. code-block:: python

-   df = pd.DataFrame({'a': np.random.randn(10)})
+   df = pd.DataFrame({"a": np.random.randn(10)})
    df.eval("b = sin(a)")

 The support math functions are ``sin``, ``cos``, ``exp``, ``log``, ``expm1``, ``log1p``,
@@ -292,23 +297,26 @@ See the :ref:`documentation ` for more details.

 .. ipython:: python

-   df = pd.DataFrame([[1, 2, 3, 4], [5, 6, 7, 8]],
-                     columns=pd.MultiIndex.from_product(
-                         [['foo', 'bar'], ['a', 'b']], names=['col1', 'col2']),
-                     index=pd.MultiIndex.from_product([['j'], ['l', 'k']],
-                                                      names=['i1', 'i2']))
+   df = pd.DataFrame(
+       [[1, 2, 3, 4], [5, 6, 7, 8]],
+       columns=pd.MultiIndex.from_product(
+           [["foo", "bar"], ["a", "b"]], names=["col1", "col2"]
+       ),
+       index=pd.MultiIndex.from_product([["j"], ["l", "k"]], names=["i1", "i2"]),
+   )
    df
-   df.to_excel('test.xlsx')
+   df.to_excel("test.xlsx")

-   df = pd.read_excel('test.xlsx', header=[0, 1], index_col=[0, 1])
+   df = pd.read_excel("test.xlsx", header=[0, 1], index_col=[0, 1])
    df

 .. ipython:: python
    :suppress:

    import os
-   os.remove('test.xlsx')
+
+   os.remove("test.xlsx")

 Previously, it was necessary to specify the ``has_index_names`` argument in ``read_excel``,
 if the serialized data had index names. For version 0.17.0 the output format of ``to_excel``
@@ -354,14 +362,14 @@ Some East Asian countries use Unicode characters its width is corresponding to 2

 .. ipython:: python

-   df = pd.DataFrame({u'国籍': ['UK', u'日本'], u'名前': ['Alice', u'しのぶ']})
+   df = pd.DataFrame({u"国籍": ["UK", u"日本"], u"名前": ["Alice", u"しのぶ"]})
    df;

 .. image:: ../_static/option_unicode01.png

 .. ipython:: python

-   pd.set_option('display.unicode.east_asian_width', True)
+   pd.set_option("display.unicode.east_asian_width", True)
    df;

 .. image:: ../_static/option_unicode02.png
@@ -371,7 +379,7 @@ For further details, see :ref:`here `

 .. ipython:: python
    :suppress:

-   pd.set_option('display.unicode.east_asian_width', False)
+   pd.set_option("display.unicode.east_asian_width", False)

 .. _whatsnew_0170.enhancements.other:
@@ -391,9 +399,9 @@ Other enhancements

   .. ipython:: python

-     df1 = pd.DataFrame({'col1':[0,1], 'col_left':['a','b']})
-     df2 = pd.DataFrame({'col1':[1,2,2],'col_right':[2,2,2]})
-     pd.merge(df1, df2, on='col1', how='outer', indicator=True)
+     df1 = pd.DataFrame({"col1": [0, 1], "col_left": ["a", "b"]})
+     df2 = pd.DataFrame({"col1": [1, 2, 2], "col_right": [2, 2, 2]})
+     pd.merge(df1, df2, on="col1", how="outer", indicator=True)

   For more, see the :ref:`updated docs `
@@ -407,7 +415,7 @@ Other enhancements

   .. ipython:: python

-     foo = pd.Series([1, 2], name='foo')
+     foo = pd.Series([1, 2], name="foo")
      bar = pd.Series([1, 2])
      baz = pd.Series([4, 5])
@@ -434,46 +442,43 @@ Other enhancements

   .. ipython:: python

      ser = pd.Series([np.nan, np.nan, 5, np.nan, np.nan, np.nan, 13])
-     ser.interpolate(limit=1, limit_direction='both')
+     ser.interpolate(limit=1, limit_direction="both")

 - Added a ``DataFrame.round`` method to round the values to a variable number of decimal places (:issue:`10568`).
   .. ipython:: python

-     df = pd.DataFrame(np.random.random([3, 3]),
-                       columns=['A', 'B', 'C'],
-                       index=['first', 'second', 'third'])
+     df = pd.DataFrame(
+         np.random.random([3, 3]),
+         columns=["A", "B", "C"],
+         index=["first", "second", "third"],
+     )
      df
      df.round(2)
-     df.round({'A': 0, 'C': 2})
+     df.round({"A": 0, "C": 2})

 - ``drop_duplicates`` and ``duplicated`` now accept a ``keep`` keyword to target first, last, and all duplicates. The ``take_last`` keyword is deprecated, see :ref:`here ` (:issue:`6511`, :issue:`8505`)

   .. ipython:: python

-     s = pd.Series(['A', 'B', 'C', 'A', 'B', 'D'])
+     s = pd.Series(["A", "B", "C", "A", "B", "D"])
      s.drop_duplicates()
-     s.drop_duplicates(keep='last')
+     s.drop_duplicates(keep="last")
      s.drop_duplicates(keep=False)

 - Reindex now has a ``tolerance`` argument that allows for finer control of :ref:`basics.limits_on_reindex_fill` (:issue:`10411`):

   .. ipython:: python

-     df = pd.DataFrame({'x': range(5),
-                        't': pd.date_range('2000-01-01', periods=5)})
-     df.reindex([0.1, 1.9, 3.5],
-                method='nearest',
-                tolerance=0.2)
+     df = pd.DataFrame({"x": range(5), "t": pd.date_range("2000-01-01", periods=5)})
+     df.reindex([0.1, 1.9, 3.5], method="nearest", tolerance=0.2)

   When used on a ``DatetimeIndex``, ``TimedeltaIndex`` or ``PeriodIndex``, ``tolerance`` will coerced into a ``Timedelta`` if possible. This allows you to specify tolerance with a string:

   .. ipython:: python

-     df = df.set_index('t')
-     df.reindex(pd.to_datetime(['1999-12-31']),
-                method='nearest',
-                tolerance='1 day')
+     df = df.set_index("t")
+     df.reindex(pd.to_datetime(["1999-12-31"]), method="nearest", tolerance="1 day")

   ``tolerance`` is also exposed by the lower level ``Index.get_indexer`` and ``Index.get_loc`` methods.
@@ -627,13 +632,13 @@ Of course you can coerce this as well.

 .. ipython:: python

-   pd.to_datetime(['2009-07-31', 'asd'], errors='coerce')
+   pd.to_datetime(["2009-07-31", "asd"], errors="coerce")

 To keep the previous behavior, you can use ``errors='ignore'``:

 .. ipython:: python

-   pd.to_datetime(['2009-07-31', 'asd'], errors='ignore')
+   pd.to_datetime(["2009-07-31", "asd"], errors="ignore")

 Furthermore, ``pd.to_timedelta`` has gained a similar API, of ``errors='raise'|'ignore'|'coerce'``, and the
 ``coerce`` keyword has been deprecated in favor of ``errors='coerce'``.
@@ -667,9 +672,9 @@ New behavior:

 .. ipython:: python

-   pd.Timestamp('2012Q2')
-   pd.Timestamp('2014')
-   pd.DatetimeIndex(['2012Q2', '2014'])
+   pd.Timestamp("2012Q2")
+   pd.Timestamp("2014")
+   pd.DatetimeIndex(["2012Q2", "2014"])

 .. note::
@@ -678,6 +683,7 @@ New behavior:

   .. ipython:: python

      import pandas.tseries.offsets as offsets
+
      pd.Timestamp.now()
      pd.Timestamp.now() + offsets.DateOffset(years=1)
@@ -780,8 +786,7 @@ Previous behavior:

 .. ipython:: python

-   df_with_missing = pd.DataFrame({'col1': [0, np.nan, 2],
-                                   'col2': [1, np.nan, np.nan]})
+   df_with_missing = pd.DataFrame({"col1": [0, np.nan, 2], "col2": [1, np.nan, np.nan]})

    df_with_missing
@@ -806,18 +811,16 @@ New behavior:

 .. ipython:: python

-   df_with_missing.to_hdf('file.h5',
-                          'df_with_missing',
-                          format='table',
-                          mode='w')
+   df_with_missing.to_hdf("file.h5", "df_with_missing", format="table", mode="w")

-   pd.read_hdf('file.h5', 'df_with_missing')
+   pd.read_hdf("file.h5", "df_with_missing")

 .. ipython:: python
    :suppress:

    import os
-   os.remove('file.h5')
+
+   os.remove("file.h5")

 See the :ref:`docs ` for more details.
@@ -848,8 +851,8 @@ regular formatting as well as scientific notation, similar to how numpy's ``prec
 .. ipython:: python

-   pd.set_option('display.precision', 2)
-   pd.DataFrame({'x': [123.456789]})
+   pd.set_option("display.precision", 2)
+   pd.DataFrame({"x": [123.456789]})

 To preserve output behavior with prior versions the default value of ``display.precision`` has been reduced to ``6``
 from ``7``.
@@ -857,7 +860,7 @@ from ``7``.
 .. ipython:: python
    :suppress:

-   pd.set_option('display.precision', 6)
+   pd.set_option("display.precision", 6)

 .. _whatsnew_0170.api_breaking.categorical_unique:
@@ -871,14 +874,11 @@ Changes to ``Categorical.unique``

 .. ipython:: python

-   cat = pd.Categorical(['C', 'A', 'B', 'C'],
-                        categories=['A', 'B', 'C'],
-                        ordered=True)
+   cat = pd.Categorical(["C", "A", "B", "C"], categories=["A", "B", "C"], ordered=True)
    cat
    cat.unique()

-   cat = pd.Categorical(['C', 'A', 'B', 'C'],
-                        categories=['A', 'B', 'C'])
+   cat = pd.Categorical(["C", "A", "B", "C"], categories=["A", "B", "C"])
    cat
    cat.unique()
@@ -980,9 +980,11 @@ Removal of prior version deprecations/changes
   .. ipython:: python

      np.random.seed(1234)
-     df = pd.DataFrame(np.random.randn(5, 2),
-                       columns=list('AB'),
-                       index=pd.date_range('2013-01-01', periods=5))
+     df = pd.DataFrame(
+         np.random.randn(5, 2),
+         columns=list("AB"),
+         index=pd.date_range("2013-01-01", periods=5),
+     )
      df

   Previously
@@ -1005,7 +1007,7 @@ Removal of prior version deprecations/changes

   .. ipython:: python

-     df.add(df.A, axis='index')
+     df.add(df.A, axis="index")

 - Remove ``table`` keyword in ``HDFStore.put/append``, in favor of using ``format=`` (:issue:`4645`)
diff --git a/doc/source/whatsnew/v0.17.1.rst b/doc/source/whatsnew/v0.17.1.rst
index 5d15a01aee5a0..6b0a28ec47568 100644
--- a/doc/source/whatsnew/v0.17.1.rst
+++ b/doc/source/whatsnew/v0.17.1.rst
@@ -52,8 +52,8 @@ Here's a quick example:

 .. ipython:: python

    np.random.seed(123)
-   df = pd.DataFrame(np.random.randn(10, 5), columns=list('abcde'))
-   html = df.style.background_gradient(cmap='viridis', low=.5)
+   df = pd.DataFrame(np.random.randn(10, 5), columns=list("abcde"))
+   html = df.style.background_gradient(cmap="viridis", low=0.5)

 We can render the HTML to get the following table.
@@ -80,14 +80,14 @@ Enhancements

   .. ipython:: python

-     df = pd.DataFrame({'A': ['foo'] * 1000})  # noqa: F821
-     df['B'] = df['A'].astype('category')
+     df = pd.DataFrame({"A": ["foo"] * 1000})  # noqa: F821
+     df["B"] = df["A"].astype("category")

      # shows the '+' as we have object dtypes
      df.info()

      # we have an accurate memory assessment (but can be expensive to compute this)
-     df.info(memory_usage='deep')
+     df.info(memory_usage="deep")

 - ``Index`` now has a ``fillna`` method (:issue:`10089`)
@@ -99,11 +99,11 @@ Enhancements

   .. ipython:: python

-     s = pd.Series(list('aabb')).astype('category')
+     s = pd.Series(list("aabb")).astype("category")
      s
      s.str.contains("a")

-     date = pd.Series(pd.date_range('1/1/2015', periods=5)).astype('category')
+     date = pd.Series(pd.date_range("1/1/2015", periods=5)).astype("category")
      date
      date.dt.day
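A side note on the ``df.info(memory_usage="deep")`` hunk above: the same measurement is exposed directly by ``DataFrame.memory_usage``, which makes the object-dtype vs. categorical difference easy to inspect. A small sketch (illustrative, not from the patch):

.. code-block:: python

    import pandas as pd

    df = pd.DataFrame({"A": ["foo"] * 1000})
    df["B"] = df["A"].astype("category")

    # the default, shallow count only sees the object pointers
    df.memory_usage()
    # deep=True also measures the string payloads, matching what
    # df.info(memory_usage="deep") reports
    df.memory_usage(deep=True)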
diff --git a/doc/source/whatsnew/v0.18.1.rst b/doc/source/whatsnew/v0.18.1.rst
index 13ed6bc38163b..3db00f686d62c 100644
--- a/doc/source/whatsnew/v0.18.1.rst
+++ b/doc/source/whatsnew/v0.18.1.rst
@@ -42,6 +42,7 @@ see :ref:`Custom Business Hour ` (:issue:`11514`)

    from pandas.tseries.offsets import CustomBusinessHour
    from pandas.tseries.holiday import USFederalHolidayCalendar
+
    bhour_us = CustomBusinessHour(calendar=USFederalHolidayCalendar())

 Friday before MLK Day
@@ -49,6 +50,7 @@ Friday before MLK Day
 .. ipython:: python

    import datetime
+
    dt = datetime.datetime(2014, 1, 17, 15)

    dt + bhour_us
@@ -72,41 +74,42 @@ Previously you would have to do this to get a rolling window mean per-group:

 .. ipython:: python

-   df = pd.DataFrame({'A': [1] * 20 + [2] * 12 + [3] * 8,
-                      'B': np.arange(40)})
+   df = pd.DataFrame({"A": [1] * 20 + [2] * 12 + [3] * 8, "B": np.arange(40)})
    df

 .. ipython:: python

-   df.groupby('A').apply(lambda x: x.rolling(4).B.mean())
+   df.groupby("A").apply(lambda x: x.rolling(4).B.mean())

 Now you can do:

 .. ipython:: python

-   df.groupby('A').rolling(4).B.mean()
+   df.groupby("A").rolling(4).B.mean()

 For ``.resample(..)`` type of operations, previously you would have to:

 .. ipython:: python

-   df = pd.DataFrame({'date': pd.date_range(start='2016-01-01',
-                                            periods=4,
-                                            freq='W'),
-                      'group': [1, 1, 2, 2],
-                      'val': [5, 6, 7, 8]}).set_index('date')
+   df = pd.DataFrame(
+       {
+           "date": pd.date_range(start="2016-01-01", periods=4, freq="W"),
+           "group": [1, 1, 2, 2],
+           "val": [5, 6, 7, 8],
+       }
+   ).set_index("date")
    df

 .. ipython:: python

-   df.groupby('group').apply(lambda x: x.resample('1D').ffill())
+   df.groupby("group").apply(lambda x: x.resample("1D").ffill())

 Now you can do:

 .. ipython:: python

-   df.groupby('group').resample('1D').ffill()
+   df.groupby("group").resample("1D").ffill()

 .. _whatsnew_0181.enhancements.method_chain:
@@ -129,9 +132,7 @@ arguments.

 .. ipython:: python

-   df = pd.DataFrame({'A': [1, 2, 3],
-                      'B': [4, 5, 6],
-                      'C': [7, 8, 9]})
+   df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6], "C": [7, 8, 9]})
    df.where(lambda x: x > 4, lambda x: x + 10)

 Methods ``.loc[]``, ``.iloc[]``, ``.ix[]``
@@ -146,7 +147,7 @@ can return a valid boolean indexer or anything which is valid for these indexer'

    df.loc[lambda x: x.A >= 2, lambda x: x.sum() > 10]

    # callable returns list of labels
-   df.loc[lambda x: [1, 2], lambda x: ['A', 'B']]
+   df.loc[lambda x: [1, 2], lambda x: ["A", "B"]]

 Indexing with``[]``
 """""""""""""""""""
@@ -157,17 +158,15 @@ class and index type.

 .. ipython:: python

-   df[lambda x: 'A']
+   df[lambda x: "A"]

 Using these methods / indexers, you can chain data selection operations
 without using temporary variable.

 .. ipython:: python

-   bb = pd.read_csv('data/baseball.csv', index_col='id')
-   (bb.groupby(['year', 'team'])
-      .sum()
-      .loc[lambda df: df.r > 100])
+   bb = pd.read_csv("data/baseball.csv", index_col="id")
+   (bb.groupby(["year", "team"]).sum().loc[lambda df: df.r > 100])

 .. _whatsnew_0181.partial_string_indexing:
@@ -180,13 +179,13 @@ Partial string indexing now matches on ``DateTimeIndex`` when part of a ``MultiI

    dft2 = pd.DataFrame(
        np.random.randn(20, 1),
-       columns=['A'],
-       index=pd.MultiIndex.from_product([pd.date_range('20130101',
-                                                        periods=10,
-                                                        freq='12H'),
-                                         ['a', 'b']]))
+       columns=["A"],
+       index=pd.MultiIndex.from_product(
+           [pd.date_range("20130101", periods=10, freq="12H"), ["a", "b"]]
+       ),
+   )
    dft2
-   dft2.loc['2013-01-05']
+   dft2.loc["2013-01-05"]

 On other levels
@@ -195,7 +194,7 @@ On other levels

    idx = pd.IndexSlice
    dft2 = dft2.swaplevel(0, 1).sort_index()
    dft2
-   dft2.loc[idx[:, '2013-01-05'], :]
+   dft2.loc[idx[:, "2013-01-05"], :]

 .. _whatsnew_0181.enhancements.assembling:
@@ -206,10 +205,9 @@ Assembling datetimes

 .. ipython:: python

-   df = pd.DataFrame({'year': [2015, 2016],
-                      'month': [2, 3],
-                      'day': [4, 5],
-                      'hour': [2, 3]})
+   df = pd.DataFrame(
+       {"year": [2015, 2016], "month": [2, 3], "day": [4, 5], "hour": [2, 3]}
+   )
    df

 Assembling using the passed frame.
@@ -222,7 +220,7 @@ You can pass only the columns that you need to assemble.

 .. ipython:: python

-   pd.to_datetime(df[['year', 'month', 'day']])
+   pd.to_datetime(df[["year", "month", "day"]])
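On the assembly rules in the hunk just above (an illustrative aside, not from the patch): ``pd.to_datetime`` uses every datetime component column it is handed, so selecting a subset is how you control which components participate:

.. code-block:: python

    import pandas as pd

    df = pd.DataFrame(
        {"year": [2015, 2016], "month": [2, 3], "day": [4, 5], "hour": [2, 3]}
    )

    # all component columns are used; "hour" feeds into the timestamps
    pd.to_datetime(df)

    # a subset assembles only the chosen components
    pd.to_datetime(df[["year", "month", "day"]])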
 .. _whatsnew_0181.other:
@@ -243,7 +241,7 @@ Other enhancements

   .. ipython:: python

-     idx = pd.Index([1., 2., 3., 4.], dtype='float')
+     idx = pd.Index([1.0, 2.0, 3.0, 4.0], dtype="float")

      # default, allow_fill=True, fill_value=None
      idx.take([2, -1])
@@ -253,8 +251,8 @@ Other enhancements

   .. ipython:: python

-     idx = pd.Index(['a|b', 'a|c', 'b|c'])
-     idx.str.get_dummies('|')
+     idx = pd.Index(["a|b", "a|c", "b|c"])
+     idx.str.get_dummies("|")

 - ``pd.crosstab()`` has gained a ``normalize`` argument for normalizing frequency tables (:issue:`12569`). Examples in the updated docs :ref:`here `.
@@ -313,8 +311,7 @@ The index in ``.groupby(..).nth()`` output is now more consistent when the ``as_

 .. ipython:: python

-   df = pd.DataFrame({'A': ['a', 'b', 'a'],
-                      'B': [1, 2, 3]})
+   df = pd.DataFrame({"A": ["a", "b", "a"], "B": [1, 2, 3]})
    df

 Previous behavior:
@@ -337,16 +334,16 @@ New behavior:

 .. ipython:: python

-   df.groupby('A', as_index=True)['B'].nth(0)
-   df.groupby('A', as_index=False)['B'].nth(0)
+   df.groupby("A", as_index=True)["B"].nth(0)
+   df.groupby("A", as_index=False)["B"].nth(0)

 Furthermore, previously, a ``.groupby`` would always sort, regardless if ``sort=False`` was passed with ``.nth()``.

 .. ipython:: python

    np.random.seed(1234)
-   df = pd.DataFrame(np.random.randn(100, 2), columns=['a', 'b'])
-   df['c'] = np.random.randint(0, 4, 100)
+   df = pd.DataFrame(np.random.randn(100, 2), columns=["a", "b"])
+   df["c"] = np.random.randint(0, 4, 100)

 Previous behavior:
@@ -374,8 +371,8 @@ New behavior:

 .. ipython:: python

-   df.groupby('c', sort=True).nth(1)
-   df.groupby('c', sort=False).nth(1)
+   df.groupby("c", sort=True).nth(1)
+   df.groupby("c", sort=False).nth(1)

 .. _whatsnew_0181.numpy_compatibility:
@@ -421,8 +418,9 @@ Using ``apply`` on resampling groupby operations (using a ``pd.TimeGrouper``) no

 .. ipython:: python

-   df = pd.DataFrame({'date': pd.to_datetime(['10/10/2000', '11/10/2000']),
-                      'value': [10, 13]})
+   df = pd.DataFrame(
+       {"date": pd.to_datetime(["10/10/2000", "11/10/2000"]), "value": [10, 13]}
+   )
    df

 Previous behavior:
diff --git a/doc/source/whatsnew/v0.19.0.rst b/doc/source/whatsnew/v0.19.0.rst
index 6e8c4273a0550..4acf9d7181781 100644
--- a/doc/source/whatsnew/v0.19.0.rst
+++ b/doc/source/whatsnew/v0.19.0.rst
@@ -49,10 +49,8 @@ except that we match on nearest key rather than equal keys.

 .. ipython:: python

-   left = pd.DataFrame({'a': [1, 5, 10],
-                        'left_val': ['a', 'b', 'c']})
-   right = pd.DataFrame({'a': [1, 2, 3, 6, 7],
-                         'right_val': [1, 2, 3, 6, 7]})
+   left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
+   right = pd.DataFrame({"a": [1, 2, 3, 6, 7], "right_val": [1, 2, 3, 6, 7]})

    left
    right
@@ -62,13 +60,13 @@ recent value otherwise.

 .. ipython:: python

-   pd.merge_asof(left, right, on='a')
+   pd.merge_asof(left, right, on="a")

 We can also match rows ONLY with prior data, and not an exact match.

 .. ipython:: python

-   pd.merge_asof(left, right, on='a', allow_exact_matches=False)
+   pd.merge_asof(left, right, on="a", allow_exact_matches=False)

 In a typical time-series example, we have ``trades`` and ``quotes`` and we want to
 ``asof-join`` them.
 This also illustrates using the ``by`` parameter to group data before merging.

 .. ipython:: python
-   trades = pd.DataFrame({
-       'time': pd.to_datetime(['20160525 13:30:00.023',
-                               '20160525 13:30:00.038',
-                               '20160525 13:30:00.048',
-                               '20160525 13:30:00.048',
-                               '20160525 13:30:00.048']),
-       'ticker': ['MSFT', 'MSFT',
-                  'GOOG', 'GOOG', 'AAPL'],
-       'price': [51.95, 51.95,
-                 720.77, 720.92, 98.00],
-       'quantity': [75, 155,
-                    100, 100, 100]},
-       columns=['time', 'ticker', 'price', 'quantity'])
-
-   quotes = pd.DataFrame({
-       'time': pd.to_datetime(['20160525 13:30:00.023',
-                               '20160525 13:30:00.023',
-                               '20160525 13:30:00.030',
-                               '20160525 13:30:00.041',
-                               '20160525 13:30:00.048',
-                               '20160525 13:30:00.049',
-                               '20160525 13:30:00.072',
-                               '20160525 13:30:00.075']),
-       'ticker': ['GOOG', 'MSFT', 'MSFT', 'MSFT',
-                  'GOOG', 'AAPL', 'GOOG', 'MSFT'],
-       'bid': [720.50, 51.95, 51.97, 51.99,
-               720.50, 97.99, 720.50, 52.01],
-       'ask': [720.93, 51.96, 51.98, 52.00,
-               720.93, 98.01, 720.88, 52.03]},
-       columns=['time', 'ticker', 'bid', 'ask'])
+   trades = pd.DataFrame(
+       {
+           "time": pd.to_datetime(
+               [
+                   "20160525 13:30:00.023",
+                   "20160525 13:30:00.038",
+                   "20160525 13:30:00.048",
+                   "20160525 13:30:00.048",
+                   "20160525 13:30:00.048",
+               ]
+           ),
+           "ticker": ["MSFT", "MSFT", "GOOG", "GOOG", "AAPL"],
+           "price": [51.95, 51.95, 720.77, 720.92, 98.00],
+           "quantity": [75, 155, 100, 100, 100],
+       },
+       columns=["time", "ticker", "price", "quantity"],
+   )
+
+   quotes = pd.DataFrame(
+       {
+           "time": pd.to_datetime(
+               [
+                   "20160525 13:30:00.023",
+                   "20160525 13:30:00.023",
+                   "20160525 13:30:00.030",
+                   "20160525 13:30:00.041",
+                   "20160525 13:30:00.048",
+                   "20160525 13:30:00.049",
+                   "20160525 13:30:00.072",
+                   "20160525 13:30:00.075",
+               ]
+           ),
+           "ticker": ["GOOG", "MSFT", "MSFT", "MSFT", "GOOG", "AAPL", "GOOG", "MSFT"],
+           "bid": [720.50, 51.95, 51.97, 51.99, 720.50, 97.99, 720.50, 52.01],
+           "ask": [720.93, 51.96, 51.98, 52.00, 720.93, 98.01, 720.88, 52.03],
+       },
+       columns=["time", "ticker", "bid", "ask"],
+   )

 .. ipython:: python
@@ -118,9 +124,7 @@ that forward filling happens automatically taking the most recent non-NaN value.

 .. ipython:: python

-   pd.merge_asof(trades, quotes,
-                 on='time',
-                 by='ticker')
+   pd.merge_asof(trades, quotes, on="time", by="ticker")

 This returns a merged DataFrame with the entries in the same order as the original left
 passed DataFrame (``trades`` in this case), with the fields of the ``quotes`` merged.
@@ -135,9 +139,10 @@ See the full documentation :ref:`here `.

 .. ipython:: python

-   dft = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]},
-                      index=pd.date_range('20130101 09:00:00',
-                                          periods=5, freq='s'))
+   dft = pd.DataFrame(
+       {"B": [0, 1, 2, np.nan, 4]},
+       index=pd.date_range("20130101 09:00:00", periods=5, freq="s"),
+   )
    dft

 This is a regular frequency index. Using an integer window parameter works to roll along the window frequency.
@@ -151,20 +156,26 @@ Specifying an offset allows a more intuitive specification of the rolling freque

 .. ipython:: python

-   dft.rolling('2s').sum()
+   dft.rolling("2s").sum()

 Using a non-regular, but still monotonic index, rolling with an integer window does not impart any special calculation.

 .. ipython:: python
-   dft = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]},
-                      index=pd.Index([pd.Timestamp('20130101 09:00:00'),
-                                      pd.Timestamp('20130101 09:00:02'),
-                                      pd.Timestamp('20130101 09:00:03'),
-                                      pd.Timestamp('20130101 09:00:05'),
-                                      pd.Timestamp('20130101 09:00:06')],
-                                     name='foo'))
+   dft = pd.DataFrame(
+       {"B": [0, 1, 2, np.nan, 4]},
+       index=pd.Index(
+           [
+               pd.Timestamp("20130101 09:00:00"),
+               pd.Timestamp("20130101 09:00:02"),
+               pd.Timestamp("20130101 09:00:03"),
+               pd.Timestamp("20130101 09:00:05"),
+               pd.Timestamp("20130101 09:00:06"),
+           ],
+           name="foo",
+       ),
+   )
    dft
    dft.rolling(2).sum()
@@ -173,7 +184,7 @@ Using the time-specification generates variable windows for this sparse data.

 .. ipython:: python

-   dft.rolling('2s').sum()
+   dft.rolling("2s").sum()

 Furthermore, we now allow an optional ``on`` parameter to specify a column (rather than the
 default of the index) in a DataFrame.
@@ -182,7 +193,7 @@ default of the index) in a DataFrame.

    dft = dft.reset_index()
    dft
-   dft.rolling('2s', on='foo').sum()
+   dft.rolling("2s", on="foo").sum()

 .. _whatsnew_0190.enhancements.read_csv_dupe_col_names_support:
@@ -199,8 +210,8 @@ they are in the file or passed in as the ``names`` parameter (:issue:`7160`, :is

 .. ipython:: python

-   data = '0,1,2\n3,4,5'
-   names = ['a', 'b', 'a']
+   data = "0,1,2\n3,4,5"
+   names = ["a", "b", "a"]

 **Previous behavior**:
@@ -235,17 +246,22 @@ converting to ``Categorical`` after parsing. See the io :ref:`docs here `
 (:issue:`10008`, :issue:`13156`)
@@ -415,7 +431,7 @@ The ``pd.get_dummies`` function now returns dummy-encoded columns as small integ

 .. ipython:: python

-   pd.get_dummies(['a', 'b', 'a', 'c']).dtypes
+   pd.get_dummies(["a", "b", "a", "c"]).dtypes

 .. _whatsnew_0190.enhancements.to_numeric_downcast:
@@ -427,9 +443,9 @@ Downcast values to smallest possible dtype in ``to_numeric``

 .. ipython:: python

-   s = ['1', 2, 3]
-   pd.to_numeric(s, downcast='unsigned')
-   pd.to_numeric(s, downcast='integer')
+   s = ["1", 2, 3]
+   pd.to_numeric(s, downcast="unsigned")
+   pd.to_numeric(s, downcast="integer")

 .. _whatsnew_0190.dev_api:
@@ -447,7 +463,8 @@ The following are now part of this API:

    import pprint
    from pandas.api import types
-   funcs = [f for f in dir(types) if not f.startswith('_')]
+
+   funcs = [f for f in dir(types) if not f.startswith("_")]
    pprint.pprint(funcs)

 .. note::
@@ -472,16 +489,16 @@ Other enhancements

   .. ipython:: python

-     df = pd.DataFrame({'date': pd.date_range('2015-01-01', freq='W', periods=5),
-                        'a': np.arange(5)},
-                       index=pd.MultiIndex.from_arrays([[1, 2, 3, 4, 5],
-                                                        pd.date_range('2015-01-01',
-                                                                      freq='W',
-                                                                      periods=5)
-                                                        ], names=['v', 'd']))
+     df = pd.DataFrame(
+         {"date": pd.date_range("2015-01-01", freq="W", periods=5), "a": np.arange(5)},
+         index=pd.MultiIndex.from_arrays(
+             [[1, 2, 3, 4, 5], pd.date_range("2015-01-01", freq="W", periods=5)],
+             names=["v", "d"],
+         ),
+     )
      df
-     df.resample('M', on='date').sum()
-     df.resample('M', level='d').sum()
+     df.resample("M", on="date").sum()
+     df.resample("M", level="d").sum()

 - The ``.get_credentials()`` method of ``GbqConnector`` can now first try to fetch `the application default credentials `__. See the docs for more details (:issue:`13577`).
 - The ``.tz_localize()`` method of ``DatetimeIndex`` and ``Timestamp`` has gained the ``errors`` keyword, so you can potentially coerce nonexistent timestamps to ``NaT``. The default behavior remains to raising a ``NonExistentTimeError`` (:issue:`13057`)
@@ -507,10 +524,9 @@ Other enhancements
   .. ipython:: python

-     df = pd.DataFrame({'A': [2, 7], 'B': [3, 5], 'C': [4, 8]},
-                       index=['row1', 'row2'])
+     df = pd.DataFrame({"A": [2, 7], "B": [3, 5], "C": [4, 8]}, index=["row1", "row2"])
      df
-     df.sort_values(by='row2', axis=1)
+     df.sort_values(by="row2", axis=1)

 - Added documentation to :ref:`I/O` regarding the perils of reading in columns with mixed dtypes and how to handle it (:issue:`13746`)
 - :meth:`~DataFrame.to_html` now has a ``border`` argument to control the value in the opening ``<table>`` tag. The default is the value of the ``html.border`` option, which defaults to 1. This also affects the notebook HTML repr, but since Jupyter's CSS includes a border-width attribute, the visual effect is the same. (:issue:`11563`).
@@ -583,12 +599,12 @@ Arithmetic operators align both ``index`` (no changes).

 .. ipython:: python

-   s1 = pd.Series([1, 2, 3], index=list('ABC'))
-   s2 = pd.Series([2, 2, 2], index=list('ABD'))
+   s1 = pd.Series([1, 2, 3], index=list("ABC"))
+   s2 = pd.Series([2, 2, 2], index=list("ABD"))
    s1 + s2

-   df1 = pd.DataFrame([1, 2, 3], index=list('ABC'))
-   df2 = pd.DataFrame([2, 2, 2], index=list('ABD'))
+   df1 = pd.DataFrame([1, 2, 3], index=list("ABC"))
+   df2 = pd.DataFrame([2, 2, 2], index=list("ABD"))
    df1 + df2

 Comparison operators
@@ -661,8 +677,8 @@ Logical operators align both ``.index`` of left and right hand side.

 .. ipython:: python

-   s1 = pd.Series([True, False, True], index=list('ABC'))
-   s2 = pd.Series([True, True, True], index=list('ABD'))
+   s1 = pd.Series([True, False, True], index=list("ABC"))
+   s2 = pd.Series([True, True, True], index=list("ABD"))
    s1 & s2

 .. note::
@@ -679,8 +695,8 @@ Logical operators align both ``.index`` of left and right hand side.

   .. ipython:: python

-     df1 = pd.DataFrame([True, False, True], index=list('ABC'))
-     df2 = pd.DataFrame([True, True, True], index=list('ABD'))
+     df1 = pd.DataFrame([True, False, True], index=list("ABC"))
+     df2 = pd.DataFrame([True, True, True], index=list("ABD"))
      df1 & df2

 Flexible comparison methods
@@ -691,8 +707,8 @@ which has the different ``index``.

 .. ipython:: python

-   s1 = pd.Series([1, 2, 3], index=['a', 'b', 'c'])
-   s2 = pd.Series([2, 2, 2], index=['b', 'c', 'd'])
+   s1 = pd.Series([1, 2, 3], index=["a", "b", "c"])
+   s2 = pd.Series([2, 2, 2], index=["b", "c", "d"])

    s1.eq(s2)
    s1.ge(s2)
@@ -749,7 +765,7 @@ This will now convert integers/floats with the default unit of ``ns``.

 .. ipython:: python

-   pd.to_datetime([1, 'foo'], errors='coerce')
+   pd.to_datetime([1, "foo"], errors="coerce")

 Bug fixes related to ``.to_datetime()``:
@@ -768,9 +784,9 @@ Merging will now preserve the dtype of the join keys (:issue:`8596`)

 .. ipython:: python

-   df1 = pd.DataFrame({'key': [1], 'v1': [10]})
+   df1 = pd.DataFrame({"key": [1], "v1": [10]})
    df1
-   df2 = pd.DataFrame({'key': [1, 2], 'v1': [20, 30]})
+   df2 = pd.DataFrame({"key": [1, 2], "v1": [20, 30]})
    df2

 **Previous behavior**:
@@ -796,16 +812,16 @@ We are able to preserve the join keys

 .. ipython:: python

-   pd.merge(df1, df2, how='outer')
-   pd.merge(df1, df2, how='outer').dtypes
+   pd.merge(df1, df2, how="outer")
+   pd.merge(df1, df2, how="outer").dtypes

 Of course if you have missing values that are introduced, then the
 resulting dtype will be upcast, which is unchanged from previous.

 .. ipython:: python

-   pd.merge(df1, df2, how='outer', on='key')
-   pd.merge(df1, df2, how='outer', on='key').dtypes
+   pd.merge(df1, df2, how="outer", on="key")
+   pd.merge(df1, df2, how="outer", on="key").dtypes
 .. _whatsnew_0190.api.describe:
@@ -889,7 +905,7 @@ As a consequence of this change, ``PeriodIndex`` no longer has an integer dtype:

 .. ipython:: python

-   pi = pd.PeriodIndex(['2016-08-01'], freq='D')
+   pi = pd.PeriodIndex(["2016-08-01"], freq="D")
    pi
    pd.api.types.is_integer_dtype(pi)
    pd.api.types.is_period_dtype(pi)
@@ -916,7 +932,7 @@ These result in ``pd.NaT`` without providing ``freq`` option.

 .. ipython:: python

-   pd.Period('NaT')
+   pd.Period("NaT")
    pd.Period(None)
@@ -955,7 +971,7 @@ of integers (:issue:`13988`).

 .. ipython:: python

-   pi = pd.PeriodIndex(['2011-01', '2011-02'], freq='M')
+   pi = pd.PeriodIndex(["2011-01", "2011-02"], freq="M")
    pi.values
@@ -985,7 +1001,7 @@ Previous behavior:

 .. ipython:: python

-   pd.Index(['a', 'b']) + pd.Index(['a', 'c'])
+   pd.Index(["a", "b"]) + pd.Index(["a", "c"])

 Note that numeric Index objects already performed element-wise operations.
 For example, the behavior of adding two integer Indexes is unchanged.
@@ -1011,8 +1027,10 @@ DatetimeIndex objects resulting in a TimedeltaIndex:

 .. ipython:: python

-   (pd.DatetimeIndex(['2016-01-01', '2016-01-02'])
-    - pd.DatetimeIndex(['2016-01-02', '2016-01-03']))
+   (
+       pd.DatetimeIndex(["2016-01-01", "2016-01-02"])
+       - pd.DatetimeIndex(["2016-01-02", "2016-01-03"])
+   )

 .. _whatsnew_0190.api.difference:
@@ -1073,8 +1091,7 @@ Previously, most ``Index`` classes returned ``np.ndarray``, and ``DatetimeIndex`

 .. ipython:: python

    pd.Index([1, 2, 3]).unique()
-   pd.DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'],
-                    tz='Asia/Tokyo').unique()
+   pd.DatetimeIndex(["2011-01-01", "2011-01-02", "2011-01-03"], tz="Asia/Tokyo").unique()
@@ -1086,8 +1103,8 @@ in ``MultiIndex`` levels (:issue:`13743`, :issue:`13854`).

 .. ipython:: python

-   cat = pd.Categorical(['a', 'b'], categories=list("bac"))
-   lvl1 = ['foo', 'bar']
+   cat = pd.Categorical(["a", "b"], categories=list("bac"))
+   lvl1 = ["foo", "bar"]
    midx = pd.MultiIndex.from_arrays([cat, lvl1])
    midx
@@ -1113,9 +1130,9 @@ As a consequence, ``groupby`` and ``set_index`` also preserve categorical dtypes

 .. ipython:: python

-   df = pd.DataFrame({'A': [0, 1], 'B': [10, 11], 'C': cat})
-   df_grouped = df.groupby(by=['A', 'C']).first()
-   df_set_idx = df.set_index(['A', 'C'])
+   df = pd.DataFrame({"A": [0, 1], "B": [10, 11], "C": cat})
+   df_grouped = df.groupby(by=["A", "C"]).first()
+   df_set_idx = df.set_index(["A", "C"])

 **Previous behavior**:
@@ -1163,7 +1180,7 @@ the result of calling :func:`read_csv` without the ``chunksize=`` argument

 .. ipython:: python

-   data = 'A,B\n0,1\n2,3\n4,5\n6,7'
+   data = "A,B\n0,1\n2,3\n4,5\n6,7"

 **Previous behavior**:
@@ -1248,7 +1265,7 @@ Operators now preserve dtypes

   .. code-block:: python

-     s = pd.SparseSeries([1., 0., 2., 0.], fill_value=0)
+     s = pd.SparseSeries([1.0, 0.0, 2.0, 0.0], fill_value=0)
      s
      s.astype(np.int64)
diff --git a/doc/source/whatsnew/v0.19.1.rst b/doc/source/whatsnew/v0.19.1.rst
index f8b60f457b33f..6ff3fb6900a99 100644
--- a/doc/source/whatsnew/v0.19.1.rst
+++ b/doc/source/whatsnew/v0.19.1.rst
@@ -8,7 +8,7 @@ Version 0.19.1 (November 3, 2016)
 .. ipython:: python
    :suppress:

-   from pandas import * # noqa F401, F403
+   from pandas import *  # noqa F401, F403

 This is a minor bug-fix release from 0.19.0 and includes some small regression fixes,
diff --git a/doc/source/whatsnew/v0.19.2.rst b/doc/source/whatsnew/v0.19.2.rst
index 924c95f21ceff..bba89d78be869 100644
--- a/doc/source/whatsnew/v0.19.2.rst
+++ b/doc/source/whatsnew/v0.19.2.rst
@@ -8,7 +8,7 @@ Version 0.19.2 (December 24, 2016)
diff --git a/doc/source/whatsnew/v0.19.2.rst b/doc/source/whatsnew/v0.19.2.rst
index 924c95f21ceff..bba89d78be869 100644
--- a/doc/source/whatsnew/v0.19.2.rst
+++ b/doc/source/whatsnew/v0.19.2.rst
@@ -8,7 +8,7 @@ Version 0.19.2 (December 24, 2016)

 .. ipython:: python
    :suppress:

-    from pandas import * # noqa F401, F403
+    from pandas import *  # noqa F401, F403

 This is a minor bug-fix release in the 0.19.x series and includes some small regression fixes,

diff --git a/doc/source/whatsnew/v0.20.2.rst b/doc/source/whatsnew/v0.20.2.rst
index 7f84c6b3f17bd..430a39d2d2e97 100644
--- a/doc/source/whatsnew/v0.20.2.rst
+++ b/doc/source/whatsnew/v0.20.2.rst
@@ -8,7 +8,7 @@ Version 0.20.2 (June 4, 2017)

 .. ipython:: python
    :suppress:

-    from pandas import * # noqa F401, F403
+    from pandas import *  # noqa F401, F403

 This is a minor bug-fix release in the 0.20.x series and includes some small regression fixes,

diff --git a/doc/source/whatsnew/v0.20.3.rst b/doc/source/whatsnew/v0.20.3.rst
index 888d0048ca9f3..ff28f6830783e 100644
--- a/doc/source/whatsnew/v0.20.3.rst
+++ b/doc/source/whatsnew/v0.20.3.rst
@@ -8,7 +8,7 @@ Version 0.20.3 (July 7, 2017)

 .. ipython:: python
    :suppress:

-    from pandas import * # noqa F401, F403
+    from pandas import *  # noqa F401, F403

 This is a minor bug-fix release in the 0.20.x series and includes some small regression fixes

diff --git a/doc/source/whatsnew/v0.21.1.rst b/doc/source/whatsnew/v0.21.1.rst
index f930dfac869cd..fc4391bdba975 100644
--- a/doc/source/whatsnew/v0.21.1.rst
+++ b/doc/source/whatsnew/v0.21.1.rst
@@ -8,7 +8,7 @@ Version 0.21.1 (December 12, 2017)

 .. ipython:: python
    :suppress:

-    from pandas import * # noqa F401, F403
+    from pandas import *  # noqa F401, F403

 This is a minor bug-fix release in the 0.21.x series and includes some small regression fixes,

diff --git a/doc/source/whatsnew/v0.22.0.rst b/doc/source/whatsnew/v0.22.0.rst
index 66d3ab3305565..1ee6db47a4d8e 100644
--- a/doc/source/whatsnew/v0.22.0.rst
+++ b/doc/source/whatsnew/v0.22.0.rst
@@ -8,7 +8,7 @@ Version 0.22.0 (December 29, 2017)

 .. ipython:: python
    :suppress:

-    from pandas import * # noqa F401, F403
+    from pandas import *  # noqa F401, F403

 This is a major release from 0.21.1 and includes a single, API-breaking change.
@@ -119,7 +119,7 @@ instead of ``NaN``.

 .. ipython:: python

-    grouper = pd.Categorical(['a', 'a'], categories=['a', 'b'])
+    grouper = pd.Categorical(["a", "a"], categories=["a", "b"])
     pd.Series([1, 2]).groupby(grouper).sum()

 To restore the 0.21 behavior of returning ``NaN`` for unobserved groups,
@@ -159,15 +159,14 @@ sum and ``1`` for product.

 .. ipython:: python

-    s = pd.Series([1, 1, np.nan, np.nan],
-                  index=pd.date_range('2017', periods=4))
-    s.resample('2d').sum()
+    s = pd.Series([1, 1, np.nan, np.nan], index=pd.date_range("2017", periods=4))
+    s.resample("2d").sum()

 To restore the 0.21 behavior of returning ``NaN``, use ``min_count>=1``.

 .. ipython:: python

-    s.resample('2d').sum(min_count=1)
+    s.resample("2d").sum(min_count=1)

 In particular, upsampling and taking the sum or product is affected, as
 upsampling introduces missing values even if the original series was
@@ -190,7 +189,7 @@ entirely valid.

 .. ipython:: python

-    idx = pd.DatetimeIndex(['2017-01-01', '2017-01-02'])
+    idx = pd.DatetimeIndex(["2017-01-01", "2017-01-02"])
     pd.Series([1, 2], index=idx).resample("12H").sum()

 Once again, the ``min_count`` keyword is available to restore the 0.21 behavior.
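The all-NA resampled sum described in the v0.22.0 hunks above can be checked in isolation. A minimal sketch, assuming pandas >= 0.22 (the ``"2d"`` alias mirrors the doc example):

.. code-block:: python

    import numpy as np
    import pandas as pd

    s = pd.Series([1, 1, np.nan, np.nan], index=pd.date_range("2017", periods=4))

    # Under 0.22 semantics the all-NaN bin sums to 0.0 ...
    print(s.resample("2d").sum())

    # ... while min_count=1 restores the 0.21 result of NaN.
    print(s.resample("2d").sum(min_count=1))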
diff --git a/doc/source/whatsnew/v0.5.0.rst b/doc/source/whatsnew/v0.5.0.rst
index 7ccb141260f18..7447a10fa1d6b 100644
--- a/doc/source/whatsnew/v0.5.0.rst
+++ b/doc/source/whatsnew/v0.5.0.rst
@@ -9,7 +9,7 @@ Version 0.5.0 (October 24, 2011)

 .. ipython:: python
    :suppress:

-    from pandas import * # noqa F401, F403
+    from pandas import *  # noqa F401, F403

 New features

diff --git a/doc/source/whatsnew/v0.6.0.rst b/doc/source/whatsnew/v0.6.0.rst
index 1cb9dcbe159aa..8ff688eaa91e7 100644
--- a/doc/source/whatsnew/v0.6.0.rst
+++ b/doc/source/whatsnew/v0.6.0.rst
@@ -8,7 +8,7 @@ Version 0.6.0 (November 25, 2011)

 .. ipython:: python
    :suppress:

-    from pandas import * # noqa F401, F403
+    from pandas import *  # noqa F401, F403

 New features

diff --git a/doc/source/whatsnew/v0.7.3.rst b/doc/source/whatsnew/v0.7.3.rst
index 5ed48c0d8d6d9..4ca31baf560bb 100644
--- a/doc/source/whatsnew/v0.7.3.rst
+++ b/doc/source/whatsnew/v0.7.3.rst
@@ -23,7 +23,8 @@ New features

 .. code-block:: python

     from pandas.tools.plotting import scatter_matrix
-    scatter_matrix(df, alpha=0.2) # noqa F821
+
+    scatter_matrix(df, alpha=0.2)  # noqa F821

 - Add ``stacked`` argument to Series and DataFrame's ``plot`` method for
@@ -31,12 +32,12 @@ New features

 .. code-block:: python

-    df.plot(kind='bar', stacked=True) # noqa F821
+    df.plot(kind="bar", stacked=True)  # noqa F821

 .. code-block:: python

-    df.plot(kind='barh', stacked=True) # noqa F821
+    df.plot(kind="barh", stacked=True)  # noqa F821

 - Add log x and y :ref:`scaling options ` to
@@ -52,9 +53,9 @@ Reverted some changes to how NA values (represented typically as ``NaN`` or

 .. ipython:: python

-    series = pd.Series(['Steve', np.nan, 'Joe'])
-    series == 'Steve'
-    series != 'Steve'
+    series = pd.Series(["Steve", np.nan, "Joe"])
+    series == "Steve"
+    series != "Steve"

 In comparisons, NA / NaN will always come through as ``False`` except with
 ``!=`` which is ``True``. *Be very careful* with boolean arithmetic, especially
@@ -63,7 +64,7 @@ filter into boolean array operations if you are worried about this:

 .. ipython:: python

-    mask = series == 'Steve'
+    mask = series == "Steve"
     series[mask & series.notnull()]

 While propagating NA in comparisons may seem like the right behavior to some
@@ -82,15 +83,18 @@ Series, to be more consistent with the ``groupby`` behavior with DataFrame:

 .. ipython:: python
    :okwarning:

-    df = pd.DataFrame({'A': ['foo', 'bar', 'foo', 'bar',
-                             'foo', 'bar', 'foo', 'foo'],
-                       'B': ['one', 'one', 'two', 'three',
-                             'two', 'two', 'one', 'three'],
-                       'C': np.random.randn(8), 'D': np.random.randn(8)})
+    df = pd.DataFrame(
+        {
+            "A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"],
+            "B": ["one", "one", "two", "three", "two", "two", "one", "three"],
+            "C": np.random.randn(8),
+            "D": np.random.randn(8),
+        }
+    )
     df
-    grouped = df.groupby('A')['C']
+    grouped = df.groupby("A")["C"]
     grouped.describe()
-    grouped.apply(lambda x: x.sort_values()[-2:]) # top 2 values
+    grouped.apply(lambda x: x.sort_values()[-2:])  # top 2 values

 .. _whatsnew_0.7.3.contributors:
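The NA comparison semantics restored in the v0.7.3 hunks above still hold in modern pandas. A minimal sketch of the explicit masking the text recommends (``mask`` is an illustrative name):

.. code-block:: python

    import numpy as np
    import pandas as pd

    series = pd.Series(["Steve", np.nan, "Joe"])

    # NaN compares False under == (and True under !=), so combine the
    # comparison with an explicit null filter before boolean indexing.
    mask = series == "Steve"
    print(series[mask & series.notnull()])  # only the "Steve" row survives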
diff --git a/doc/source/whatsnew/v0.8.0.rst b/doc/source/whatsnew/v0.8.0.rst
index 9bba68d8c331d..8a84630a28b34 100644
--- a/doc/source/whatsnew/v0.8.0.rst
+++ b/doc/source/whatsnew/v0.8.0.rst
@@ -159,7 +159,8 @@ New plotting methods

 .. code-block:: python

     import pandas as pd
-    fx = pd.read_pickle('data/fx_prices')
+
+    fx = pd.read_pickle("data/fx_prices")
     import matplotlib.pyplot as plt

 ``Series.plot`` now supports a ``secondary_y`` option:
@@ -168,20 +169,19 @@ New plotting methods

     plt.figure()

-    fx['FR'].plot(style='g')
+    fx["FR"].plot(style="g")

-    fx['IT'].plot(style='k--', secondary_y=True)
+    fx["IT"].plot(style="k--", secondary_y=True)

 Vytautas Jancauskas, the 2012 GSOC participant, has added many new plot types.
 For example, ``'kde'`` is a new option:

 .. ipython:: python

-    s = pd.Series(np.concatenate((np.random.randn(1000),
-                                  np.random.randn(1000) * 0.5 + 3)))
+    s = pd.Series(np.concatenate((np.random.randn(1000), np.random.randn(1000) * 0.5 + 3)))
     plt.figure()
     s.hist(density=True, alpha=0.2)
-    s.plot(kind='kde')
+    s.plot(kind="kde")

 See :ref:`the plotting page ` for much more.

@@ -205,7 +205,8 @@ with code using scalar values because you are handing control over to NumPy:

 .. ipython:: python

     import datetime
-    rng = pd.date_range('1/1/2000', periods=10)
+
+    rng = pd.date_range("1/1/2000", periods=10)
     rng[5]
     isinstance(rng[5], datetime.datetime)
     rng_asarray = np.asarray(rng)
@@ -251,7 +252,7 @@ type. See `matplotlib documentation

 .. ipython:: python

-    rng = pd.date_range('1/1/2000', periods=10)
+    rng = pd.date_range("1/1/2000", periods=10)
     rng
     np.asarray(rng)
     converted = np.asarray(rng, dtype=object)

diff --git a/doc/source/whatsnew/v0.9.0.rst b/doc/source/whatsnew/v0.9.0.rst
index 5172b1989765d..44ded51e31fda 100644
--- a/doc/source/whatsnew/v0.9.0.rst
+++ b/doc/source/whatsnew/v0.9.0.rst
@@ -41,9 +41,11 @@ API changes

     import io

-    data = ('0,0,1\n'
-            '1,1,0\n'
-            '0,1,0')
+    data = """
+    0,0,1
+    1,1,0
+    0,1,0
+    """

     df = pd.read_csv(io.StringIO(data), header=None)
     df

@@ -59,7 +61,7 @@ API changes

     s1 = pd.Series([1, 2, 3])
     s1

-    s2 = pd.Series(s1, index=['foo', 'bar', 'baz'])
+    s2 = pd.Series(s1, index=["foo", "bar", "baz"])
     s2

 - Deprecated ``day_of_year`` API removed from PeriodIndex, use ``dayofyear``
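The ``Series``-from-``Series`` construction shown in the v0.9.0 hunk above reindexes by label rather than relabeling positionally. A minimal sketch of both behaviors (``s3`` is an illustrative name, not part of the patch):

.. code-block:: python

    import pandas as pd

    s1 = pd.Series([1, 2, 3])                        # default index 0, 1, 2
    s2 = pd.Series(s1, index=["foo", "bar", "baz"])  # labels don't overlap
    print(s2)                                        # all NaN after reindexing

    # To relabel positionally instead, assign the index after construction.
    s3 = s1.copy()
    s3.index = ["foo", "bar", "baz"]
    print(s3)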