Commit 74fc9ce

DOC: Fix code block line length (#36773)
1 parent 505f937 commit 74fc9ce

22 files changed, +320 -75 lines changed

doc/source/getting_started/intro_tutorials/06_calculate_statistics.rst (+4 -1)

@@ -123,7 +123,10 @@ aggregating statistics for given columns can be defined using the
 .. ipython:: python

     titanic.agg(
-        {"Age": ["min", "max", "median", "skew"], "Fare": ["min", "max", "median", "mean"]}
+        {
+            "Age": ["min", "max", "median", "skew"],
+            "Fare": ["min", "max", "median", "mean"],
+        }
     )

 .. raw:: html
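
For reference, the wrapped ``titanic.agg`` call maps each column to a list of statistics and returns one DataFrame of results; only the line breaks change here. A minimal runnable sketch, with a small synthetic frame standing in for the tutorial's ``titanic`` data::

    import numpy as np
    import pandas as pd

    # Hypothetical stand-in for the tutorial's Titanic data.
    titanic = pd.DataFrame(
        {"Age": [22.0, 38.0, 26.0, np.nan], "Fare": [7.25, 71.28, 7.92, 8.05]}
    )

    # Dict of column -> list of aggregations, as in the reformatted block;
    # statistics not requested for a column come back as NaN.
    titanic.agg(
        {
            "Age": ["min", "max", "median", "skew"],
            "Fare": ["min", "max", "median", "mean"],
        }
    )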

doc/source/user_guide/advanced.rst (+8 -3)

@@ -304,7 +304,8 @@ whereas a tuple of lists refer to several values within a level:
 .. ipython:: python

     s = pd.Series(
-        [1, 2, 3, 4, 5, 6], index=pd.MultiIndex.from_product([["A", "B"], ["c", "d", "e"]])
+        [1, 2, 3, 4, 5, 6],
+        index=pd.MultiIndex.from_product([["A", "B"], ["c", "d", "e"]]),
     )
     s.loc[[("A", "c"), ("B", "d")]]  # list of tuples
     s.loc[(["A", "B"], ["c", "d"])]  # tuple of lists

@@ -819,7 +820,9 @@ values **not** in the categories, similarly to how you can reindex **any** panda

 .. ipython:: python

-    df3 = pd.DataFrame({"A": np.arange(3), "B": pd.Series(list("abc")).astype("category")})
+    df3 = pd.DataFrame(
+        {"A": np.arange(3), "B": pd.Series(list("abc")).astype("category")}
+    )
     df3 = df3.set_index("B")
     df3

@@ -934,7 +937,9 @@ example, be millisecond offsets.
             np.random.randn(5, 2), index=np.arange(5) * 250.0, columns=list("AB")
         ),
         pd.DataFrame(
-            np.random.randn(6, 2), index=np.arange(4, 10) * 250.1, columns=list("AB")
+            np.random.randn(6, 2),
+            index=np.arange(4, 10) * 250.1,
+            columns=list("AB"),
         ),
     ]
 )
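
The first hunk touches the example contrasting a list of tuples with a tuple of lists when indexing a ``MultiIndex`` Series; the behavior is unchanged by the rewrap. A self-contained sketch using the same objects::

    import pandas as pd

    s = pd.Series(
        [1, 2, 3, 4, 5, 6],
        index=pd.MultiIndex.from_product([["A", "B"], ["c", "d", "e"]]),
    )

    # A list of tuples selects exactly those index entries (two rows) ...
    s.loc[[("A", "c"), ("B", "d")]]

    # ... whereas a tuple of lists selects all combinations per level (four rows).
    s.loc[(["A", "B"], ["c", "d"])]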

doc/source/user_guide/basics.rst (+28 -8)

@@ -464,7 +464,10 @@ which we illustrate:
         {"A": [1.0, np.nan, 3.0, 5.0, np.nan], "B": [np.nan, 2.0, 3.0, np.nan, 6.0]}
     )
     df2 = pd.DataFrame(
-        {"A": [5.0, 2.0, 4.0, np.nan, 3.0, 7.0], "B": [np.nan, np.nan, 3.0, 4.0, 6.0, 8.0]}
+        {
+            "A": [5.0, 2.0, 4.0, np.nan, 3.0, 7.0],
+            "B": [np.nan, np.nan, 3.0, 4.0, 6.0, 8.0],
+        }
     )
     df1
     df2

@@ -712,7 +715,10 @@ Similarly, you can get the most frequently occurring value(s), i.e. the mode, of
     s5 = pd.Series([1, 1, 3, 3, 3, 5, 5, 7, 7, 7])
     s5.mode()
     df5 = pd.DataFrame(
-        {"A": np.random.randint(0, 7, size=50), "B": np.random.randint(-10, 15, size=50)}
+        {
+            "A": np.random.randint(0, 7, size=50),
+            "B": np.random.randint(-10, 15, size=50),
+        }
     )
     df5.mode()

@@ -1192,7 +1198,9 @@ to :ref:`merging/joining functionality <merging>`:

 .. ipython:: python

-    s = pd.Series(["six", "seven", "six", "seven", "six"], index=["a", "b", "c", "d", "e"])
+    s = pd.Series(
+        ["six", "seven", "six", "seven", "six"], index=["a", "b", "c", "d", "e"]
+    )
     t = pd.Series({"six": 6.0, "seven": 7.0})
     s
     s.map(t)

@@ -1494,7 +1502,9 @@ labels).

     df = pd.DataFrame(
         {"x": [1, 2, 3, 4, 5, 6], "y": [10, 20, 30, 40, 50, 60]},
-        index=pd.MultiIndex.from_product([["a", "b", "c"], [1, 2]], names=["let", "num"]),
+        index=pd.MultiIndex.from_product(
+            [["a", "b", "c"], [1, 2]], names=["let", "num"]
+        ),
     )
     df
     df.rename_axis(index={"let": "abc"})

@@ -1803,7 +1813,9 @@ used to sort a pandas object by its index levels.
         }
     )

-    unsorted_df = df.reindex(index=["a", "d", "c", "b"], columns=["three", "two", "one"])
+    unsorted_df = df.reindex(
+        index=["a", "d", "c", "b"], columns=["three", "two", "one"]
+    )
     unsorted_df

     # DataFrame

@@ -1849,7 +1861,9 @@ to use to determine the sorted order.

 .. ipython:: python

-    df1 = pd.DataFrame({"one": [2, 1, 1, 1], "two": [1, 3, 2, 4], "three": [5, 4, 3, 2]})
+    df1 = pd.DataFrame(
+        {"one": [2, 1, 1, 1], "two": [1, 3, 2, 4], "three": [5, 4, 3, 2]}
+    )
     df1.sort_values(by="two")

 The ``by`` parameter can take a list of column names, e.g.:

@@ -1994,7 +2008,9 @@ all levels to ``by``.

 .. ipython:: python

-    df1.columns = pd.MultiIndex.from_tuples([("a", "one"), ("a", "two"), ("b", "three")])
+    df1.columns = pd.MultiIndex.from_tuples(
+        [("a", "one"), ("a", "two"), ("b", "three")]
+    )
     df1.sort_values(by=("a", "two"))

@@ -2245,7 +2261,11 @@ to the correct type.
     import datetime

     df = pd.DataFrame(
-        [[1, 2], ["a", "b"], [datetime.datetime(2016, 3, 2), datetime.datetime(2016, 3, 2)]]
+        [
+            [1, 2],
+            ["a", "b"],
+            [datetime.datetime(2016, 3, 2), datetime.datetime(2016, 3, 2)],
+        ]
     )
     df = df.T
     df
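
Among these hunks, the ``s.map(t)`` example shows mapping one Series through another, which acts like a value lookup against ``t``'s index; a runnable sketch with the same objects as the diff::

    import pandas as pd

    s = pd.Series(
        ["six", "seven", "six", "seven", "six"], index=["a", "b", "c", "d", "e"]
    )
    t = pd.Series({"six": 6.0, "seven": 7.0})

    # Each value of ``s`` is looked up in ``t``, giving 6.0 and 7.0.
    s.map(t)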

doc/source/user_guide/categorical.rst (+17 -3)

@@ -513,7 +513,11 @@ The ordering of the categorical is determined by the ``categories`` of that colu

     dfs = pd.DataFrame(
         {
-            "A": pd.Categorical(list("bbeebbaa"), categories=["e", "a", "b"], ordered=True),
+            "A": pd.Categorical(
+                list("bbeebbaa"),
+                categories=["e", "a", "b"],
+                ordered=True,
+            ),
             "B": [1, 2, 1, 2, 2, 1, 2, 1],
         }
     )

@@ -642,7 +646,13 @@ Groupby will also show "unused" categories:
     df.groupby("cats").mean()

     cats2 = pd.Categorical(["a", "a", "b", "b"], categories=["a", "b", "c"])
-    df2 = pd.DataFrame({"cats": cats2, "B": ["c", "d", "c", "d"], "values": [1, 2, 3, 4]})
+    df2 = pd.DataFrame(
+        {
+            "cats": cats2,
+            "B": ["c", "d", "c", "d"],
+            "values": [1, 2, 3, 4],
+        }
+    )
     df2.groupby(["cats", "B"]).mean()

@@ -1115,7 +1125,11 @@ You can use ``fillna`` to handle missing values before applying a function.
 .. ipython:: python

     df = pd.DataFrame(
-        {"a": [1, 2, 3, 4], "b": ["a", "b", "c", "d"], "cats": pd.Categorical([1, 2, 3, 2])}
+        {
+            "a": [1, 2, 3, 4],
+            "b": ["a", "b", "c", "d"],
+            "cats": pd.Categorical([1, 2, 3, 2]),
+        }
     )
     df.apply(lambda row: type(row["cats"]), axis=1)
     df.apply(lambda col: col.dtype, axis=0)
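
The middle hunk rewraps the "unused categories" groupby example. As a standalone sketch of the behavior it documents (``observed=False`` is spelled out here; the docs' snippet relies on it being the default)::

    import pandas as pd

    cats2 = pd.Categorical(["a", "a", "b", "b"], categories=["a", "b", "c"])
    df2 = pd.DataFrame(
        {
            "cats": cats2,
            "B": ["c", "d", "c", "d"],
            "values": [1, 2, 3, 4],
        }
    )

    # The unused category "c" still appears in the result, with NaN means.
    df2.groupby(["cats", "B"], observed=False).mean()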

doc/source/user_guide/computation.rst (+5 -1)

@@ -787,7 +787,11 @@ can even be omitted:

 .. ipython:: python

-    covs = df[["B", "C", "D"]].rolling(window=50).cov(df[["A", "B", "C"]], pairwise=True)
+    covs = (
+        df[["B", "C", "D"]]
+        .rolling(window=50)
+        .cov(df[["A", "B", "C"]], pairwise=True)
+    )
     covs.loc["2002-09-22":]

 .. ipython:: python
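
The rewrapped expression computes pairwise rolling covariances between two overlapping column subsets. An illustrative sketch, with random data standing in for the docs' ``df``::

    import numpy as np
    import pandas as pd

    # Random frame standing in for the user guide's df with columns A-D.
    idx = pd.date_range("2002-01-01", periods=300)
    df = pd.DataFrame(np.random.randn(300, 4), index=idx, columns=list("ABCD"))

    # The result is indexed by (date, column) because pairwise=True.
    covs = (
        df[["B", "C", "D"]]
        .rolling(window=50)
        .cov(df[["A", "B", "C"]], pairwise=True)
    )
    covs.tail(9)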

doc/source/user_guide/cookbook.rst (+34 -7)

@@ -266,7 +266,9 @@ New columns

 .. ipython:: python

-    df = pd.DataFrame({"AAA": [1, 1, 1, 2, 2, 2, 3, 3], "BBB": [2, 1, 3, 4, 5, 1, 2, 3]})
+    df = pd.DataFrame(
+        {"AAA": [1, 1, 1, 2, 2, 2, 3, 3], "BBB": [2, 1, 3, 4, 5, 1, 2, 3]}
+    )
     df

 Method 1 : idxmin() to get the index of the minimums

@@ -327,7 +329,9 @@ Arithmetic

 .. ipython:: python

-    cols = pd.MultiIndex.from_tuples([(x, y) for x in ["A", "B", "C"] for y in ["O", "I"]])
+    cols = pd.MultiIndex.from_tuples(
+        [(x, y) for x in ["A", "B", "C"] for y in ["O", "I"]]
+    )
     df = pd.DataFrame(np.random.randn(2, 6), index=["n", "m"], columns=cols)
     df
     df = df.div(df["C"], level=1)

@@ -566,7 +570,9 @@ Unlike agg, apply's callable is passed a sub-DataFrame which gives you access to

 .. ipython:: python

-    df = pd.DataFrame({"Color": "Red Red Red Blue".split(), "Value": [100, 150, 50, 50]})
+    df = pd.DataFrame(
+        {"Color": "Red Red Red Blue".split(), "Value": [100, 150, 50, 50]}
+    )
     df
     df["Counts"] = df.groupby(["Color"]).transform(len)
     df

@@ -648,7 +654,10 @@ Create a list of dataframes, split using a delineation based on logic included i
     dfs = list(
         zip(
             *df.groupby(
-                (1 * (df["Case"] == "B")).cumsum().rolling(window=3, min_periods=1).median()
+                (1 * (df["Case"] == "B"))
+                .cumsum()
+                .rolling(window=3, min_periods=1)
+                .median()
             )
         )
     )[-1]

@@ -740,7 +749,18 @@ The :ref:`Pivot <reshaping.pivot>` docs.
                 "yes",
             ],
             "Passed": ["yes" if x > 50 else "no" for x in grades],
-            "Employed": [True, True, True, False, False, False, False, True, True, False],
+            "Employed": [
+                True,
+                True,
+                True,
+                False,
+                False,
+                False,
+                False,
+                True,
+                True,
+                False,
+            ],
             "Grade": grades,
         }
     )

@@ -791,7 +811,9 @@ Apply
         return pd.Series(aList)


-    df_orgz = pd.concat({ind: row.apply(SeriesFromSubList) for ind, row in df.iterrows()})
+    df_orgz = pd.concat(
+        {ind: row.apply(SeriesFromSubList) for ind, row in df.iterrows()}
+    )
     df_orgz

 `Rolling apply with a DataFrame returning a Series

@@ -1162,7 +1184,12 @@ Option 1: pass rows explicitly to skip rows
     from io import StringIO

     pd.read_csv(
-        StringIO(data), sep=";", skiprows=[11, 12], index_col=0, parse_dates=True, header=10
+        StringIO(data),
+        sep=";",
+        skiprows=[11, 12],
+        index_col=0,
+        parse_dates=True,
+        header=10,
     )

 Option 2: read column names and then data
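
The last hunk wraps a ``read_csv`` call whose point is combining ``header=10`` (the 11th line holds the column names) with ``skiprows=[11, 12]`` (two junk lines right after the header). A small sketch with made-up data shaped the same way; the ``data`` string here is hypothetical, not the cookbook's::

    from io import StringIO

    import pandas as pd

    # Ten filler lines, column names on line 10 (0-based), two junk lines, then data.
    data = "\n".join(
        [";;"] * 10 + ["date;A;B", ";;", ";;", "2000-01-01;1;2", "2000-01-02;3;4"]
    )

    pd.read_csv(
        StringIO(data),
        sep=";",
        skiprows=[11, 12],
        index_col=0,
        parse_dates=True,
        header=10,
    )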

doc/source/user_guide/groupby.rst (+7 -2)

@@ -267,7 +267,9 @@ the length of the ``groups`` dict, so it is largely just a convenience:
     height = np.random.normal(60, 10, size=n)
     time = pd.date_range("1/1/2000", periods=n)
     gender = np.random.choice(["male", "female"], size=n)
-    df = pd.DataFrame({"height": height, "weight": weight, "gender": gender}, index=time)
+    df = pd.DataFrame(
+        {"height": height, "weight": weight, "gender": gender}, index=time
+    )

 .. ipython:: python

@@ -767,7 +769,10 @@ For example, suppose we wished to standardize the data within each group:
     ts.head()
     ts.tail()

-    transformed = ts.groupby(lambda x: x.year).transform(lambda x: (x - x.mean()) / x.std())
+    transformed = ts.groupby(lambda x: x.year).transform(
+        lambda x: (x - x.mean()) / x.std()
+    )
+

 We would expect the result to now have mean 0 and standard deviation 1 within
 each group, which we can easily check:
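
The second hunk wraps the group-wise standardization example; the claim in the surrounding text (mean 0 and standard deviation 1 within each year) can be checked with a quick sketch, using a synthetic series in place of the docs' ``ts``::

    import numpy as np
    import pandas as pd

    # Synthetic daily series standing in for the user guide's ts.
    idx = pd.date_range("2000-01-01", periods=1000)
    ts = pd.Series(np.random.randn(1000).cumsum(), index=idx)

    transformed = ts.groupby(lambda x: x.year).transform(
        lambda x: (x - x.mean()) / x.std()
    )

    # Each yearly group now has mean ~0 and std ~1.
    grouped = transformed.groupby(lambda x: x.year)
    grouped.mean()
    grouped.std()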
