
Commit 4d3964e

STY: Use ruff to format docstrings (#56863)
* Bump ruff
* Format docstrings using ruff
* Fix double {{ examples
* Missed shift
* Add comma
* Remove import
* Remove old flake8 validation
* Remove unit test
* Revert "Remove unit test". This reverts commit 6386676.
* Revert "Remove old flake8 validation". This reverts commit d004398.
* docstring formatting
* Ignore formatting in scripts
* Ignore flake8 conflicts with ruff
1 parent 67055d5 commit 4d3964e


100 files changed (+3244, -2554 lines)
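The hunks below all apply the same mechanical change: ruff's formatter rewrites the code inside docstring Examples sections, so doctest snippets use double quotes and a single wrapped call instead of hand-aligned continuation lines. As a minimal before/after sketch (illustrative only, not part of the commit; the df_before/df_after names are made up), the DataFrame construction from pandas/core/arraylike.py changes like this:

    import pandas as pd

    # Before (old docstring style): single quotes, continuation line aligned by hand.
    df_before = pd.DataFrame({'height': [1.5, 2.6], 'weight': [500, 800]},
                             index=['elk', 'moose'])

    # After (the style ruff format produces): double quotes, call wrapped once,
    # arguments on a single indented line.
    df_after = pd.DataFrame(
        {"height": [1.5, 2.6], "weight": [500, 800]}, index=["elk", "moose"]
    )

    # Only the formatting differs; both spellings build the same frame.
    assert df_before.equals(df_after)

The .pre-commit-config.yaml change that follows wires this in: ruff is bumped to v0.1.13, the explicit "." argument the old ruff-format hook needed is dropped, and everything under scripts/ is excluded from formatting.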

.pre-commit-config.yaml (+2, -3)

@@ -19,7 +19,7 @@ ci:
     skip: [pylint, pyright, mypy]
 repos:
 -   repo: https://github.com/astral-sh/ruff-pre-commit
-    rev: v0.1.6
+    rev: v0.1.13
     hooks:
     -   id: ruff
         args: [--exit-non-zero-on-fix]
@@ -31,8 +31,7 @@ repos:
         exclude: ^pandas/tests
         args: [--select, "ANN001,ANN2", --fix-only, --exit-non-zero-on-fix]
     -   id: ruff-format
-        # TODO: "." not needed in ruff 0.1.8
-        args: ["."]
+        exclude: ^scripts
 -   repo: https://github.com/jendrikseipp/vulture
     rev: 'v2.10'
     hooks:

doc/make.py (+2, -2)

@@ -113,7 +113,7 @@ def _run_os(*args) -> None:
 
         Examples
         --------
-        >>> DocBuilder()._run_os('python', '--version')
+        >>> DocBuilder()._run_os("python", "--version")
         """
         subprocess.check_call(args, stdout=sys.stdout, stderr=sys.stderr)
 
@@ -129,7 +129,7 @@ def _sphinx_build(self, kind: str):
 
         Examples
         --------
-        >>> DocBuilder(num_jobs=4)._sphinx_build('html')
+        >>> DocBuilder(num_jobs=4)._sphinx_build("html")
         """
         if kind not in ("html", "latex", "linkcheck"):
             raise ValueError(f"kind must be html, latex or linkcheck, not {kind}")

pandas/_config/config.py (+1, -1)

@@ -476,7 +476,7 @@ class option_context(ContextDecorator):
     Examples
     --------
     >>> from pandas import option_context
-    >>> with option_context('display.max_rows', 10, 'display.max_columns', 5):
+    >>> with option_context("display.max_rows", 10, "display.max_columns", 5):
     ...     pass
     """
 

pandas/_testing/_warnings.py (-2)

@@ -76,10 +76,8 @@ class for all warnings. To raise multiple types of exceptions,
     >>> import warnings
     >>> with assert_produces_warning():
     ...     warnings.warn(UserWarning())
-    ...
     >>> with assert_produces_warning(False):
     ...     warnings.warn(RuntimeWarning())
-    ...
     Traceback (most recent call last):
         ...
     AssertionError: Caused unexpected warning(s): ['RuntimeWarning'].

pandas/_testing/asserters.py (+2, -2)

@@ -1178,8 +1178,8 @@ def assert_frame_equal(
     but with columns of differing dtypes.
 
     >>> from pandas.testing import assert_frame_equal
-    >>> df1 = pd.DataFrame({'a': [1, 2], 'b': [3, 4]})
-    >>> df2 = pd.DataFrame({'a': [1, 2], 'b': [3.0, 4.0]})
+    >>> df1 = pd.DataFrame({"a": [1, 2], "b": [3, 4]})
+    >>> df2 = pd.DataFrame({"a": [1, 2], "b": [3.0, 4.0]})
 
     df1 equals itself.
 

pandas/_testing/contexts.py (+1, -2)

@@ -70,9 +70,8 @@ def set_timezone(tz: str) -> Generator[None, None, None]:
     >>> tzlocal().tzname(datetime(2021, 1, 1))  # doctest: +SKIP
     'IST'
 
-    >>> with set_timezone('US/Eastern'):
+    >>> with set_timezone("US/Eastern"):
     ...     tzlocal().tzname(datetime(2021, 1, 1))
-    ...
     'EST'
     """
     import time

pandas/core/accessor.py (+1, -3)

@@ -265,7 +265,7 @@ def __init__(self, pandas_object):  # noqa: E999
     For consistency with pandas methods, you should raise an ``AttributeError``
     if the data passed to your accessor has an incorrect dtype.
 
-    >>> pd.Series(['a', 'b']).dt
+    >>> pd.Series(["a", "b"]).dt
     Traceback (most recent call last):
     ...
     AttributeError: Can only use .dt accessor with datetimelike values
@@ -274,8 +274,6 @@ def __init__(self, pandas_object):  # noqa: E999
     --------
     In your library code::
 
-        import pandas as pd
-
         @pd.api.extensions.register_dataframe_accessor("geo")
         class GeoAccessor:
             def __init__(self, pandas_obj):

pandas/core/algorithms.py (+3, -2)

@@ -1215,8 +1215,9 @@ def take(
     >>> pd.api.extensions.take(np.array([10, 20, 30]), [0, 0, -1], allow_fill=True)
     array([10., 10., nan])
 
-    >>> pd.api.extensions.take(np.array([10, 20, 30]), [0, 0, -1], allow_fill=True,
-    ...                        fill_value=-10)
+    >>> pd.api.extensions.take(
+    ...     np.array([10, 20, 30]), [0, 0, -1], allow_fill=True, fill_value=-10
+    ... )
     array([ 10,  10, -10])
     """
     if not isinstance(arr, (np.ndarray, ABCExtensionArray, ABCIndex, ABCSeries)):

pandas/core/apply.py (+5, -5)

@@ -1794,14 +1794,14 @@ def normalize_keyword_aggregation(
 
 
 def _make_unique_kwarg_list(
-    seq: Sequence[tuple[Any, Any]]
+    seq: Sequence[tuple[Any, Any]],
 ) -> Sequence[tuple[Any, Any]]:
     """
     Uniquify aggfunc name of the pairs in the order list
 
     Examples:
     --------
-    >>> kwarg_list = [('a', '<lambda>'), ('a', '<lambda>'), ('b', '<lambda>')]
+    >>> kwarg_list = [("a", "<lambda>"), ("a", "<lambda>"), ("b", "<lambda>")]
     >>> _make_unique_kwarg_list(kwarg_list)
     [('a', '<lambda>_0'), ('a', '<lambda>_1'), ('b', '<lambda>')]
     """
@@ -1833,7 +1833,7 @@ def relabel_result(
     >>> from pandas.core.apply import relabel_result
     >>> result = pd.DataFrame(
     ...     {"A": [np.nan, 2, np.nan], "C": [6, np.nan, np.nan], "B": [np.nan, 4, 2.5]},
-    ...     index=["max", "mean", "min"]
+    ...     index=["max", "mean", "min"],
     ... )
     >>> funcs = {"A": ["max"], "C": ["max"], "B": ["mean", "min"]}
     >>> columns = ("foo", "aab", "bar", "dat")
@@ -1972,7 +1972,7 @@ def maybe_mangle_lambdas(agg_spec: Any) -> Any:
 
     Examples
     --------
-    >>> maybe_mangle_lambdas('sum')
+    >>> maybe_mangle_lambdas("sum")
     'sum'
     >>> maybe_mangle_lambdas([lambda: 1, lambda: 2])  # doctest: +SKIP
     [<function __main__.<lambda_0>,
@@ -2017,7 +2017,7 @@ def validate_func_kwargs(
 
     Examples
     --------
-    >>> validate_func_kwargs({'one': 'min', 'two': 'max'})
+    >>> validate_func_kwargs({"one": "min", "two": "max"})
     (['one', 'two'], ['min', 'max'])
     """
     tuple_given_message = "func is expected but received {} in **kwargs."

pandas/core/arraylike.py (+15, -13)

@@ -119,40 +119,41 @@ def __add__(self, other):
 
         Examples
         --------
-        >>> df = pd.DataFrame({'height': [1.5, 2.6], 'weight': [500, 800]},
-        ...                   index=['elk', 'moose'])
+        >>> df = pd.DataFrame(
+        ...     {"height": [1.5, 2.6], "weight": [500, 800]}, index=["elk", "moose"]
+        ... )
         >>> df
                height  weight
         elk       1.5     500
         moose     2.6     800
 
         Adding a scalar affects all rows and columns.
 
-        >>> df[['height', 'weight']] + 1.5
+        >>> df[["height", "weight"]] + 1.5
                height  weight
         elk       3.0   501.5
         moose     4.1   801.5
 
         Each element of a list is added to a column of the DataFrame, in order.
 
-        >>> df[['height', 'weight']] + [0.5, 1.5]
+        >>> df[["height", "weight"]] + [0.5, 1.5]
                height  weight
         elk       2.0   501.5
         moose     3.1   801.5
 
         Keys of a dictionary are aligned to the DataFrame, based on column names;
         each value in the dictionary is added to the corresponding column.
 
-        >>> df[['height', 'weight']] + {'height': 0.5, 'weight': 1.5}
+        >>> df[["height", "weight"]] + {"height": 0.5, "weight": 1.5}
                height  weight
         elk       2.0   501.5
         moose     3.1   801.5
 
         When `other` is a :class:`Series`, the index of `other` is aligned with the
         columns of the DataFrame.
 
-        >>> s1 = pd.Series([0.5, 1.5], index=['weight', 'height'])
-        >>> df[['height', 'weight']] + s1
+        >>> s1 = pd.Series([0.5, 1.5], index=["weight", "height"])
+        >>> df[["height", "weight"]] + s1
                height  weight
         elk       3.0   500.5
         moose     4.1   800.5
@@ -161,23 +162,24 @@ def __add__(self, other):
         the :class:`Series` will not be reoriented. If index-wise alignment is desired,
         :meth:`DataFrame.add` should be used with `axis='index'`.
 
-        >>> s2 = pd.Series([0.5, 1.5], index=['elk', 'moose'])
-        >>> df[['height', 'weight']] + s2
+        >>> s2 = pd.Series([0.5, 1.5], index=["elk", "moose"])
+        >>> df[["height", "weight"]] + s2
                elk  height  moose  weight
         elk    NaN     NaN    NaN     NaN
         moose  NaN     NaN    NaN     NaN
 
-        >>> df[['height', 'weight']].add(s2, axis='index')
+        >>> df[["height", "weight"]].add(s2, axis="index")
                height  weight
         elk       2.0   500.5
         moose     4.1   801.5
 
         When `other` is a :class:`DataFrame`, both columns names and the
         index are aligned.
 
-        >>> other = pd.DataFrame({'height': [0.2, 0.4, 0.6]},
-        ...                      index=['elk', 'moose', 'deer'])
-        >>> df[['height', 'weight']] + other
+        >>> other = pd.DataFrame(
+        ...     {"height": [0.2, 0.4, 0.6]}, index=["elk", "moose", "deer"]
+        ... )
+        >>> df[["height", "weight"]] + other
                height  weight
         deer      NaN     NaN
         elk       1.7     NaN

pandas/core/arrays/arrow/accessors.py (+21, -25)

@@ -100,9 +100,7 @@ def len(self) -> Series:
         ...         [1, 2, 3],
         ...         [3],
         ...     ],
-        ...     dtype=pd.ArrowDtype(pa.list_(
-        ...         pa.int64()
-        ...     ))
+        ...     dtype=pd.ArrowDtype(pa.list_(pa.int64())),
         ... )
         >>> s.list.len()
         0    3
@@ -136,9 +134,7 @@ def __getitem__(self, key: int | slice) -> Series:
         ...         [1, 2, 3],
         ...         [3],
         ...     ],
-        ...     dtype=pd.ArrowDtype(pa.list_(
-        ...         pa.int64()
-        ...     ))
+        ...     dtype=pd.ArrowDtype(pa.list_(pa.int64())),
         ... )
         >>> s.list[0]
         0    1
@@ -195,9 +191,7 @@ def flatten(self) -> Series:
         ...         [1, 2, 3],
         ...         [3],
         ...     ],
-        ...     dtype=pd.ArrowDtype(pa.list_(
-        ...         pa.int64()
-        ...     ))
+        ...     dtype=pd.ArrowDtype(pa.list_(pa.int64())),
         ... )
         >>> s.list.flatten()
         0    1
@@ -253,9 +247,9 @@ def dtypes(self) -> Series:
         ...         {"version": 2, "project": "pandas"},
         ...         {"version": 1, "project": "numpy"},
         ...     ],
-        ...     dtype=pd.ArrowDtype(pa.struct(
-        ...         [("version", pa.int64()), ("project", pa.string())]
-        ...     ))
+        ...     dtype=pd.ArrowDtype(
+        ...         pa.struct([("version", pa.int64()), ("project", pa.string())])
+        ...     ),
         ... )
         >>> s.struct.dtypes
         version     int64[pyarrow]
@@ -324,9 +318,9 @@ def field(
         ...         {"version": 2, "project": "pandas"},
         ...         {"version": 1, "project": "numpy"},
         ...     ],
-        ...     dtype=pd.ArrowDtype(pa.struct(
-        ...         [("version", pa.int64()), ("project", pa.string())]
-        ...     ))
+        ...     dtype=pd.ArrowDtype(
+        ...         pa.struct([("version", pa.int64()), ("project", pa.string())])
+        ...     ),
         ... )
 
         Extract by field name.
@@ -357,19 +351,21 @@ def field(
         For nested struct types, you can pass a list of values to index
         multiple levels:
 
-        >>> version_type = pa.struct([
-        ...     ("major", pa.int64()),
-        ...     ("minor", pa.int64()),
-        ... ])
+        >>> version_type = pa.struct(
+        ...     [
+        ...         ("major", pa.int64()),
+        ...         ("minor", pa.int64()),
+        ...     ]
+        ... )
         >>> s = pd.Series(
         ...     [
         ...         {"version": {"major": 1, "minor": 5}, "project": "pandas"},
         ...         {"version": {"major": 2, "minor": 1}, "project": "pandas"},
         ...         {"version": {"major": 1, "minor": 26}, "project": "numpy"},
         ...     ],
-        ...     dtype=pd.ArrowDtype(pa.struct(
-        ...         [("version", version_type), ("project", pa.string())]
-        ...     ))
+        ...     dtype=pd.ArrowDtype(
+        ...         pa.struct([("version", version_type), ("project", pa.string())])
+        ...     ),
        ... )
         >>> s.struct.field(["version", "minor"])
         0     5
@@ -454,9 +450,9 @@ def explode(self) -> DataFrame:
         ...         {"version": 2, "project": "pandas"},
         ...         {"version": 1, "project": "numpy"},
         ...     ],
-        ...     dtype=pd.ArrowDtype(pa.struct(
-        ...         [("version", pa.int64()), ("project", pa.string())]
-        ...     ))
+        ...     dtype=pd.ArrowDtype(
+        ...         pa.struct([("version", pa.int64()), ("project", pa.string())])
+        ...     ),
         ... )
 
         >>> s.struct.explode()
