Skip to content

CLN: D208 Docstring is over-indented #31890

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 2 additions & 2 deletions pandas/conftest.py
Original file line number Diff line number Diff line change
Expand Up @@ -106,8 +106,8 @@ def axis(request):
@pytest.fixture(params=[0, "index"], ids=lambda x: f"axis {repr(x)}")
def axis_series(request):
"""
Fixture for returning the axis numbers of a Series.
"""
Fixture for returning the axis numbers of a Series.
"""
return request.param


Expand Down
3 changes: 1 addition & 2 deletions pandas/core/computation/pytables.py
Original file line number Diff line number Diff line change
Expand Up @@ -601,8 +601,7 @@ def __init__(self, value, converted, kind: str):
self.kind = kind

def tostring(self, encoding) -> str:
""" quote the string if not encoded
else encode and return """
""" quote the string if not encoded else encode and return """
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

are the leading/trailing spaces policy?

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

not according to PEP257, but that seems to be popular in our codebase. There is a codecheck in pydocstyle for this, D210. This is for one-line docstrings and we seem to be moving away from those, see #31162, #31462 and others.

I was wondering whether to enforce D210 yet. May be able to find a checker to find one-liners.

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I think this would fail the numpy docstring check as well were this exposed as part of the API

if self.kind == "string":
if encoding is not None:
return str(self.converted)
Expand Down
5 changes: 3 additions & 2 deletions pandas/io/excel/_xlrd.py
Original file line number Diff line number Diff line change
Expand Up @@ -57,8 +57,9 @@ def get_sheet_data(self, sheet, convert_float):
epoch1904 = self.book.datemode

def _parse_cell(cell_contents, cell_typ):
"""converts the contents of the cell into a pandas
appropriate object"""
"""
converts the contents of the cell into a pandas appropriate object
"""

if cell_typ == XL_CELL_DATE:

Expand Down
2 changes: 1 addition & 1 deletion pandas/io/formats/format.py
Original file line number Diff line number Diff line change
Expand Up @@ -979,7 +979,7 @@ def to_html(
border : int
A ``border=border`` attribute is included in the opening
``<table>`` tag. Default ``pd.options.display.html.border``.
"""
"""
from pandas.io.formats.html import HTMLFormatter, NotebookFormatter

Klass = NotebookFormatter if notebook else HTMLFormatter
Expand Down
6 changes: 4 additions & 2 deletions pandas/io/parsers.py
Original file line number Diff line number Diff line change
Expand Up @@ -1457,8 +1457,10 @@ def _should_parse_dates(self, i):
def _extract_multi_indexer_columns(
self, header, index_names, col_names, passed_names=False
):
""" extract and return the names, index_names, col_names
header is a list-of-lists returned from the parsers """
"""
extract and return the names, index_names, col_names
header is a list-of-lists returned from the parsers
"""
if len(header) < 2:
return header[0], index_names, col_names, passed_names

Expand Down
156 changes: 85 additions & 71 deletions pandas/io/pytables.py
Original file line number Diff line number Diff line change
Expand Up @@ -569,9 +569,10 @@ def __getattr__(self, name: str):
)

def __contains__(self, key: str) -> bool:
""" check for existence of this key
can match the exact pathname or the pathnm w/o the leading '/'
"""
"""
check for existence of this key
can match the exact pathname or the pathnm w/o the leading '/'
"""
node = self.get_node(key)
if node is not None:
name = node._v_pathname
Expand Down Expand Up @@ -1831,18 +1832,19 @@ def get_result(self, coordinates: bool = False):


class IndexCol:
""" an index column description class
"""
an index column description class

Parameters
----------
Parameters
----------

axis : axis which I reference
values : the ndarray like converted values
kind : a string description of this type
typ : the pytables type
pos : the position in the pytables
axis : axis which I reference
values : the ndarray like converted values
kind : a string description of this type
typ : the pytables type
pos : the position in the pytables

"""
"""

is_an_indexable = True
is_data_indexable = True
Expand Down Expand Up @@ -1999,9 +2001,11 @@ def __iter__(self):
return iter(self.values)

def maybe_set_size(self, min_itemsize=None):
""" maybe set a string col itemsize:
min_itemsize can be an integer or a dict with this columns name
with an integer size """
"""
maybe set a string col itemsize:
min_itemsize can be an integer or a dict with this columns name
with an integer size
"""
if _ensure_decoded(self.kind) == "string":

if isinstance(min_itemsize, dict):
Expand Down Expand Up @@ -2051,8 +2055,10 @@ def validate_attr(self, append: bool):
)

def update_info(self, info):
""" set/update the info for this indexable with the key/value
if there is a conflict raise/warn as needed """
"""
set/update the info for this indexable with the key/value
if there is a conflict raise/warn as needed
"""

for key in self._info_fields:

Expand Down Expand Up @@ -2140,17 +2146,18 @@ def set_attr(self):


class DataCol(IndexCol):
""" a data holding column, by definition this is not indexable
"""
a data holding column, by definition this is not indexable

Parameters
----------
Parameters
----------

data : the actual data
cname : the column name in the table to hold the data (typically
values)
meta : a string description of the metadata
metadata : the actual metadata
"""
data : the actual data
cname : the column name in the table to hold the data (typically
values)
meta : a string description of the metadata
metadata : the actual metadata
"""

is_an_indexable = False
is_data_indexable = False
Expand Down Expand Up @@ -2460,16 +2467,17 @@ class GenericDataIndexableCol(DataIndexableCol):


class Fixed:
""" represent an object in my store
facilitate read/write of various types of objects
this is an abstract base class
"""
represent an object in my store
facilitate read/write of various types of objects
this is an abstract base class

Parameters
----------
parent : HDFStore
group : Node
The group node where the table resides.
"""
Parameters
----------
parent : HDFStore
group : Node
The group node where the table resides.
"""

pandas_kind: str
format_type: str = "fixed" # GH#30962 needed by dask
Expand Down Expand Up @@ -2596,8 +2604,10 @@ def validate_version(self, where=None):
return True

def infer_axes(self):
""" infer the axes of my storer
return a boolean indicating if we have a valid storer or not """
"""
infer the axes of my storer
return a boolean indicating if we have a valid storer or not
"""

s = self.storable
if s is None:
Expand Down Expand Up @@ -3105,29 +3115,29 @@ class FrameFixed(BlockManagerFixed):


class Table(Fixed):
""" represent a table:
facilitate read/write of various types of tables

Attrs in Table Node
-------------------
These are attributes that are store in the main table node, they are
necessary to recreate these tables when read back in.

index_axes : a list of tuples of the (original indexing axis and
index column)
non_index_axes: a list of tuples of the (original index axis and
columns on a non-indexing axis)
values_axes : a list of the columns which comprise the data of this
table
data_columns : a list of the columns that we are allowing indexing
(these become single columns in values_axes), or True to force all
columns
nan_rep : the string to use for nan representations for string
objects
levels : the names of levels
metadata : the names of the metadata columns

"""
"""
represent a table:
facilitate read/write of various types of tables

Attrs in Table Node
-------------------
These are attributes that are store in the main table node, they are
necessary to recreate these tables when read back in.

index_axes : a list of tuples of the (original indexing axis and
index column)
non_index_axes: a list of tuples of the (original index axis and
columns on a non-indexing axis)
values_axes : a list of the columns which comprise the data of this
table
data_columns : a list of the columns that we are allowing indexing
(these become single columns in values_axes), or True to force all
columns
nan_rep : the string to use for nan representations for string
objects
levels : the names of levels
metadata : the names of the metadata columns
"""

pandas_kind = "wide_table"
format_type: str = "table" # GH#30962 needed by dask
Expand Down Expand Up @@ -4080,10 +4090,11 @@ def read_column(


class WORMTable(Table):
""" a write-once read-many table: this format DOES NOT ALLOW appending to a
table. writing is a one-time operation the data are stored in a format
that allows for searching the data on disk
"""
"""
a write-once read-many table: this format DOES NOT ALLOW appending to a
table. writing is a one-time operation the data are stored in a format
that allows for searching the data on disk
"""

table_type = "worm"

Expand All @@ -4094,14 +4105,16 @@ def read(
start: Optional[int] = None,
stop: Optional[int] = None,
):
""" read the indices and the indexing array, calculate offset rows and
return """
"""
read the indices and the indexing array, calculate offset rows and return
"""
raise NotImplementedError("WORMTable needs to implement read")

def write(self, **kwargs):
""" write in a format that we can search later on (but cannot append
to): write out the indices and the values using _write_array
(e.g. a CArray) create an indexing table so that we can search
"""
write in a format that we can search later on (but cannot append
to): write out the indices and the values using _write_array
(e.g. a CArray) create an indexing table so that we can search
"""
raise NotImplementedError("WORMTable needs to implement write")

Expand Down Expand Up @@ -4170,8 +4183,9 @@ def write(
table.write_data(chunksize, dropna=dropna)

def write_data(self, chunksize: Optional[int], dropna: bool = False):
""" we form the data into a 2-d including indexes,values,mask
write chunk-by-chunk """
"""
we form the data into a 2-d including indexes,values,mask write chunk-by-chunk
"""

names = self.dtype.names
nrows = self.nrows_expected
Expand Down
6 changes: 4 additions & 2 deletions pandas/io/sas/sas7bdat.py
Original file line number Diff line number Diff line change
Expand Up @@ -120,8 +120,10 @@ def column_data_offsets(self):
return np.asarray(self._column_data_offsets, dtype=np.int64)

def column_types(self):
"""Returns a numpy character array of the column types:
s (string) or d (double)"""
"""
Returns a numpy character array of the column types:
s (string) or d (double)
"""
return np.asarray(self._column_types, dtype=np.dtype("S1"))

def close(self):
Expand Down
9 changes: 5 additions & 4 deletions pandas/plotting/_matplotlib/core.py
Original file line number Diff line number Diff line change
Expand Up @@ -509,10 +509,11 @@ def _adorn_subplots(self):
self.axes[0].set_title(self.title)

def _apply_axis_properties(self, axis, rot=None, fontsize=None):
""" Tick creation within matplotlib is reasonably expensive and is
internally deferred until accessed as Ticks are created/destroyed
multiple times per draw. It's therefore beneficial for us to avoid
accessing unless we will act on the Tick.
"""
Tick creation within matplotlib is reasonably expensive and is
internally deferred until accessed as Ticks are created/destroyed
multiple times per draw. It's therefore beneficial for us to avoid
accessing unless we will act on the Tick.
"""

if rot is not None or fontsize is not None:
Expand Down
4 changes: 2 additions & 2 deletions pandas/tests/extension/test_datetime.py
Original file line number Diff line number Diff line change
Expand Up @@ -44,9 +44,9 @@ def data_missing_for_sorting(dtype):
@pytest.fixture
def data_for_grouping(dtype):
"""
Expected to be like [B, B, NA, NA, A, A, B, C]
Expected to be like [B, B, NA, NA, A, A, B, C]

Where A < B < C and NA is missing
Where A < B < C and NA is missing
"""
a = pd.Timestamp("2000-01-01")
b = pd.Timestamp("2000-01-02")
Expand Down
8 changes: 5 additions & 3 deletions pandas/tests/generic/test_generic.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,9 +23,11 @@ def _axes(self):
return self._typ._AXIS_ORDERS

def _construct(self, shape, value=None, dtype=None, **kwargs):
""" construct an object for the given shape
if value is specified use that if its a scalar
if value is an array, repeat it as needed """
"""
construct an object for the given shape
if value is specified use that if its a scalar
if value is an array, repeat it as needed
"""

if isinstance(shape, int):
shape = tuple([shape] * self._ndim)
Expand Down
2 changes: 1 addition & 1 deletion pandas/tests/groupby/test_groupby.py
Original file line number Diff line number Diff line change
Expand Up @@ -1496,7 +1496,7 @@ def test_groupby_reindex_inside_function():

def agg_before(hour, func, fix=False):
"""
Run an aggregate func on the subset of data.
Run an aggregate func on the subset of data.
"""

def _func(data):
Expand Down
5 changes: 2 additions & 3 deletions pandas/tests/scalar/timestamp/test_unary_ops.py
Original file line number Diff line number Diff line change
Expand Up @@ -225,9 +225,8 @@ def test_round_dst_border_nonexistent(self, method, ts_str, freq):
],
)
def test_round_int64(self, timestamp, freq):
"""check that all rounding modes are accurate to int64 precision
see GH#22591
"""
# check that all rounding modes are accurate to int64 precision
# see GH#22591
dt = Timestamp(timestamp)
unit = to_offset(freq).nanos

Expand Down