Skip to content

Issue 48855: enable pylint unnecessary-pass #49418

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 1 commit on
Nov 1, 2022
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 0 additions & 1 deletion pandas/core/computation/engines.py
Original file line number Diff line number Diff line change
Expand Up @@ -102,7 +102,6 @@ def _evaluate(self):
-----
Must be implemented by subclasses.
"""
pass


class NumExprEngine(AbstractEngine):
Expand Down
2 changes: 0 additions & 2 deletions pandas/core/dtypes/dtypes.py
Original file line number Diff line number Diff line change
Expand Up @@ -116,8 +116,6 @@ class CategoricalDtypeType(type):
the type of CategoricalDtype, this metaclass determines subclass ability
"""

pass


@register_extension_dtype
class CategoricalDtype(PandasExtensionDtype, ExtensionDtype):
Expand Down
1 change: 0 additions & 1 deletion pandas/core/indexes/multi.py
Original file line number Diff line number Diff line change
Expand Up @@ -3610,7 +3610,6 @@ def _union(self, other, sort) -> MultiIndex:
RuntimeWarning,
stacklevel=find_stack_level(),
)
pass
return result

def _is_comparable_dtype(self, dtype: DtypeObj) -> bool:
Expand Down
25 changes: 0 additions & 25 deletions pandas/core/interchange/dataframe_protocol.py
Original file line number Diff line number Diff line change
Expand Up @@ -134,15 +134,13 @@ def bufsize(self) -> int:
"""
Buffer size in bytes.
"""
pass

@property
@abstractmethod
def ptr(self) -> int:
"""
Pointer to start of the buffer as an integer.
"""
pass

@abstractmethod
def __dlpack__(self):
Expand All @@ -166,7 +164,6 @@ def __dlpack_device__(self) -> tuple[DlpackDeviceType, int | None]:
Uses device type codes matching DLPack.
Note: must be implemented even if ``__dlpack__`` is not.
"""
pass


class Column(ABC):
Expand Down Expand Up @@ -222,7 +219,6 @@ def size(self) -> int:
Corresponds to DataFrame.num_rows() if column is a single chunk;
equal to size of this current chunk otherwise.
"""
pass

@property
@abstractmethod
Expand All @@ -234,7 +230,6 @@ def offset(self) -> int:
equal size M (only the last chunk may be shorter),
``offset = n * M``, ``n = 0 .. N-1``.
"""
pass

@property
@abstractmethod
Expand Down Expand Up @@ -266,7 +261,6 @@ def dtype(self) -> tuple[DtypeKind, int, str, str]:
- Data types not included: complex, Arrow-style null, binary, decimal,
and nested (list, struct, map, union) dtypes.
"""
pass

@property
@abstractmethod
Expand All @@ -289,7 +283,6 @@ def describe_categorical(self) -> CategoricalDescription:

TBD: are there any other in-memory representations that are needed?
"""
pass

@property
@abstractmethod
Expand All @@ -302,7 +295,6 @@ def describe_null(self) -> tuple[ColumnNullType, Any]:
mask or a byte mask, the value (0 or 1) indicating a missing value. None
otherwise.
"""
pass

@property
@abstractmethod
Expand All @@ -312,22 +304,19 @@ def null_count(self) -> int | None:

Note: Arrow uses -1 to indicate "unknown", but None seems cleaner.
"""
pass

@property
@abstractmethod
def metadata(self) -> dict[str, Any]:
"""
The metadata for the column. See `DataFrame.metadata` for more details.
"""
pass

@abstractmethod
def num_chunks(self) -> int:
"""
Return the number of chunks the column consists of.
"""
pass

@abstractmethod
def get_chunks(self, n_chunks: int | None = None) -> Iterable[Column]:
Expand All @@ -336,7 +325,6 @@ def get_chunks(self, n_chunks: int | None = None) -> Iterable[Column]:

See `DataFrame.get_chunks` for details on ``n_chunks``.
"""
pass

@abstractmethod
def get_buffers(self) -> ColumnBuffers:
Expand All @@ -360,7 +348,6 @@ def get_buffers(self) -> ColumnBuffers:
if the data buffer does not have an associated offsets
buffer.
"""
pass


# def get_children(self) -> Iterable[Column]:
Expand Down Expand Up @@ -391,7 +378,6 @@ class DataFrame(ABC):
@abstractmethod
def __dataframe__(self, nan_as_null: bool = False, allow_copy: bool = True):
"""Construct a new interchange object, potentially changing the parameters."""
pass

@property
@abstractmethod
Expand All @@ -405,14 +391,12 @@ def metadata(self) -> dict[str, Any]:
entries, please add name the keys with the name of the library
followed by a period and the desired name, e.g, ``pandas.indexcol``.
"""
pass

@abstractmethod
def num_columns(self) -> int:
"""
Return the number of columns in the DataFrame.
"""
pass

@abstractmethod
def num_rows(self) -> int | None:
Expand All @@ -422,56 +406,48 @@ def num_rows(self) -> int | None:
"""
Return the number of rows in the DataFrame, if available.
"""
pass

@abstractmethod
def num_chunks(self) -> int:
"""
Return the number of chunks the DataFrame consists of.
"""
pass

@abstractmethod
def column_names(self) -> Iterable[str]:
"""
Return an iterator yielding the column names.
"""
pass

@abstractmethod
def get_column(self, i: int) -> Column:
"""
Return the column at the indicated position.
"""
pass

@abstractmethod
def get_column_by_name(self, name: str) -> Column:
"""
Return the column whose name is the indicated name.
"""
pass

@abstractmethod
def get_columns(self) -> Iterable[Column]:
"""
Return an iterator yielding the columns.
"""
pass

@abstractmethod
def select_columns(self, indices: Sequence[int]) -> DataFrame:
"""
Create a new DataFrame by selecting a subset of columns by index.
"""
pass

@abstractmethod
def select_columns_by_name(self, names: Sequence[str]) -> DataFrame:
"""
Create a new DataFrame by selecting a subset of columns by name.
"""
pass

@abstractmethod
def get_chunks(self, n_chunks: int | None = None) -> Iterable[DataFrame]:
Expand All @@ -483,4 +459,3 @@ def get_chunks(self, n_chunks: int | None = None) -> Iterable[DataFrame]:
``self.num_chunks()``, meaning the producer must subdivide each chunk
before yielding it.
"""
pass
5 changes: 0 additions & 5 deletions pandas/io/excel/_base.py
Original file line number Diff line number Diff line change
Expand Up @@ -1142,7 +1142,6 @@ def engine(self) -> str:
@abc.abstractmethod
def sheets(self) -> dict[str, Any]:
"""Mapping of sheet names to sheet objects."""
pass

@property
@abc.abstractmethod
Expand All @@ -1152,15 +1151,13 @@ def book(self):

This attribute can be used to access engine-specific features.
"""
pass

@book.setter
@abc.abstractmethod
def book(self, other) -> None:
"""
Set book instance. Class type will depend on the engine used.
"""
pass

def write_cells(
self,
Expand Down Expand Up @@ -1212,7 +1209,6 @@ def _write_cells(
freeze_panes: int tuple of length 2
contains the bottom-most row and right-most column to freeze
"""
pass

def save(self) -> None:
"""
Expand All @@ -1228,7 +1224,6 @@ def _save(self) -> None:
"""
Save workbook to disk.
"""
pass

def __init__(
self,
Expand Down
1 change: 0 additions & 1 deletion pandas/io/json/_json.py
Original file line number Diff line number Diff line change
Expand Up @@ -249,7 +249,6 @@ def write(self) -> str:
@abstractmethod
def obj_to_write(self) -> NDFrame | Mapping[IndexLabel, Any]:
"""Object to write in JSON format."""
pass


class SeriesWriter(Writer):
Expand Down
5 changes: 0 additions & 5 deletions pandas/io/pytables.py
Original file line number Diff line number Diff line change
Expand Up @@ -2593,8 +2593,6 @@ def get_atom_timedelta64(cls, shape):
class GenericDataIndexableCol(DataIndexableCol):
"""represent a generic pytables data column"""

pass


class Fixed:
"""
Expand Down Expand Up @@ -2701,11 +2699,9 @@ def attrs(self):

def set_attrs(self) -> None:
"""set our object attributes"""
pass

def get_attrs(self) -> None:
"""get our object attributes"""
pass

@property
def storable(self):
Expand All @@ -2728,7 +2724,6 @@ def validate(self, other) -> Literal[True] | None:

def validate_version(self, where=None) -> None:
"""are we trying to operate on an old version?"""
pass

def infer_axes(self) -> bool:
"""
Expand Down
6 changes: 0 additions & 6 deletions pandas/io/stata.py
Original file line number Diff line number Diff line change
Expand Up @@ -2417,7 +2417,6 @@ def _replace_nans(self, data: DataFrame) -> DataFrame:

def _update_strl_names(self) -> None:
"""No-op, forward compatibility"""
pass

def _validate_variable_name(self, name: str) -> str:
"""
Expand Down Expand Up @@ -2701,19 +2700,15 @@ def _close(self) -> None:

def _write_map(self) -> None:
"""No-op, future compatibility"""
pass

def _write_file_close_tag(self) -> None:
"""No-op, future compatibility"""
pass

def _write_characteristics(self) -> None:
"""No-op, future compatibility"""
pass

def _write_strls(self) -> None:
"""No-op, future compatibility"""
pass

def _write_expansion_fields(self) -> None:
"""Write 5 zeros for expansion fields"""
Expand Down Expand Up @@ -3438,7 +3433,6 @@ def _write_strls(self) -> None:

def _write_expansion_fields(self) -> None:
"""No-op in dta 117+"""
pass

def _write_value_labels(self) -> None:
self._update_map("value_labels")
Expand Down
1 change: 0 additions & 1 deletion pandas/plotting/_matplotlib/core.py
Original file line number Diff line number Diff line change
Expand Up @@ -668,7 +668,6 @@ def _post_plot_logic_common(self, ax, data):

def _post_plot_logic(self, ax, data) -> None:
"""Post process for each axes. Overridden in child classes"""
pass

def _adorn_subplots(self):
"""Common post process unrelated to data"""
Expand Down
1 change: 0 additions & 1 deletion pandas/tests/base/test_constructors.py
Original file line number Diff line number Diff line change
Expand Up @@ -56,7 +56,6 @@ def _get_foo(self):

def bar(self, *args, **kwargs):
"""a test bar method"""
pass

class Delegate(PandasDelegate, PandasObject):
def __init__(self, obj) -> None:
Expand Down
1 change: 0 additions & 1 deletion pandas/tests/indexes/interval/test_constructors.py
Original file line number Diff line number Diff line change
Expand Up @@ -377,7 +377,6 @@ def test_generic_errors(self, constructor):
override the base class implementation since errors are handled
differently; checks unnecessary since caught at the Interval level
"""
pass

def test_constructor_string(self):
# GH23013
Expand Down
2 changes: 0 additions & 2 deletions pandas/tests/io/json/test_ujson.py
Original file line number Diff line number Diff line change
Expand Up @@ -423,11 +423,9 @@ def test_encode_recursion_max(self):

class O2:
member = 0
pass

class O1:
member = 0
pass

decoded_input = O1()
decoded_input.member = O2()
Expand Down
1 change: 0 additions & 1 deletion pandas/tests/io/parser/test_parse_dates.py
Original file line number Diff line number Diff line change
Expand Up @@ -1673,7 +1673,6 @@ def _helper_hypothesis_delimited_date(call, date_string, **kwargs):
result = call(date_string, **kwargs)
except ValueError as er:
msg = str(er)
pass
return msg, result


Expand Down
1 change: 0 additions & 1 deletion pandas/tests/util/test_deprecate.py
Original file line number Diff line number Diff line change
Expand Up @@ -34,7 +34,6 @@ def new_func_with_deprecation():

This is the extended summary. The deprecate directive goes before this.
"""
pass


def test_deprecate_ok():
Expand Down
1 change: 0 additions & 1 deletion pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -177,7 +177,6 @@ disable = [
"try-except-raise",
"undefined-loop-variable",
"unnecessary-lambda",
"unnecessary-pass",
"unspecified-encoding",
"unused-argument",
"unused-import",
Expand Down
Loading