TYP: more simple return types from ruff #56628

Merged: 1 commit, Dec 27, 2023
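
Every hunk below applies the same pattern: helpers that only ever return None get an explicit `-> None` return annotation (plus a `# type: ignore[override]` where the annotation conflicts with a supertype). A minimal sketch of the payoff, with illustrative names not taken from this PR: once the annotation is present, a type checker can flag accidental use of a checker's "result".

    def _check_positive(x: int) -> None:
        # Raises on bad input; returns nothing on success.
        if x <= 0:
            raise ValueError("x must be positive")

    result = _check_positive(1)  # mypy: "_check_positive" does not return a value
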
2 changes: 1 addition & 1 deletion pandas/core/apply.py
@@ -827,7 +827,7 @@ def generate_numba_apply_func(
     def apply_with_numba(self):
         pass

-    def validate_values_for_numba(self):
+    def validate_values_for_numba(self) -> None:
         # Validate column dtyps all OK
         for colname, dtype in self.obj.dtypes.items():
             if not is_numeric_dtype(dtype):

2 changes: 1 addition & 1 deletion pandas/core/array_algos/replace.py
@@ -67,7 +67,7 @@ def compare_or_regex_search(

     def _check_comparison_types(
         result: ArrayLike | bool, a: ArrayLike, b: Scalar | Pattern
-    ):
+    ) -> None:
         """
         Raises an error if the two arrays (a,b) cannot be compared.
         Otherwise, returns the comparison result as expected.

2 changes: 1 addition & 1 deletion pandas/core/arrays/arrow/accessors.py
@@ -39,7 +39,7 @@ def __init__(self, data, validation_msg: str) -> None:
     def _is_valid_pyarrow_dtype(self, pyarrow_dtype) -> bool:
         pass

-    def _validate(self, data):
+    def _validate(self, data) -> None:
         dtype = data.dtype
         if not isinstance(dtype, ArrowDtype):
             # Raise AttributeError so that inspect can handle non-struct Series.

2 changes: 1 addition & 1 deletion pandas/core/arrays/arrow/extension_types.py
@@ -135,7 +135,7 @@ def to_pandas_dtype(self) -> IntervalDtype:
     """


-def patch_pyarrow():
+def patch_pyarrow() -> None:
     # starting from pyarrow 14.0.1, it has its own mechanism
     if not pa_version_under14p1:
         return

6 changes: 4 additions & 2 deletions pandas/core/arrays/categorical.py
@@ -2164,7 +2164,9 @@ def __contains__(self, key) -> bool:
     # ------------------------------------------------------------------
     # Rendering Methods

-    def _formatter(self, boxed: bool = False):
+    # error: Return type "None" of "_formatter" incompatible with return
+    # type "Callable[[Any], str | None]" in supertype "ExtensionArray"
+    def _formatter(self, boxed: bool = False) -> None:  # type: ignore[override]
         # Returning None here will cause format_array to do inference.
         return None

@@ -2890,7 +2892,7 @@ def __init__(self, data) -> None:
         self._freeze()

     @staticmethod
-    def _validate(data):
+    def _validate(data) -> None:
         if not isinstance(data.dtype, CategoricalDtype):
             raise AttributeError("Can only use .cat accessor with a 'category' dtype")

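The added comment and `# type: ignore[override]` are needed because `ExtensionArray._formatter` declares a callable return type, and narrowing it to `None` in a subclass violates mypy's override rules. A hedged reduction of the situation, with illustrative class names:

    from __future__ import annotations

    from collections.abc import Callable
    from typing import Any

    class Base:
        def _formatter(self) -> Callable[[Any], str | None]:
            return repr

    class Child(Base):
        # Without the ignore, mypy reports: Return type "None" of "_formatter"
        # incompatible with return type "Callable[[Any], str | None]" in
        # supertype "Base"
        def _formatter(self) -> None:  # type: ignore[override]
            return None

The same pattern recurs for SparseArray._formatter further down.
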
4 changes: 2 additions & 2 deletions pandas/core/arrays/datetimelike.py
@@ -2058,7 +2058,7 @@ def freq(self, value) -> None:
         self._freq = value

     @final
-    def _maybe_pin_freq(self, freq, validate_kwds: dict):
+    def _maybe_pin_freq(self, freq, validate_kwds: dict) -> None:
         """
         Constructor helper to pin the appropriate `freq` attribute. Assumes
         that self._freq is currently set to any freq inferred in

@@ -2092,7 +2092,7 @@ def _maybe_pin_freq(self, freq, validate_kwds: dict):

     @final
     @classmethod
-    def _validate_frequency(cls, index, freq: BaseOffset, **kwargs):
+    def _validate_frequency(cls, index, freq: BaseOffset, **kwargs) -> None:
         """
         Validate that a frequency is compatible with the values of a given
         Datetime Array/Index or Timedelta Array/Index

6 changes: 3 additions & 3 deletions pandas/core/arrays/sparse/accessor.py
@@ -30,7 +30,7 @@ def __init__(self, data=None) -> None:
         self._parent = data
         self._validate(data)

-    def _validate(self, data):
+    def _validate(self, data) -> None:
         raise NotImplementedError


@@ -50,7 +50,7 @@ class SparseAccessor(BaseAccessor, PandasDelegate):
     array([2, 2, 2])
     """

-    def _validate(self, data):
+    def _validate(self, data) -> None:
         if not isinstance(data.dtype, SparseDtype):
             raise AttributeError(self._validation_msg)

@@ -243,7 +243,7 @@ class SparseFrameAccessor(BaseAccessor, PandasDelegate):
     0.5
     """

-    def _validate(self, data):
+    def _validate(self, data) -> None:
         dtypes = data.dtypes
         if not all(isinstance(t, SparseDtype) for t in dtypes):
             raise AttributeError(self._validation_msg)

4 changes: 3 additions & 1 deletion pandas/core/arrays/sparse/array.py
@@ -1830,7 +1830,9 @@ def __repr__(self) -> str:
         pp_index = printing.pprint_thing(self.sp_index)
         return f"{pp_str}\nFill: {pp_fill}\n{pp_index}"

-    def _formatter(self, boxed: bool = False):
+    # error: Return type "None" of "_formatter" incompatible with return
+    # type "Callable[[Any], str | None]" in supertype "ExtensionArray"
+    def _formatter(self, boxed: bool = False) -> None:  # type: ignore[override]
         # Defer to the formatter from the GenericArrayFormatter calling us.
         # This will infer the correct formatter from the dtype of the values.
         return None

2 changes: 1 addition & 1 deletion pandas/core/arrays/sparse/scipy_sparse.py
@@ -27,7 +27,7 @@
 )


-def _check_is_partition(parts: Iterable, whole: Iterable):
+def _check_is_partition(parts: Iterable, whole: Iterable) -> None:
     whole = set(whole)
     parts = [set(x) for x in parts]
     if set.intersection(*parts) != set():

2 changes: 1 addition & 1 deletion pandas/core/arrays/string_.py
@@ -364,7 +364,7 @@ def __init__(self, values, copy: bool = False) -> None:
         self._validate()
         NDArrayBacked.__init__(self, self._ndarray, StringDtype(storage="python"))

-    def _validate(self):
+    def _validate(self) -> None:
         """Validate that we only store NA or strings."""
         if len(self._ndarray) and not lib.is_string_array(self._ndarray, skipna=True):
             raise ValueError("StringArray requires a sequence of strings or pandas.NA")

8 changes: 4 additions & 4 deletions pandas/core/computation/eval.py
@@ -72,7 +72,7 @@ def _check_engine(engine: str | None) -> str:
     return engine


-def _check_parser(parser: str):
+def _check_parser(parser: str) -> None:
     """
     Make sure a valid parser is passed.

@@ -91,7 +91,7 @@ def _check_parser(parser: str):
         )


-def _check_resolvers(resolvers):
+def _check_resolvers(resolvers) -> None:
     if resolvers is not None:
         for resolver in resolvers:
             if not hasattr(resolver, "__getitem__"):

@@ -102,7 +102,7 @@ def _check_resolvers(resolvers):
                 )


-def _check_expression(expr):
+def _check_expression(expr) -> None:
     """
     Make sure an expression is not an empty string

@@ -149,7 +149,7 @@ def _convert_expression(expr) -> str:
     return s


-def _check_for_locals(expr: str, stack_level: int, parser: str):
+def _check_for_locals(expr: str, stack_level: int, parser: str) -> None:
     at_top_of_stack = stack_level == 0
     not_pandas_parser = parser != "pandas"

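These `_check_*` helpers either raise or fall off the end, which is exactly what `-> None` expresses; `NoReturn` would only be correct for a helper that raises on every path. A short contrast, with made-up names:

    from typing import NoReturn

    def _check_value(x: int) -> None:
        # May return normally, so "-> None" is the right annotation.
        if x < 0:
            raise ValueError("x must be non-negative")

    def _invalid(name: str) -> NoReturn:
        # Never returns normally, so NoReturn is the precise annotation.
        raise ValueError(f"invalid argument: {name!r}")
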
2 changes: 1 addition & 1 deletion pandas/core/computation/ops.py
@@ -491,7 +491,7 @@ def stringify(value):
                 v = v.tz_convert("UTC")
             self.lhs.update(v)

-    def _disallow_scalar_only_bool_ops(self):
+    def _disallow_scalar_only_bool_ops(self) -> None:
         rhs = self.rhs
         lhs = self.lhs

2 changes: 1 addition & 1 deletion pandas/core/dtypes/cast.py
@@ -231,7 +231,7 @@ def _maybe_unbox_datetimelike(value: Scalar, dtype: DtypeObj) -> Scalar:
     return value


-def _disallow_mismatched_datetimelike(value, dtype: DtypeObj):
+def _disallow_mismatched_datetimelike(value, dtype: DtypeObj) -> None:
     """
     numpy allows np.array(dt64values, dtype="timedelta64[ns]") and
     vice-versa, but we do not want to allow this, so we need to

4 changes: 2 additions & 2 deletions pandas/core/frame.py
@@ -4316,7 +4316,7 @@ def _setitem_array(self, key, value):
         else:
             self._iset_not_inplace(key, value)

-    def _iset_not_inplace(self, key, value):
+    def _iset_not_inplace(self, key, value) -> None:
         # GH#39510 when setting with df[key] = obj with a list-like key and
         # list-like value, we iterate over those listlikes and set columns
         # one at a time. This is different from dispatching to

@@ -4360,7 +4360,7 @@ def igetitem(obj, i: int):
         finally:
             self.columns = orig_columns

-    def _setitem_frame(self, key, value):
+    def _setitem_frame(self, key, value) -> None:
         # support boolean setting with DataFrame input, e.g.
         # df[df > df2] = 0
         if isinstance(key, np.ndarray):

4 changes: 2 additions & 2 deletions pandas/core/generic.py
@@ -4394,7 +4394,7 @@ def _check_is_chained_assignment_possible(self) -> bool_t:
         return False

     @final
-    def _check_setitem_copy(self, t: str = "setting", force: bool_t = False):
+    def _check_setitem_copy(self, t: str = "setting", force: bool_t = False) -> None:
         """

         Parameters

@@ -4510,7 +4510,7 @@ def __delitem__(self, key) -> None:
     # Unsorted

     @final
-    def _check_inplace_and_allows_duplicate_labels(self, inplace: bool_t):
+    def _check_inplace_and_allows_duplicate_labels(self, inplace: bool_t) -> None:
         if inplace and not self.flags.allows_duplicate_labels:
             raise ValueError(
                 "Cannot specify 'inplace=True' when "

4 changes: 2 additions & 2 deletions pandas/core/indexes/base.py
@@ -3209,7 +3209,7 @@ def _get_reconciled_name_object(self, other):
         return self

     @final
-    def _validate_sort_keyword(self, sort):
+    def _validate_sort_keyword(self, sort) -> None:
         if sort not in [None, False, True]:
             raise ValueError(
                 "The 'sort' keyword only takes the values of "

@@ -6051,7 +6051,7 @@ def argsort(self, *args, **kwargs) -> npt.NDArray[np.intp]:
         # by RangeIndex, MultIIndex
         return self._data.argsort(*args, **kwargs)

-    def _check_indexing_error(self, key):
+    def _check_indexing_error(self, key) -> None:
         if not is_scalar(key):
             # if key is not a scalar, directly raise an error (the code below
             # would convert to numpy arrays and raise later any way) - GH29926

2 changes: 1 addition & 1 deletion pandas/core/indexes/multi.py
@@ -1571,7 +1571,7 @@ def _format_multi(
     def _get_names(self) -> FrozenList:
         return FrozenList(self._names)

-    def _set_names(self, names, *, level=None, validate: bool = True):
+    def _set_names(self, names, *, level=None, validate: bool = True) -> None:
         """
         Set new names on index. Each name has to be a hashable type.

14 changes: 8 additions & 6 deletions pandas/core/indexing.py
@@ -911,7 +911,7 @@ def __setitem__(self, key, value) -> None:
         iloc = self if self.name == "iloc" else self.obj.iloc
         iloc._setitem_with_indexer(indexer, value, self.name)

-    def _validate_key(self, key, axis: AxisInt):
+    def _validate_key(self, key, axis: AxisInt) -> None:
         """
         Ensure that key is valid for current indexer.

@@ -1225,7 +1225,7 @@ class _LocIndexer(_LocationIndexer):
     # Key Checks

     @doc(_LocationIndexer._validate_key)
-    def _validate_key(self, key, axis: Axis):
+    def _validate_key(self, key, axis: Axis) -> None:
         # valid for a collection of labels (we check their presence later)
         # slice of labels (where start-end in labels)
         # slice of integers (only if in the labels)

@@ -1572,7 +1572,7 @@ class _iLocIndexer(_LocationIndexer):
     # -------------------------------------------------------------------
     # Key Checks

-    def _validate_key(self, key, axis: AxisInt):
+    def _validate_key(self, key, axis: AxisInt) -> None:
         if com.is_bool_indexer(key):
             if hasattr(key, "index") and isinstance(key.index, Index):
                 if key.index.inferred_type == "integer":

@@ -1783,7 +1783,7 @@ def _get_setitem_indexer(self, key):

     # -------------------------------------------------------------------

-    def _setitem_with_indexer(self, indexer, value, name: str = "iloc"):
+    def _setitem_with_indexer(self, indexer, value, name: str = "iloc") -> None:
         """
         _setitem_with_indexer is for setting values on a Series/DataFrame
         using positional indexers.

@@ -2038,7 +2038,7 @@ def _setitem_with_indexer_split_path(self, indexer, value, name: str):
         for loc in ilocs:
             self._setitem_single_column(loc, value, pi)

-    def _setitem_with_indexer_2d_value(self, indexer, value):
+    def _setitem_with_indexer_2d_value(self, indexer, value) -> None:
         # We get here with np.ndim(value) == 2, excluding DataFrame,
         # which goes through _setitem_with_indexer_frame_value
         pi = indexer[0]

@@ -2060,7 +2060,9 @@ def _setitem_with_indexer_2d_value(self, indexer, value):
             value_col = value_col.tolist()
         self._setitem_single_column(loc, value_col, pi)

-    def _setitem_with_indexer_frame_value(self, indexer, value: DataFrame, name: str):
+    def _setitem_with_indexer_frame_value(
+        self, indexer, value: DataFrame, name: str
+    ) -> None:
         ilocs = self._ensure_iterable_column_indexer(indexer[1])

         sub_indexer = list(indexer)

2 changes: 1 addition & 1 deletion pandas/core/internals/base.py
@@ -53,7 +53,7 @@


 class _AlreadyWarned:
-    def __init__(self):
+    def __init__(self) -> None:
         # This class is used on the manager level to the block level to
         # ensure that we warn only once. The block method can update the
         # warned_already option without returning a value to keep the

6 changes: 4 additions & 2 deletions pandas/core/internals/managers.py
@@ -1940,13 +1940,15 @@ def _post_setstate(self) -> None:
     def _block(self) -> Block:
         return self.blocks[0]

+    # error: Cannot override writeable attribute with read-only property
     @property
-    def _blknos(self):
+    def _blknos(self) -> None:  # type: ignore[override]
         """compat with BlockManager"""
         return None

+    # error: Cannot override writeable attribute with read-only property
     @property
-    def _blklocs(self):
+    def _blklocs(self) -> None:  # type: ignore[override]
         """compat with BlockManager"""
         return None

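These two ignores silence a different complaint from the `_formatter` ones: the base manager exposes `_blknos` and `_blklocs` as plain writeable attributes, and mypy rejects overriding a writeable attribute with a read-only property. An illustrative reduction, not the actual pandas class bodies:

    import numpy as np

    class Manager:
        _blknos: np.ndarray  # plain, writeable attribute

    class SingleManager(Manager):
        # Without the ignore, mypy reports:
        # Cannot override writeable attribute with read-only property
        @property
        def _blknos(self) -> None:  # type: ignore[override]
            return None
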
2 changes: 1 addition & 1 deletion pandas/core/ops/array_ops.py
@@ -591,7 +591,7 @@ def maybe_prepare_scalar_for_op(obj, shape: Shape):
 }


-def _bool_arith_check(op, a: np.ndarray, b):
+def _bool_arith_check(op, a: np.ndarray, b) -> None:
     """
     In contrast to numpy, pandas raises an error for certain operations
     with booleans.

2 changes: 1 addition & 1 deletion pandas/core/reshape/concat.py
@@ -765,7 +765,7 @@ def _get_concat_axis(self) -> Index:

         return concat_axis

-    def _maybe_check_integrity(self, concat_index: Index):
+    def _maybe_check_integrity(self, concat_index: Index) -> None:
         if self.verify_integrity:
             if not concat_index.is_unique:
                 overlap = concat_index[concat_index.duplicated()].unique()

2 changes: 1 addition & 1 deletion pandas/core/reshape/encoding.py
@@ -169,7 +169,7 @@ def get_dummies(
         data_to_encode = data[columns]

     # validate prefixes and separator to avoid silently dropping cols
-    def check_len(item, name: str):
+    def check_len(item, name: str) -> None:
         if is_list_like(item):
             if not len(item) == data_to_encode.shape[1]:
                 len_msg = (

2 changes: 1 addition & 1 deletion pandas/core/reshape/merge.py
@@ -2091,7 +2091,7 @@ def _maybe_require_matching_dtypes(
     ) -> None:
         # TODO: why do we do this for AsOfMerge but not the others?

-        def _check_dtype_match(left: ArrayLike, right: ArrayLike, i: int):
+        def _check_dtype_match(left: ArrayLike, right: ArrayLike, i: int) -> None:
             if left.dtype != right.dtype:
                 if isinstance(left.dtype, CategoricalDtype) and isinstance(
                     right.dtype, CategoricalDtype

2 changes: 1 addition & 1 deletion pandas/core/reshape/reshape.py
@@ -188,7 +188,7 @@ def _make_sorted_values(self, values: np.ndarray) -> np.ndarray:
             return sorted_values
         return values

-    def _make_selectors(self):
+    def _make_selectors(self) -> None:
         new_levels = self.new_index_levels

         # make the mask