
Commit 7cba64e

TYP: more simple return types from ruff (#56628)
TYP: more return types from ruff
1 parent 0643a18 commit 7cba64e

28 files changed, +55 -47 lines
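
Every hunk below follows the same pattern: private helpers that only validate, raise, or mutate state in place, and therefore implicitly return None, gain an explicit `-> None` return annotation of the kind ruff's flake8-annotations (ANN) rules flag. A minimal before/after sketch of the pattern (the helper name here is made up, not part of the commit):

-def _check_positive(value: int):
+def _check_positive(value: int) -> None:
     if value <= 0:
         raise ValueError("value must be positive")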

pandas/core/apply.py

+1 -1

@@ -827,7 +827,7 @@ def generate_numba_apply_func(
     def apply_with_numba(self):
         pass

-    def validate_values_for_numba(self):
+    def validate_values_for_numba(self) -> None:
         # Validate column dtyps all OK
         for colname, dtype in self.obj.dtypes.items():
             if not is_numeric_dtype(dtype):

pandas/core/array_algos/replace.py

+1 -1

@@ -67,7 +67,7 @@ def compare_or_regex_search(

     def _check_comparison_types(
         result: ArrayLike | bool, a: ArrayLike, b: Scalar | Pattern
-    ):
+    ) -> None:
         """
         Raises an error if the two arrays (a,b) cannot be compared.
         Otherwise, returns the comparison result as expected.

pandas/core/arrays/arrow/accessors.py

+1 -1

@@ -39,7 +39,7 @@ def __init__(self, data, validation_msg: str) -> None:
     def _is_valid_pyarrow_dtype(self, pyarrow_dtype) -> bool:
         pass

-    def _validate(self, data):
+    def _validate(self, data) -> None:
         dtype = data.dtype
         if not isinstance(dtype, ArrowDtype):
             # Raise AttributeError so that inspect can handle non-struct Series.

pandas/core/arrays/arrow/extension_types.py

+1 -1

@@ -135,7 +135,7 @@ def to_pandas_dtype(self) -> IntervalDtype:
         """


-def patch_pyarrow():
+def patch_pyarrow() -> None:
     # starting from pyarrow 14.0.1, it has its own mechanism
     if not pa_version_under14p1:
         return

pandas/core/arrays/categorical.py

+4 -2

@@ -2164,7 +2164,9 @@ def __contains__(self, key) -> bool:
     # ------------------------------------------------------------------
     # Rendering Methods

-    def _formatter(self, boxed: bool = False):
+    # error: Return type "None" of "_formatter" incompatible with return
+    # type "Callable[[Any], str | None]" in supertype "ExtensionArray"
+    def _formatter(self, boxed: bool = False) -> None:  # type: ignore[override]
         # Returning None here will cause format_array to do inference.
         return None

@@ -2890,7 +2892,7 @@ def __init__(self, data) -> None:
         self._freeze()

     @staticmethod
-    def _validate(data):
+    def _validate(data) -> None:
         if not isinstance(data.dtype, CategoricalDtype):
             raise AttributeError("Can only use .cat accessor with a 'category' dtype")

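The `_formatter` hunk above (and the matching one in SparseArray below) needs a `# type: ignore[override]` because the supertype `ExtensionArray` declares `_formatter` as returning `Callable[[Any], str | None]`, and narrowing the return type to `None` is not a type-safe override even though it is intentional here. A simplified sketch of the mechanics, using hypothetical class names rather than the real pandas hierarchy:

from __future__ import annotations

from typing import Any, Callable


class Base:
    def _formatter(self) -> Callable[[Any], str | None]:
        return repr


class Child(Base):
    # mypy reports an [override] error for the narrowed return type, so the
    # real Categorical/SparseArray methods carry a targeted ignore comment.
    def _formatter(self) -> None:  # type: ignore[override]
        # Returning None tells the caller to fall back to format inference.
        return None
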
pandas/core/arrays/datetimelike.py

+2 -2

@@ -2058,7 +2058,7 @@ def freq(self, value) -> None:
         self._freq = value

     @final
-    def _maybe_pin_freq(self, freq, validate_kwds: dict):
+    def _maybe_pin_freq(self, freq, validate_kwds: dict) -> None:
         """
         Constructor helper to pin the appropriate `freq` attribute. Assumes
         that self._freq is currently set to any freq inferred in
@@ -2092,7 +2092,7 @@ def _maybe_pin_freq(self, freq, validate_kwds: dict):

     @final
     @classmethod
-    def _validate_frequency(cls, index, freq: BaseOffset, **kwargs):
+    def _validate_frequency(cls, index, freq: BaseOffset, **kwargs) -> None:
         """
         Validate that a frequency is compatible with the values of a given
         Datetime Array/Index or Timedelta Array/Index

pandas/core/arrays/sparse/accessor.py

+3 -3

@@ -30,7 +30,7 @@ def __init__(self, data=None) -> None:
         self._parent = data
         self._validate(data)

-    def _validate(self, data):
+    def _validate(self, data) -> None:
         raise NotImplementedError


@@ -50,7 +50,7 @@ class SparseAccessor(BaseAccessor, PandasDelegate):
     array([2, 2, 2])
     """

-    def _validate(self, data):
+    def _validate(self, data) -> None:
         if not isinstance(data.dtype, SparseDtype):
             raise AttributeError(self._validation_msg)

@@ -243,7 +243,7 @@ class SparseFrameAccessor(BaseAccessor, PandasDelegate):
     0.5
     """

-    def _validate(self, data):
+    def _validate(self, data) -> None:
         dtypes = data.dtypes
         if not all(isinstance(t, SparseDtype) for t in dtypes):
             raise AttributeError(self._validation_msg)

pandas/core/arrays/sparse/array.py

+3 -1

@@ -1830,7 +1830,9 @@ def __repr__(self) -> str:
         pp_index = printing.pprint_thing(self.sp_index)
         return f"{pp_str}\nFill: {pp_fill}\n{pp_index}"

-    def _formatter(self, boxed: bool = False):
+    # error: Return type "None" of "_formatter" incompatible with return
+    # type "Callable[[Any], str | None]" in supertype "ExtensionArray"
+    def _formatter(self, boxed: bool = False) -> None:  # type: ignore[override]
         # Defer to the formatter from the GenericArrayFormatter calling us.
         # This will infer the correct formatter from the dtype of the values.
         return None

pandas/core/arrays/sparse/scipy_sparse.py

+1 -1

@@ -27,7 +27,7 @@
 )


-def _check_is_partition(parts: Iterable, whole: Iterable):
+def _check_is_partition(parts: Iterable, whole: Iterable) -> None:
     whole = set(whole)
     parts = [set(x) for x in parts]
     if set.intersection(*parts) != set():

pandas/core/arrays/string_.py

+1 -1

@@ -364,7 +364,7 @@ def __init__(self, values, copy: bool = False) -> None:
         self._validate()
         NDArrayBacked.__init__(self, self._ndarray, StringDtype(storage="python"))

-    def _validate(self):
+    def _validate(self) -> None:
         """Validate that we only store NA or strings."""
         if len(self._ndarray) and not lib.is_string_array(self._ndarray, skipna=True):
             raise ValueError("StringArray requires a sequence of strings or pandas.NA")

pandas/core/computation/eval.py

+4 -4

@@ -72,7 +72,7 @@ def _check_engine(engine: str | None) -> str:
     return engine


-def _check_parser(parser: str):
+def _check_parser(parser: str) -> None:
     """
     Make sure a valid parser is passed.

@@ -91,7 +91,7 @@ def _check_parser(parser: str):
         )


-def _check_resolvers(resolvers):
+def _check_resolvers(resolvers) -> None:
     if resolvers is not None:
         for resolver in resolvers:
             if not hasattr(resolver, "__getitem__"):
@@ -102,7 +102,7 @@ def _check_resolvers(resolvers):
                 )


-def _check_expression(expr):
+def _check_expression(expr) -> None:
     """
     Make sure an expression is not an empty string

@@ -149,7 +149,7 @@ def _convert_expression(expr) -> str:
     return s


-def _check_for_locals(expr: str, stack_level: int, parser: str):
+def _check_for_locals(expr: str, stack_level: int, parser: str) -> None:
     at_top_of_stack = stack_level == 0
     not_pandas_parser = parser != "pandas"

pandas/core/computation/ops.py

+1 -1

@@ -491,7 +491,7 @@ def stringify(value):
                 v = v.tz_convert("UTC")
             self.lhs.update(v)

-    def _disallow_scalar_only_bool_ops(self):
+    def _disallow_scalar_only_bool_ops(self) -> None:
         rhs = self.rhs
         lhs = self.lhs

pandas/core/dtypes/cast.py

+1 -1

@@ -231,7 +231,7 @@ def _maybe_unbox_datetimelike(value: Scalar, dtype: DtypeObj) -> Scalar:
     return value


-def _disallow_mismatched_datetimelike(value, dtype: DtypeObj):
+def _disallow_mismatched_datetimelike(value, dtype: DtypeObj) -> None:
     """
     numpy allows np.array(dt64values, dtype="timedelta64[ns]") and
     vice-versa, but we do not want to allow this, so we need to

pandas/core/frame.py

+2 -2

@@ -4316,7 +4316,7 @@ def _setitem_array(self, key, value):
         else:
             self._iset_not_inplace(key, value)

-    def _iset_not_inplace(self, key, value):
+    def _iset_not_inplace(self, key, value) -> None:
         # GH#39510 when setting with df[key] = obj with a list-like key and
         # list-like value, we iterate over those listlikes and set columns
         # one at a time. This is different from dispatching to
@@ -4360,7 +4360,7 @@ def igetitem(obj, i: int):
         finally:
             self.columns = orig_columns

-    def _setitem_frame(self, key, value):
+    def _setitem_frame(self, key, value) -> None:
         # support boolean setting with DataFrame input, e.g.
         # df[df > df2] = 0
         if isinstance(key, np.ndarray):

pandas/core/generic.py

+2 -2

@@ -4394,7 +4394,7 @@ def _check_is_chained_assignment_possible(self) -> bool_t:
         return False

     @final
-    def _check_setitem_copy(self, t: str = "setting", force: bool_t = False):
+    def _check_setitem_copy(self, t: str = "setting", force: bool_t = False) -> None:
         """

         Parameters
@@ -4510,7 +4510,7 @@ def __delitem__(self, key) -> None:
     # Unsorted

     @final
-    def _check_inplace_and_allows_duplicate_labels(self, inplace: bool_t):
+    def _check_inplace_and_allows_duplicate_labels(self, inplace: bool_t) -> None:
         if inplace and not self.flags.allows_duplicate_labels:
             raise ValueError(
                 "Cannot specify 'inplace=True' when "

pandas/core/indexes/base.py

+2 -2

@@ -3209,7 +3209,7 @@ def _get_reconciled_name_object(self, other):
         return self

     @final
-    def _validate_sort_keyword(self, sort):
+    def _validate_sort_keyword(self, sort) -> None:
         if sort not in [None, False, True]:
             raise ValueError(
                 "The 'sort' keyword only takes the values of "
@@ -6051,7 +6051,7 @@ def argsort(self, *args, **kwargs) -> npt.NDArray[np.intp]:
         # by RangeIndex, MultIIndex
         return self._data.argsort(*args, **kwargs)

-    def _check_indexing_error(self, key):
+    def _check_indexing_error(self, key) -> None:
         if not is_scalar(key):
             # if key is not a scalar, directly raise an error (the code below
             # would convert to numpy arrays and raise later any way) - GH29926

pandas/core/indexes/multi.py

+1 -1

@@ -1571,7 +1571,7 @@ def _format_multi(
     def _get_names(self) -> FrozenList:
         return FrozenList(self._names)

-    def _set_names(self, names, *, level=None, validate: bool = True):
+    def _set_names(self, names, *, level=None, validate: bool = True) -> None:
         """
         Set new names on index. Each name has to be a hashable type.

pandas/core/indexing.py

+8 -6

@@ -911,7 +911,7 @@ def __setitem__(self, key, value) -> None:
         iloc = self if self.name == "iloc" else self.obj.iloc
         iloc._setitem_with_indexer(indexer, value, self.name)

-    def _validate_key(self, key, axis: AxisInt):
+    def _validate_key(self, key, axis: AxisInt) -> None:
         """
         Ensure that key is valid for current indexer.

@@ -1225,7 +1225,7 @@ class _LocIndexer(_LocationIndexer):
     # Key Checks

     @doc(_LocationIndexer._validate_key)
-    def _validate_key(self, key, axis: Axis):
+    def _validate_key(self, key, axis: Axis) -> None:
         # valid for a collection of labels (we check their presence later)
         # slice of labels (where start-end in labels)
         # slice of integers (only if in the labels)
@@ -1572,7 +1572,7 @@ class _iLocIndexer(_LocationIndexer):
     # -------------------------------------------------------------------
     # Key Checks

-    def _validate_key(self, key, axis: AxisInt):
+    def _validate_key(self, key, axis: AxisInt) -> None:
         if com.is_bool_indexer(key):
             if hasattr(key, "index") and isinstance(key.index, Index):
                 if key.index.inferred_type == "integer":
@@ -1783,7 +1783,7 @@ def _get_setitem_indexer(self, key):

     # -------------------------------------------------------------------

-    def _setitem_with_indexer(self, indexer, value, name: str = "iloc"):
+    def _setitem_with_indexer(self, indexer, value, name: str = "iloc") -> None:
         """
         _setitem_with_indexer is for setting values on a Series/DataFrame
         using positional indexers.
@@ -2038,7 +2038,7 @@ def _setitem_with_indexer_split_path(self, indexer, value, name: str):
         for loc in ilocs:
             self._setitem_single_column(loc, value, pi)

-    def _setitem_with_indexer_2d_value(self, indexer, value):
+    def _setitem_with_indexer_2d_value(self, indexer, value) -> None:
         # We get here with np.ndim(value) == 2, excluding DataFrame,
         # which goes through _setitem_with_indexer_frame_value
         pi = indexer[0]
@@ -2060,7 +2060,9 @@ def _setitem_with_indexer_2d_value(self, indexer, value):
                 value_col = value_col.tolist()
             self._setitem_single_column(loc, value_col, pi)

-    def _setitem_with_indexer_frame_value(self, indexer, value: DataFrame, name: str):
+    def _setitem_with_indexer_frame_value(
+        self, indexer, value: DataFrame, name: str
+    ) -> None:
         ilocs = self._ensure_iterable_column_indexer(indexer[1])

         sub_indexer = list(indexer)

pandas/core/internals/base.py

+1 -1

@@ -53,7 +53,7 @@


 class _AlreadyWarned:
-    def __init__(self):
+    def __init__(self) -> None:
         # This class is used on the manager level to the block level to
         # ensure that we warn only once. The block method can update the
         # warned_already option without returning a value to keep the

pandas/core/internals/managers.py

+4 -2

@@ -1940,13 +1940,15 @@ def _post_setstate(self) -> None:
     def _block(self) -> Block:
         return self.blocks[0]

+    # error: Cannot override writeable attribute with read-only property
     @property
-    def _blknos(self):
+    def _blknos(self) -> None:  # type: ignore[override]
         """compat with BlockManager"""
         return None

+    # error: Cannot override writeable attribute with read-only property
     @property
-    def _blklocs(self):
+    def _blklocs(self) -> None:  # type: ignore[override]
         """compat with BlockManager"""
         return None

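The `# error:` comments added above `_blknos` and `_blklocs` record why the new `# type: ignore[override]` is needed: on `BlockManager` these names are plain (writeable) attributes, and the single-block manager here overrides them with read-only properties that simply return None for compatibility, which mypy rejects. A rough sketch of the same situation with made-up class names:

from __future__ import annotations

import numpy as np


class Manager:
    # A plain annotated attribute is writeable as far as mypy is concerned.
    blknos: np.ndarray


class SingleManager(Manager):
    # Overriding the writeable attribute with a read-only property triggers
    # "Cannot override writeable attribute with read-only property", hence
    # the targeted ignore in the real code.
    @property
    def blknos(self) -> None:  # type: ignore[override]
        return None
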
pandas/core/ops/array_ops.py

+1 -1

@@ -591,7 +591,7 @@ def maybe_prepare_scalar_for_op(obj, shape: Shape):
 }


-def _bool_arith_check(op, a: np.ndarray, b):
+def _bool_arith_check(op, a: np.ndarray, b) -> None:
     """
     In contrast to numpy, pandas raises an error for certain operations
     with booleans.

pandas/core/reshape/concat.py

+1 -1

@@ -765,7 +765,7 @@ def _get_concat_axis(self) -> Index:

         return concat_axis

-    def _maybe_check_integrity(self, concat_index: Index):
+    def _maybe_check_integrity(self, concat_index: Index) -> None:
         if self.verify_integrity:
             if not concat_index.is_unique:
                 overlap = concat_index[concat_index.duplicated()].unique()

pandas/core/reshape/encoding.py

+1 -1

@@ -169,7 +169,7 @@ def get_dummies(
     data_to_encode = data[columns]

     # validate prefixes and separator to avoid silently dropping cols
-    def check_len(item, name: str):
+    def check_len(item, name: str) -> None:
         if is_list_like(item):
             if not len(item) == data_to_encode.shape[1]:
                 len_msg = (

pandas/core/reshape/merge.py

+1 -1

@@ -2091,7 +2091,7 @@ def _maybe_require_matching_dtypes(
     ) -> None:
         # TODO: why do we do this for AsOfMerge but not the others?

-        def _check_dtype_match(left: ArrayLike, right: ArrayLike, i: int):
+        def _check_dtype_match(left: ArrayLike, right: ArrayLike, i: int) -> None:
             if left.dtype != right.dtype:
                 if isinstance(left.dtype, CategoricalDtype) and isinstance(
                     right.dtype, CategoricalDtype

pandas/core/reshape/reshape.py

+1 -1

@@ -188,7 +188,7 @@ def _make_sorted_values(self, values: np.ndarray) -> np.ndarray:
             return sorted_values
         return values

-    def _make_selectors(self):
+    def _make_selectors(self) -> None:
         new_levels = self.new_index_levels

         # make the mask
