Skip to content

Commit 75fa607

Browse files
tqa236 and pmhatre1
authored and committed
CLN: Remove unused functions (pandas-dev#57844)
1 parent d6d97d6 commit 75fa607

File tree

4 files changed

+0
-129
lines changed

4 files changed

+0
-129
lines changed

pandas/compat/numpy/function.py

-41
Original file line number | Diff line number | Diff line change
@@ -258,10 +258,6 @@ def validate_cum_func_with_skipna(skipna: bool, args, kwargs, name) -> bool:
258258
MINMAX_DEFAULTS, fname="max", method="both", max_fname_arg_count=1
259259
)
260260

261-
RESHAPE_DEFAULTS: dict[str, str] = {"order": "C"}
262-
validate_reshape = CompatValidator(
263-
RESHAPE_DEFAULTS, fname="reshape", method="both", max_fname_arg_count=1
264-
)
265261

266262
REPEAT_DEFAULTS: dict[str, Any] = {"axis": None}
267263
validate_repeat = CompatValidator(
@@ -273,12 +269,6 @@ def validate_cum_func_with_skipna(skipna: bool, args, kwargs, name) -> bool:
273269
ROUND_DEFAULTS, fname="round", method="both", max_fname_arg_count=1
274270
)
275271

276-
SORT_DEFAULTS: dict[str, int | str | None] = {}
277-
SORT_DEFAULTS["axis"] = -1
278-
SORT_DEFAULTS["kind"] = "quicksort"
279-
SORT_DEFAULTS["order"] = None
280-
validate_sort = CompatValidator(SORT_DEFAULTS, fname="sort", method="kwargs")
281-
282272
STAT_FUNC_DEFAULTS: dict[str, Any | None] = {}
283273
STAT_FUNC_DEFAULTS["dtype"] = None
284274
STAT_FUNC_DEFAULTS["out"] = None
@@ -324,20 +314,6 @@ def validate_cum_func_with_skipna(skipna: bool, args, kwargs, name) -> bool:
324314
validate_take = CompatValidator(TAKE_DEFAULTS, fname="take", method="kwargs")
325315

326316

327-
def validate_take_with_convert(convert: ndarray | bool | None, args, kwargs) -> bool:
328-
"""
329-
If this function is called via the 'numpy' library, the third parameter in
330-
its signature is 'axis', which takes either an ndarray or 'None', so check
331-
if the 'convert' parameter is either an instance of ndarray or is None
332-
"""
333-
if isinstance(convert, ndarray) or convert is None:
334-
args = (convert,) + args
335-
convert = True
336-
337-
validate_take(args, kwargs, max_fname_arg_count=3, method="both")
338-
return convert
339-
340-
341317
TRANSPOSE_DEFAULTS = {"axes": None}
342318
validate_transpose = CompatValidator(
343319
TRANSPOSE_DEFAULTS, fname="transpose", method="both", max_fname_arg_count=0
@@ -362,23 +338,6 @@ def validate_groupby_func(name: str, args, kwargs, allowed=None) -> None:
362338
)
363339

364340

365-
RESAMPLER_NUMPY_OPS = ("min", "max", "sum", "prod", "mean", "std", "var")
366-
367-
368-
def validate_resampler_func(method: str, args, kwargs) -> None:
369-
"""
370-
'args' and 'kwargs' should be empty because all of their necessary
371-
parameters are explicitly listed in the function signature
372-
"""
373-
if len(args) + len(kwargs) > 0:
374-
if method in RESAMPLER_NUMPY_OPS:
375-
raise UnsupportedFunctionCall(
376-
"numpy operations are not valid with resample. "
377-
f"Use .resample(...).{method}() instead"
378-
)
379-
raise TypeError("too many arguments passed in")
380-
381-
382341
def validate_minmax_axis(axis: AxisInt | None, ndim: int = 1) -> None:
383342
"""
384343
Ensure that the axis argument passed to min, max, argmin, or argmax is zero

pandas/core/internals/blocks.py

-19
Original file line number | Diff line number | Diff line change
@@ -1,6 +1,5 @@
11
from __future__ import annotations
22

3-
from functools import wraps
43
import inspect
54
import re
65
from typing import (
@@ -31,7 +30,6 @@
3130
AxisInt,
3231
DtypeBackend,
3332
DtypeObj,
34-
F,
3533
FillnaOptions,
3634
IgnoreRaise,
3735
InterpolateOptions,
@@ -131,23 +129,6 @@
131129
_dtype_obj = np.dtype("object")
132130

133131

134-
def maybe_split(meth: F) -> F:
135-
"""
136-
If we have a multi-column block, split and operate block-wise. Otherwise
137-
use the original method.
138-
"""
139-
140-
@wraps(meth)
141-
def newfunc(self, *args, **kwargs) -> list[Block]:
142-
if self.ndim == 1 or self.shape[0] == 1:
143-
return meth(self, *args, **kwargs)
144-
else:
145-
# Split and operate column-by-column
146-
return self.split_and_operate(meth, *args, **kwargs)
147-
148-
return cast(F, newfunc)
149-
150-
151132
class Block(PandasObject, libinternals.Block):
152133
"""
153134
Canonical n-dimensional unit of homogeneous dtype contained in a pandas

pandas/core/methods/describe.py

-49
Original file line number | Diff line number | Diff line change
@@ -18,7 +18,6 @@
1818

1919
import numpy as np
2020

21-
from pandas._libs.tslibs import Timestamp
2221
from pandas._typing import (
2322
DtypeObj,
2423
NDFrameT,
@@ -288,54 +287,6 @@ def describe_categorical_1d(
288287
return Series(result, index=names, name=data.name, dtype=dtype)
289288

290289

291-
def describe_timestamp_as_categorical_1d(
292-
data: Series,
293-
percentiles_ignored: Sequence[float],
294-
) -> Series:
295-
"""Describe series containing timestamp data treated as categorical.
296-
297-
Parameters
298-
----------
299-
data : Series
300-
Series to be described.
301-
percentiles_ignored : list-like of numbers
302-
Ignored, but in place to unify interface.
303-
"""
304-
names = ["count", "unique"]
305-
objcounts = data.value_counts()
306-
count_unique = len(objcounts[objcounts != 0])
307-
result: list[float | Timestamp] = [data.count(), count_unique]
308-
dtype = None
309-
if count_unique > 0:
310-
top, freq = objcounts.index[0], objcounts.iloc[0]
311-
tz = data.dt.tz
312-
asint = data.dropna().values.view("i8")
313-
top = Timestamp(top)
314-
if top.tzinfo is not None and tz is not None:
315-
# Don't tz_localize(None) if key is already tz-aware
316-
top = top.tz_convert(tz)
317-
else:
318-
top = top.tz_localize(tz)
319-
names += ["top", "freq", "first", "last"]
320-
result += [
321-
top,
322-
freq,
323-
Timestamp(asint.min(), tz=tz),
324-
Timestamp(asint.max(), tz=tz),
325-
]
326-
327-
# If the DataFrame is empty, set 'top' and 'freq' to None
328-
# to maintain output shape consistency
329-
else:
330-
names += ["top", "freq"]
331-
result += [np.nan, np.nan]
332-
dtype = "object"
333-
334-
from pandas import Series
335-
336-
return Series(result, index=names, name=data.name, dtype=dtype)
337-
338-
339290
def describe_timestamp_1d(data: Series, percentiles: Sequence[float]) -> Series:
340291
"""Describe series containing datetime64 dtype.
341292

pandas/core/sorting.py

-20
Original file line number | Diff line number | Diff line change
@@ -2,11 +2,9 @@
22

33
from __future__ import annotations
44

5-
from collections import defaultdict
65
from typing import (
76
TYPE_CHECKING,
87
Callable,
9-
DefaultDict,
108
cast,
119
)
1210

@@ -34,7 +32,6 @@
3432
if TYPE_CHECKING:
3533
from collections.abc import (
3634
Hashable,
37-
Iterable,
3835
Sequence,
3936
)
4037

@@ -592,23 +589,6 @@ def ensure_key_mapped(
592589
return result
593590

594591

595-
def get_flattened_list(
596-
comp_ids: npt.NDArray[np.intp],
597-
ngroups: int,
598-
levels: Iterable[Index],
599-
labels: Iterable[np.ndarray],
600-
) -> list[tuple]:
601-
"""Map compressed group id -> key tuple."""
602-
comp_ids = comp_ids.astype(np.int64, copy=False)
603-
arrays: DefaultDict[int, list[int]] = defaultdict(list)
604-
for labs, level in zip(labels, levels):
605-
table = hashtable.Int64HashTable(ngroups)
606-
table.map_keys_to_values(comp_ids, labs.astype(np.int64, copy=False))
607-
for i in range(ngroups):
608-
arrays[i].append(level[table.get_item(i)])
609-
return [tuple(array) for array in arrays.values()]
610-
611-
612592
def get_indexer_dict(
613593
label_list: list[np.ndarray], keys: list[Index]
614594
) -> dict[Hashable, npt.NDArray[np.intp]]:

0 commit comments

Comments
 (0)