
ENH: Use find_stack_level in pandas.core #44358

Merged 3 commits on Nov 12, 2021
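
The changes below replace hardcoded stacklevel=N arguments in warnings.warn calls throughout pandas.core with pandas.util._exceptions.find_stack_level(), which computes the appropriate level at runtime. Hardcoded values go stale whenever an intermediate internal call layer is added or removed, and the warning then points at pandas internals instead of the user's code. For context, here is a minimal sketch of the idea behind such a helper; it is an illustration of the technique, not the exact pandas implementation:

import inspect
import os


def find_stack_level() -> int:
    """Return the stacklevel of the first frame outside the pandas package.

    Passing this to ``warnings.warn`` keeps the warning pointed at the
    caller's code no matter how deeply the warning site is nested inside
    pandas-internal helpers.
    """
    import pandas as pd  # the package whose frames should be skipped

    pkg_dir = os.path.dirname(pd.__file__)      # frames under here are internal
    test_dir = os.path.join(pkg_dir, "tests")   # pandas' own tests count as external

    n = 0
    frame = inspect.currentframe()
    while frame is not None:
        fname = inspect.getfile(frame)
        if fname.startswith(pkg_dir) and not fname.startswith(test_dir):
            n += 1
            frame = frame.f_back
        else:
            break
    return n

A warning site then reads warnings.warn(msg, FutureWarning, stacklevel=find_stack_level()), which is exactly the substitution made in every file below.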
pandas/core/accessor.py: 2 additions & 1 deletion
@@ -9,6 +9,7 @@
import warnings

from pandas.util._decorators import doc
from pandas.util._exceptions import find_stack_level


class DirNamesMixin:
@@ -267,7 +268,7 @@ def decorator(accessor):
f"{repr(name)} for type {repr(cls)} is overriding a preexisting "
f"attribute with the same name.",
UserWarning,
stacklevel=2,
stacklevel=find_stack_level(),
)
setattr(cls, name, CachedAccessor(name, accessor))
cls._accessors.add(name)
pandas/core/arraylike.py: 2 additions & 1 deletion
@@ -11,6 +11,7 @@
import numpy as np

from pandas._libs import lib
from pandas.util._exceptions import find_stack_level

from pandas.core.construction import extract_array
from pandas.core.ops import (
@@ -210,7 +211,7 @@ def _maybe_fallback(ufunc: np.ufunc, method: str, *inputs: Any, **kwargs: Any):
"or align manually (eg 'df1, df2 = df1.align(df2)') before passing to "
"the ufunc to obtain the future behaviour and silence this warning.",
FutureWarning,
stacklevel=4,
stacklevel=find_stack_level(),
)

# keep the first dataframe of the inputs, other DataFrame/Series is
pandas/core/arrays/categorical.py: 10 additions & 10 deletions
@@ -390,7 +390,7 @@ def __init__(
"Allowing scalars in the Categorical constructor is deprecated "
"and will raise in a future version. Use `[value]` instead",
FutureWarning,
stacklevel=2,
stacklevel=find_stack_level(),
)
values = [values]

@@ -945,7 +945,7 @@ def set_categories(
"a future version. Removing unused categories will always "
"return a new Categorical object.",
FutureWarning,
stacklevel=2,
stacklevel=find_stack_level(),
)
else:
inplace = False
@@ -1045,7 +1045,7 @@ def rename_categories(self, new_categories, inplace=no_default):
"a future version. Removing unused categories will always "
"return a new Categorical object.",
FutureWarning,
stacklevel=2,
stacklevel=find_stack_level(),
)
else:
inplace = False
@@ -1177,7 +1177,7 @@ def add_categories(self, new_categories, inplace=no_default):
"a future version. Removing unused categories will always "
"return a new Categorical object.",
FutureWarning,
stacklevel=2,
stacklevel=find_stack_level(),
)
else:
inplace = False
@@ -1252,7 +1252,7 @@ def remove_categories(self, removals, inplace=no_default):
"a future version. Removing unused categories will always "
"return a new Categorical object.",
FutureWarning,
stacklevel=2,
stacklevel=find_stack_level(),
)
else:
inplace = False
@@ -1327,7 +1327,7 @@ def remove_unused_categories(self, inplace=no_default):
"remove_unused_categories is deprecated and "
"will be removed in a future version.",
FutureWarning,
stacklevel=2,
stacklevel=find_stack_level(),
)
else:
inplace = False
@@ -1884,7 +1884,7 @@ def to_dense(self) -> np.ndarray:
"Categorical.to_dense is deprecated and will be removed in "
"a future version. Use np.asarray(cat) instead.",
FutureWarning,
stacklevel=2,
stacklevel=find_stack_level(),
)
return np.asarray(self)

@@ -1901,7 +1901,7 @@ def _codes(self, value: np.ndarray):
"Setting the codes on a Categorical is deprecated and will raise in "
"a future version. Create a new Categorical object instead",
FutureWarning,
stacklevel=2,
stacklevel=find_stack_level(),
) # GH#40606
NDArrayBacked.__init__(self, value, self.dtype)

@@ -1924,7 +1924,7 @@ def take_nd(self, indexer, allow_fill: bool = False, fill_value=None):
warn(
"Categorical.take_nd is deprecated, use Categorical.take instead",
FutureWarning,
stacklevel=2,
stacklevel=find_stack_level(),
)
return self.take(indexer, allow_fill=allow_fill, fill_value=fill_value)

@@ -2344,7 +2344,7 @@ def is_dtype_equal(self, other) -> bool:
"Categorical.is_dtype_equal is deprecated and will be removed "
"in a future version",
FutureWarning,
stacklevel=2,
stacklevel=find_stack_level(),
)
try:
return self._categories_match_up_to_permutation(other)
pandas/core/arrays/datetimes.py: 2 additions & 2 deletions
@@ -1206,7 +1206,7 @@ def to_perioddelta(self, freq) -> TimedeltaArray:
"Use `dtindex - dtindex.to_period(freq).to_timestamp()` instead.",
FutureWarning,
# stacklevel chosen to be correct for when called from DatetimeIndex
stacklevel=3,
stacklevel=find_stack_level(),
)
from pandas.core.arrays.timedeltas import TimedeltaArray

@@ -1373,7 +1373,7 @@ def weekofyear(self):
"weekofyear and return an Index, you may call "
"pd.Int64Index(idx.isocalendar().week)",
FutureWarning,
stacklevel=3,
stacklevel=find_stack_level(),
)
week_series = self.isocalendar().week
if week_series.hasnans:
pandas/core/arrays/sparse/array.py: 2 additions & 2 deletions
@@ -467,7 +467,7 @@ def __init__(
"loses timezone information. Cast to object before "
"sparse to retain timezone information.",
UserWarning,
stacklevel=2,
stacklevel=find_stack_level(),
)
data = np.asarray(data, dtype="datetime64[ns]")
if fill_value is NaT:
@@ -1089,7 +1089,7 @@ def searchsorted(
) -> npt.NDArray[np.intp] | np.intp:

msg = "searchsorted requires high memory usage."
warnings.warn(msg, PerformanceWarning, stacklevel=2)
warnings.warn(msg, PerformanceWarning, stacklevel=find_stack_level())
if not is_scalar(v):
v = np.asarray(v)
v = np.asarray(v)
pandas/core/arrays/sparse/dtype.py: 2 additions & 1 deletion
@@ -16,6 +16,7 @@
type_t,
)
from pandas.errors import PerformanceWarning
from pandas.util._exceptions import find_stack_level

from pandas.core.dtypes.base import (
ExtensionDtype,
@@ -389,7 +390,7 @@ def _get_common_dtype(self, dtypes: list[DtypeObj]) -> DtypeObj | None:
f"values: '{fill_values}'. Picking the first and "
"converting the rest.",
PerformanceWarning,
stacklevel=6,
stacklevel=find_stack_level(),
)

np_dtypes = [x.subtype if isinstance(x, SparseDtype) else x for x in dtypes]
pandas/core/common.py: 2 additions & 1 deletion
@@ -36,6 +36,7 @@
Scalar,
T,
)
from pandas.util._exceptions import find_stack_level

from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
from pandas.core.dtypes.common import (
@@ -175,7 +176,7 @@ def cast_scalar_indexer(val, warn_float: bool = False):
"Indexing with a float is deprecated, and will raise an IndexError "
"in pandas 2.0. You can manually convert to an integer key instead.",
FutureWarning,
stacklevel=3,
stacklevel=find_stack_level(),
)
return int(val)
return val
pandas/core/computation/align.py: 4 additions & 1 deletion
@@ -16,6 +16,7 @@
import numpy as np

from pandas.errors import PerformanceWarning
from pandas.util._exceptions import find_stack_level

from pandas.core.dtypes.generic import (
ABCDataFrame,
@@ -126,7 +127,9 @@ def _align_core(terms):
f"than an order of magnitude on term {repr(terms[i].name)}, "
f"by more than {ordm:.4g}; performance may suffer."
)
warnings.warn(w, category=PerformanceWarning, stacklevel=6)
warnings.warn(
w, category=PerformanceWarning, stacklevel=find_stack_level()
)

f = partial(ti.reindex, reindexer, axis=axis, copy=False)

pandas/core/computation/eval.py: 2 additions & 1 deletion
@@ -7,6 +7,7 @@
import warnings

from pandas._libs.lib import no_default
from pandas.util._exceptions import find_stack_level
from pandas.util._validators import validate_bool_kwarg

from pandas.core.computation.engines import ENGINES
@@ -308,7 +309,7 @@ def eval(
"will be removed in a future version."
),
FutureWarning,
stacklevel=2,
stacklevel=find_stack_level(),
)

exprs: list[str | BinOp]
pandas/core/config_init.py: 3 additions & 1 deletion
@@ -25,6 +25,8 @@
is_text,
)

from pandas.util._exceptions import find_stack_level

# compute

use_bottleneck_doc = """
@@ -373,7 +375,7 @@ def _deprecate_negative_int_max_colwidth(key):
"will not be supported in future version. Instead, use None "
"to not limit the column width.",
FutureWarning,
stacklevel=4,
stacklevel=find_stack_level(),
)

cf.register_option(
pandas/core/construction.py: 3 additions & 2 deletions
@@ -25,6 +25,7 @@
DtypeObj,
)
from pandas.errors import IntCastingNaNError
from pandas.util._exceptions import find_stack_level

from pandas.core.dtypes.base import (
ExtensionDtype,
@@ -538,7 +539,7 @@ def sanitize_array(
"if they cannot be cast losslessly (matching Series behavior). "
"To retain the old behavior, use DataFrame(data).astype(dtype)",
FutureWarning,
stacklevel=4,
stacklevel=find_stack_level(),
)
# GH#40110 until the deprecation is enforced, we _dont_
# ignore the dtype for DataFrame, and _do_ cast even though
@@ -777,7 +778,7 @@ def _try_cast(
"passed to 'DataFrame', either all columns will be cast to that "
"dtype, or a TypeError will be raised.",
FutureWarning,
stacklevel=7,
stacklevel=find_stack_level(),
)
subarr = np.array(arr, dtype=object, copy=copy)
return subarr
pandas/core/describe.py: 2 additions & 1 deletion
@@ -23,6 +23,7 @@

from pandas._libs.tslibs import Timestamp
from pandas._typing import NDFrameT
from pandas.util._exceptions import find_stack_level
from pandas.util._validators import validate_percentile

from pandas.core.dtypes.common import (
@@ -377,7 +378,7 @@ def select_describe_func(
"version of pandas. Specify `datetime_is_numeric=True` to "
"silence this warning and adopt the future behavior now.",
FutureWarning,
stacklevel=5,
stacklevel=find_stack_level(),
)
return describe_timestamp_as_categorical_1d
elif is_timedelta64_dtype(data.dtype):
pandas/core/dtypes/cast.py: 8 additions & 10 deletions
@@ -563,7 +563,7 @@ def _maybe_promote(dtype: np.dtype, fill_value=np.nan):
"dtype is deprecated. In a future version, this will be cast "
"to object dtype. Pass `fill_value=Timestamp(date_obj)` instead.",
FutureWarning,
stacklevel=8,
stacklevel=find_stack_level(),
)
return dtype, fv
elif isinstance(fill_value, str):
@@ -1133,7 +1133,7 @@ def astype_nansafe(
"Use .view(...) instead.",
FutureWarning,
# stacklevel chosen to be correct when reached via Series.astype
stacklevel=7,
stacklevel=find_stack_level(),
)
if isna(arr).any():
raise ValueError("Cannot convert NaT values to integer")
@@ -1155,7 +1155,7 @@
"Use .view(...) instead.",
FutureWarning,
# stacklevel chosen to be correct when reached via Series.astype
stacklevel=7,
stacklevel=find_stack_level(),
)
if isna(arr).any():
raise ValueError("Cannot convert NaT values to integer")
@@ -1651,7 +1651,7 @@ def maybe_cast_to_datetime(
"`pd.Series(values).dt.tz_localize(None)` "
"instead.",
FutureWarning,
stacklevel=8,
stacklevel=find_stack_level(),
)
# equiv: dta.view(dtype)
# Note: NOT equivalent to dta.astype(dtype)
@@ -1691,7 +1691,7 @@
".tz_localize('UTC').tz_convert(dtype.tz) "
"or pd.Series(data.view('int64'), dtype=dtype)",
FutureWarning,
stacklevel=5,
stacklevel=find_stack_level(),
)

value = dta.tz_localize("UTC").tz_convert(dtype.tz)
@@ -1859,7 +1859,7 @@ def construct_2d_arraylike_from_scalar(
shape = (length, width)

if dtype.kind in ["m", "M"]:
value = maybe_unbox_datetimelike_tz_deprecation(value, dtype, stacklevel=4)
value = maybe_unbox_datetimelike_tz_deprecation(value, dtype)
# error: Non-overlapping equality check (left operand type: "dtype[Any]", right
# operand type: "Type[object]")
elif dtype == object: # type: ignore[comparison-overlap]
@@ -1932,9 +1932,7 @@ def construct_1d_arraylike_from_scalar(
return subarr


def maybe_unbox_datetimelike_tz_deprecation(
value: Scalar, dtype: DtypeObj, stacklevel: int = 5
):
def maybe_unbox_datetimelike_tz_deprecation(value: Scalar, dtype: DtypeObj):
"""
Wrap maybe_unbox_datetimelike with a check for a timezone-aware Timestamp
along with a timezone-naive datetime64 dtype, which is deprecated.
@@ -1963,7 +1961,7 @@ def maybe_unbox_datetimelike_tz_deprecation(
"`pd.Series(values).dt.tz_localize(None)` "
"instead.",
FutureWarning,
stacklevel=stacklevel,
stacklevel=find_stack_level(),
)
new_value = value.tz_localize(None)
return maybe_unbox_datetimelike(new_value, dtype)
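
A second effect of the dynamic lookup shows up in the maybe_unbox_datetimelike_tz_deprecation hunks above: helpers no longer need a stacklevel parameter threaded through from their callers. Roughly, the calling pattern in construct_2d_arraylike_from_scalar changes like this (a simplified restatement of the diff, not new API):

# Before: the caller had to know how many frames sat between it and the warning.
value = maybe_unbox_datetimelike_tz_deprecation(value, dtype, stacklevel=4)

# After: the helper resolves the depth itself via find_stack_level(),
# so the extra parameter is dropped from the signature.
value = maybe_unbox_datetimelike_tz_deprecation(value, dtype)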
pandas/core/dtypes/common.py: 3 additions & 2 deletions
@@ -22,6 +22,7 @@
ArrayLike,
DtypeObj,
)
from pandas.util._exceptions import find_stack_level

from pandas.core.dtypes.base import _registry as registry
from pandas.core.dtypes.dtypes import (
@@ -304,7 +305,7 @@ def is_categorical(arr) -> bool:
"is_categorical is deprecated and will be removed in a future version. "
"Use is_categorical_dtype instead.",
FutureWarning,
stacklevel=2,
stacklevel=find_stack_level(),
)
return isinstance(arr, ABCCategorical) or is_categorical_dtype(arr)

@@ -1378,7 +1379,7 @@ def is_extension_type(arr) -> bool:
"'is_extension_type' is deprecated and will be removed in a future "
"version. Use 'is_extension_array_dtype' instead.",
FutureWarning,
stacklevel=2,
stacklevel=find_stack_level(),
)

if is_categorical_dtype(arr):