Skip to content

Commit a00ca8e

Browse files
jbrockmendel
authored and
Kevin D Smith
committed
STY: De-privatize imported names (pandas-dev#36235)
1 parent ef5a7e2 commit a00ca8e

File tree

18 files changed

+88
-77
lines changed

18 files changed

+88
-77
lines changed

pandas/_libs/interval.pyx

+2-2
Original file line numberDiff line numberDiff line change
@@ -46,7 +46,7 @@ from pandas._libs.tslibs.util cimport (
4646
is_timedelta64_object,
4747
)
4848

49-
_VALID_CLOSED = frozenset(['left', 'right', 'both', 'neither'])
49+
VALID_CLOSED = frozenset(['left', 'right', 'both', 'neither'])
5050

5151

5252
cdef class IntervalMixin:
@@ -318,7 +318,7 @@ cdef class Interval(IntervalMixin):
318318
self._validate_endpoint(left)
319319
self._validate_endpoint(right)
320320

321-
if closed not in _VALID_CLOSED:
321+
if closed not in VALID_CLOSED:
322322
raise ValueError(f"invalid option for 'closed': {closed}")
323323
if not left <= right:
324324
raise ValueError("left side of interval must be <= right side")

pandas/core/arrays/_arrow_utils.py

+2-2
Original file line numberDiff line numberDiff line change
@@ -4,7 +4,7 @@
44
import numpy as np
55
import pyarrow
66

7-
from pandas.core.arrays.interval import _VALID_CLOSED
7+
from pandas.core.arrays.interval import VALID_CLOSED
88

99
_pyarrow_version_ge_015 = LooseVersion(pyarrow.__version__) >= LooseVersion("0.15")
1010

@@ -83,7 +83,7 @@ class ArrowIntervalType(pyarrow.ExtensionType):
8383
def __init__(self, subtype, closed):
8484
# attributes need to be set first before calling
8585
# super init (as that calls serialize)
86-
assert closed in _VALID_CLOSED
86+
assert closed in VALID_CLOSED
8787
self._closed = closed
8888
if not isinstance(subtype, pyarrow.DataType):
8989
subtype = pyarrow.type_for_alias(str(subtype))

pandas/core/arrays/interval.py

+8-4
Original file line numberDiff line numberDiff line change
@@ -5,7 +5,12 @@
55

66
from pandas._config import get_option
77

8-
from pandas._libs.interval import Interval, IntervalMixin, intervals_to_interval_bounds
8+
from pandas._libs.interval import (
9+
VALID_CLOSED,
10+
Interval,
11+
IntervalMixin,
12+
intervals_to_interval_bounds,
13+
)
914
from pandas.compat.numpy import function as nv
1015
from pandas.util._decorators import Appender
1116

@@ -42,7 +47,6 @@
4247
from pandas.core.indexers import check_array_indexer
4348
from pandas.core.indexes.base import ensure_index
4449

45-
_VALID_CLOSED = {"left", "right", "both", "neither"}
4650
_interval_shared_docs = {}
4751

4852
_shared_docs_kwargs = dict(
@@ -475,7 +479,7 @@ def _validate(self):
475479
* left and right have the same missing values
476480
* left is always below right
477481
"""
478-
if self.closed not in _VALID_CLOSED:
482+
if self.closed not in VALID_CLOSED:
479483
msg = f"invalid option for 'closed': {self.closed}"
480484
raise ValueError(msg)
481485
if len(self.left) != len(self.right):
@@ -1012,7 +1016,7 @@ def closed(self):
10121016
)
10131017
)
10141018
def set_closed(self, closed):
1015-
if closed not in _VALID_CLOSED:
1019+
if closed not in VALID_CLOSED:
10161020
msg = f"invalid option for 'closed': {closed}"
10171021
raise ValueError(msg)
10181022

pandas/core/arrays/sparse/__init__.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -5,6 +5,6 @@
55
BlockIndex,
66
IntIndex,
77
SparseArray,
8-
_make_index,
8+
make_sparse_index,
99
)
1010
from pandas.core.arrays.sparse.dtype import SparseDtype

pandas/core/arrays/sparse/array.py

+2-2
Original file line numberDiff line numberDiff line change
@@ -1556,15 +1556,15 @@ def make_sparse(arr: np.ndarray, kind="block", fill_value=None, dtype=None, copy
15561556
else:
15571557
indices = mask.nonzero()[0].astype(np.int32)
15581558

1559-
index = _make_index(length, indices, kind)
1559+
index = make_sparse_index(length, indices, kind)
15601560
sparsified_values = arr[mask]
15611561
if dtype is not None:
15621562
sparsified_values = astype_nansafe(sparsified_values, dtype=dtype)
15631563
# TODO: copy
15641564
return sparsified_values, index, fill_value
15651565

15661566

1567-
def _make_index(length, indices, kind):
1567+
def make_sparse_index(length, indices, kind):
15681568

15691569
if kind == "block" or isinstance(kind, BlockIndex):
15701570
locs, lens = splib.get_blocks(indices)

pandas/core/computation/engines.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -130,7 +130,7 @@ def _evaluate(self) -> None:
130130
pass
131131

132132

133-
_engines: Dict[str, Type[AbstractEngine]] = {
133+
ENGINES: Dict[str, Type[AbstractEngine]] = {
134134
"numexpr": NumExprEngine,
135135
"python": PythonEngine,
136136
}

pandas/core/computation/eval.py

+7-7
Original file line numberDiff line numberDiff line change
@@ -9,8 +9,8 @@
99
from pandas._libs.lib import no_default
1010
from pandas.util._validators import validate_bool_kwarg
1111

12-
from pandas.core.computation.engines import _engines
13-
from pandas.core.computation.expr import Expr, _parsers
12+
from pandas.core.computation.engines import ENGINES
13+
from pandas.core.computation.expr import PARSERS, Expr
1414
from pandas.core.computation.parsing import tokenize_string
1515
from pandas.core.computation.scope import ensure_scope
1616

@@ -43,8 +43,8 @@ def _check_engine(engine: Optional[str]) -> str:
4343
if engine is None:
4444
engine = "numexpr" if NUMEXPR_INSTALLED else "python"
4545

46-
if engine not in _engines:
47-
valid_engines = list(_engines.keys())
46+
if engine not in ENGINES:
47+
valid_engines = list(ENGINES.keys())
4848
raise KeyError(
4949
f"Invalid engine '{engine}' passed, valid engines are {valid_engines}"
5050
)
@@ -75,9 +75,9 @@ def _check_parser(parser: str):
7575
KeyError
7676
* If an invalid parser is passed
7777
"""
78-
if parser not in _parsers:
78+
if parser not in PARSERS:
7979
raise KeyError(
80-
f"Invalid parser '{parser}' passed, valid parsers are {_parsers.keys()}"
80+
f"Invalid parser '{parser}' passed, valid parsers are {PARSERS.keys()}"
8181
)
8282

8383

@@ -341,7 +341,7 @@ def eval(
341341
parsed_expr = Expr(expr, engine=engine, parser=parser, env=env)
342342

343343
# construct the engine and evaluate the parsed expression
344-
eng = _engines[engine]
344+
eng = ENGINES[engine]
345345
eng_inst = eng(parsed_expr)
346346
ret = eng_inst.evaluate()
347347

pandas/core/computation/expr.py

+2-2
Original file line numberDiff line numberDiff line change
@@ -782,7 +782,7 @@ def __init__(
782782
self.env = env or Scope(level=level + 1)
783783
self.engine = engine
784784
self.parser = parser
785-
self._visitor = _parsers[parser](self.env, self.engine, self.parser)
785+
self._visitor = PARSERS[parser](self.env, self.engine, self.parser)
786786
self.terms = self.parse()
787787

788788
@property
@@ -814,4 +814,4 @@ def names(self):
814814
return frozenset(term.name for term in com.flatten(self.terms))
815815

816816

817-
_parsers = {"python": PythonExprVisitor, "pandas": PandasExprVisitor}
817+
PARSERS = {"python": PythonExprVisitor, "pandas": PandasExprVisitor}

pandas/core/config_init.py

+2-2
Original file line numberDiff line numberDiff line change
@@ -314,9 +314,9 @@ def use_numba_cb(key):
314314

315315

316316
def table_schema_cb(key):
317-
from pandas.io.formats.printing import _enable_data_resource_formatter
317+
from pandas.io.formats.printing import enable_data_resource_formatter
318318

319-
_enable_data_resource_formatter(cf.get_option(key))
319+
enable_data_resource_formatter(cf.get_option(key))
320320

321321

322322
def is_terminal() -> bool:

pandas/core/groupby/generic.py

+5-5
Original file line numberDiff line numberDiff line change
@@ -70,9 +70,9 @@
7070
GroupBy,
7171
_agg_template,
7272
_apply_docs,
73-
_group_selection_context,
7473
_transform_template,
7574
get_groupby,
75+
group_selection_context,
7676
)
7777
from pandas.core.groupby.numba_ import generate_numba_func, split_for_numba
7878
from pandas.core.indexes.api import Index, MultiIndex, all_indexes_same
@@ -230,7 +230,7 @@ def aggregate(self, func=None, *args, engine=None, engine_kwargs=None, **kwargs)
230230
raise NotImplementedError(
231231
"Numba engine can only be used with a single function."
232232
)
233-
with _group_selection_context(self):
233+
with group_selection_context(self):
234234
data = self._selected_obj
235235
result, index = self._aggregate_with_numba(
236236
data.to_frame(), func, *args, engine_kwargs=engine_kwargs, **kwargs
@@ -685,7 +685,7 @@ def value_counts(
685685
self, normalize=False, sort=True, ascending=False, bins=None, dropna=True
686686
):
687687

688-
from pandas.core.reshape.merge import _get_join_indexers
688+
from pandas.core.reshape.merge import get_join_indexers
689689
from pandas.core.reshape.tile import cut
690690

691691
if bins is not None and not np.iterable(bins):
@@ -787,7 +787,7 @@ def value_counts(
787787

788788
right = [diff.cumsum() - 1, codes[-1]]
789789

790-
_, idx = _get_join_indexers(left, right, sort=False, how="left")
790+
_, idx = get_join_indexers(left, right, sort=False, how="left")
791791
out = np.where(idx != -1, out[idx], 0)
792792

793793
if sort:
@@ -942,7 +942,7 @@ def aggregate(self, func=None, *args, engine=None, engine_kwargs=None, **kwargs)
942942
raise NotImplementedError(
943943
"Numba engine can only be used with a single function."
944944
)
945-
with _group_selection_context(self):
945+
with group_selection_context(self):
946946
data = self._selected_obj
947947
result, index = self._aggregate_with_numba(
948948
data, func, *args, engine_kwargs=engine_kwargs, **kwargs

pandas/core/groupby/groupby.py

+10-10
Original file line numberDiff line numberDiff line change
@@ -459,9 +459,9 @@ def f(self):
459459

460460

461461
@contextmanager
462-
def _group_selection_context(groupby: "_GroupBy"):
462+
def group_selection_context(groupby: "_GroupBy"):
463463
"""
464-
Set / reset the _group_selection_context.
464+
Set / reset the group_selection_context.
465465
"""
466466
groupby._set_group_selection()
467467
try:
@@ -737,7 +737,7 @@ def pipe(self, func, *args, **kwargs):
737737
def _make_wrapper(self, name: str) -> Callable:
738738
assert name in self._apply_allowlist
739739

740-
with _group_selection_context(self):
740+
with group_selection_context(self):
741741
# need to setup the selection
742742
# as are not passed directly but in the grouper
743743
f = getattr(self._obj_with_exclusions, name)
@@ -868,7 +868,7 @@ def f(g):
868868
# fails on *some* columns, e.g. a numeric operation
869869
# on a string grouper column
870870

871-
with _group_selection_context(self):
871+
with group_selection_context(self):
872872
return self._python_apply_general(f, self._selected_obj)
873873

874874
return result
@@ -994,7 +994,7 @@ def _agg_general(
994994
alias: str,
995995
npfunc: Callable,
996996
):
997-
with _group_selection_context(self):
997+
with group_selection_context(self):
998998
# try a cython aggregation if we can
999999
try:
10001000
return self._cython_agg_general(
@@ -1499,7 +1499,7 @@ def var(self, ddof: int = 1):
14991499
)
15001500
else:
15011501
func = lambda x: x.var(ddof=ddof)
1502-
with _group_selection_context(self):
1502+
with group_selection_context(self):
15031503
return self._python_agg_general(func)
15041504

15051505
@Substitution(name="groupby")
@@ -1658,7 +1658,7 @@ def ohlc(self) -> DataFrame:
16581658

16591659
@doc(DataFrame.describe)
16601660
def describe(self, **kwargs):
1661-
with _group_selection_context(self):
1661+
with group_selection_context(self):
16621662
result = self.apply(lambda x: x.describe(**kwargs))
16631663
if self.axis == 1:
16641664
return result.T
@@ -1963,7 +1963,7 @@ def nth(self, n: Union[int, List[int]], dropna: Optional[str] = None) -> DataFra
19631963
nth_values = list(set(n))
19641964

19651965
nth_array = np.array(nth_values, dtype=np.intp)
1966-
with _group_selection_context(self):
1966+
with group_selection_context(self):
19671967

19681968
mask_left = np.in1d(self._cumcount_array(), nth_array)
19691969
mask_right = np.in1d(
@@ -2226,7 +2226,7 @@ def ngroup(self, ascending: bool = True):
22262226
5 0
22272227
dtype: int64
22282228
"""
2229-
with _group_selection_context(self):
2229+
with group_selection_context(self):
22302230
index = self._selected_obj.index
22312231
result = self._obj_1d_constructor(self.grouper.group_info[0], index)
22322232
if not ascending:
@@ -2287,7 +2287,7 @@ def cumcount(self, ascending: bool = True):
22872287
5 0
22882288
dtype: int64
22892289
"""
2290-
with _group_selection_context(self):
2290+
with group_selection_context(self):
22912291
index = self._selected_obj.index
22922292
cumcounts = self._cumcount_array(ascending=ascending)
22932293
return self._obj_1d_constructor(cumcounts, index)

pandas/core/indexes/base.py

+2-2
Original file line numberDiff line numberDiff line change
@@ -3660,15 +3660,15 @@ def _join_multi(self, other, how, return_indexers=True):
36603660
return result
36613661

36623662
def _join_non_unique(self, other, how="left", return_indexers=False):
3663-
from pandas.core.reshape.merge import _get_join_indexers
3663+
from pandas.core.reshape.merge import get_join_indexers
36643664

36653665
# We only get here if dtypes match
36663666
assert self.dtype == other.dtype
36673667

36683668
lvalues = self._get_engine_target()
36693669
rvalues = other._get_engine_target()
36703670

3671-
left_idx, right_idx = _get_join_indexers(
3671+
left_idx, right_idx = get_join_indexers(
36723672
[lvalues], [rvalues], how=how, sort=True
36733673
)
36743674

pandas/core/indexes/interval.py

-1
Original file line numberDiff line numberDiff line change
@@ -59,7 +59,6 @@
5959
if TYPE_CHECKING:
6060
from pandas import CategoricalIndex # noqa:F401
6161

62-
_VALID_CLOSED = {"left", "right", "both", "neither"}
6362
_index_doc_kwargs = dict(ibase._index_doc_kwargs)
6463

6564
_index_doc_kwargs.update(

pandas/core/reshape/merge.py

+2-2
Original file line numberDiff line numberDiff line change
@@ -859,7 +859,7 @@ def _maybe_add_join_keys(self, result, left_indexer, right_indexer):
859859

860860
def _get_join_indexers(self):
861861
""" return the join indexers """
862-
return _get_join_indexers(
862+
return get_join_indexers(
863863
self.left_join_keys, self.right_join_keys, sort=self.sort, how=self.how
864864
)
865865

@@ -1298,7 +1298,7 @@ def _validate(self, validate: str):
12981298
raise ValueError("Not a valid argument for validate")
12991299

13001300

1301-
def _get_join_indexers(
1301+
def get_join_indexers(
13021302
left_keys, right_keys, sort: bool = False, how: str = "inner", **kwargs
13031303
):
13041304
"""

pandas/io/formats/printing.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -243,7 +243,7 @@ def pprint_thing_encoded(
243243
return value.encode(encoding, errors)
244244

245245

246-
def _enable_data_resource_formatter(enable: bool) -> None:
246+
def enable_data_resource_formatter(enable: bool) -> None:
247247
if "IPython" not in sys.modules:
248248
# definitely not in IPython
249249
return

0 commit comments

Comments (0)