STY: De-privatize imported names #36156

Merged
merged 1 commit into from Sep 6, 2020

4 changes: 2 additions & 2 deletions pandas/_libs/hashtable.pyx
@@ -56,7 +56,7 @@ from pandas._libs.missing cimport checknull


cdef int64_t NPY_NAT = util.get_nat()
_SIZE_HINT_LIMIT = (1 << 20) + 7
SIZE_HINT_LIMIT = (1 << 20) + 7


cdef Py_ssize_t _INIT_VEC_CAP = 128
@@ -176,7 +176,7 @@ def unique_label_indices(const int64_t[:] labels):
ndarray[int64_t, ndim=1] arr
Int64VectorData *ud = idx.data

kh_resize_int64(table, min(n, _SIZE_HINT_LIMIT))
kh_resize_int64(table, min(n, SIZE_HINT_LIMIT))

with nogil:
for i in range(n):
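For context, the renamed constant is simply a cap on hash-table preallocation. A minimal standalone sketch of the clamping pattern used throughout these hunks (the constant value mirrors hashtable.pyx; the helper function is illustrative, not pandas API):

```python
# Cap on the size hint passed to khash resize calls; value copied from
# pandas/_libs/hashtable.pyx above.
SIZE_HINT_LIMIT = (1 << 20) + 7


def clamped_size_hint(n: int) -> int:
    """Clamp a requested preallocation size so very large inputs do not
    trigger an oversized up-front hash-table resize."""
    return min(n, SIZE_HINT_LIMIT)


print(clamped_size_hint(100))          # 100: small inputs pass through
print(clamped_size_hint(50_000_000))   # 1048583: large inputs are capped
```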
6 changes: 3 additions & 3 deletions pandas/_libs/hashtable_class_helper.pxi.in
@@ -268,7 +268,7 @@ cdef class {{name}}HashTable(HashTable):
def __cinit__(self, int64_t size_hint=1):
self.table = kh_init_{{dtype}}()
if size_hint is not None:
size_hint = min(size_hint, _SIZE_HINT_LIMIT)
size_hint = min(size_hint, SIZE_HINT_LIMIT)
kh_resize_{{dtype}}(self.table, size_hint)

def __len__(self) -> int:
@@ -603,7 +603,7 @@ cdef class StringHashTable(HashTable):
def __init__(self, int64_t size_hint=1):
self.table = kh_init_str()
if size_hint is not None:
size_hint = min(size_hint, _SIZE_HINT_LIMIT)
size_hint = min(size_hint, SIZE_HINT_LIMIT)
kh_resize_str(self.table, size_hint)

def __dealloc__(self):
@@ -916,7 +916,7 @@ cdef class PyObjectHashTable(HashTable):
def __init__(self, int64_t size_hint=1):
self.table = kh_init_pymap()
if size_hint is not None:
size_hint = min(size_hint, _SIZE_HINT_LIMIT)
size_hint = min(size_hint, SIZE_HINT_LIMIT)
kh_resize_pymap(self.table, size_hint)

def __dealloc__(self):
2 changes: 1 addition & 1 deletion pandas/_libs/hashtable_func_helper.pxi.in
@@ -138,7 +138,7 @@ def duplicated_{{dtype}}(const {{c_type}}[:] values, object keep='first'):
kh_{{ttype}}_t *table = kh_init_{{ttype}}()
ndarray[uint8_t, ndim=1, cast=True] out = np.empty(n, dtype='bool')

kh_resize_{{ttype}}(table, min(n, _SIZE_HINT_LIMIT))
kh_resize_{{ttype}}(table, min(n, SIZE_HINT_LIMIT))

if keep not in ('last', 'first', False):
raise ValueError('keep must be either "first", "last" or False')
8 changes: 4 additions & 4 deletions pandas/_libs/parsers.pyx
@@ -67,7 +67,7 @@ from pandas._libs.khash cimport (
khiter_t,
)

from pandas.compat import _get_lzma_file, _import_lzma
from pandas.compat import get_lzma_file, import_lzma
from pandas.errors import DtypeWarning, EmptyDataError, ParserError, ParserWarning

from pandas.core.dtypes.common import (
@@ -82,7 +82,7 @@ from pandas.core.dtypes.common import (
)
from pandas.core.dtypes.concat import union_categoricals

lzma = _import_lzma()
lzma = import_lzma()

cdef:
float64_t INF = <float64_t>np.inf
@@ -638,9 +638,9 @@ cdef class TextReader:
f'zip file {zip_names}')
elif self.compression == 'xz':
if isinstance(source, str):
source = _get_lzma_file(lzma)(source, 'rb')
source = get_lzma_file(lzma)(source, 'rb')
else:
source = _get_lzma_file(lzma)(filename=source)
source = get_lzma_file(lzma)(filename=source)
else:
raise ValueError(f'Unrecognized compression type: '
f'{self.compression}')
8 changes: 4 additions & 4 deletions pandas/_testing.py
@@ -25,7 +25,7 @@
from pandas._libs.lib import no_default
import pandas._libs.testing as _testing
from pandas._typing import Dtype, FilePathOrBuffer, FrameOrSeries
from pandas.compat import _get_lzma_file, _import_lzma
from pandas.compat import get_lzma_file, import_lzma

from pandas.core.dtypes.common import (
is_bool,
@@ -70,7 +70,7 @@
from pandas.io.common import urlopen
from pandas.io.formats.printing import pprint_thing

lzma = _import_lzma()
lzma = import_lzma()

_N = 30
_K = 4
@@ -243,7 +243,7 @@ def decompress_file(path, compression):
elif compression == "bz2":
f = bz2.BZ2File(path, "rb")
elif compression == "xz":
f = _get_lzma_file(lzma)(path, "rb")
f = get_lzma_file(lzma)(path, "rb")
elif compression == "zip":
zip_file = zipfile.ZipFile(path)
zip_names = zip_file.namelist()
@@ -288,7 +288,7 @@ def write_to_compressed(compression, path, data, dest="test"):
elif compression == "bz2":
compress_method = bz2.BZ2File
elif compression == "xz":
compress_method = _get_lzma_file(lzma)
compress_method = get_lzma_file(lzma)
else:
raise ValueError(f"Unrecognized compression type: {compression}")

4 changes: 2 additions & 2 deletions pandas/compat/__init__.py
@@ -77,7 +77,7 @@ def is_platform_mac() -> bool:
return sys.platform == "darwin"


def _import_lzma():
def import_lzma():
"""
Importing the `lzma` module.

@@ -97,7 +97,7 @@ def _import_lzma():
warnings.warn(msg)


def _get_lzma_file(lzma):
def get_lzma_file(lzma):
"""
Importing the `LZMAFile` class from the `lzma` module.

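The two compat helpers renamed here are thin wrappers around the standard-library lzma module. A rough behavioural sketch (simplified, not the exact pandas source):

```python
import warnings


def import_lzma():
    """Try to import the stdlib lzma module; warn instead of failing when
    Python was built without liblzma support."""
    try:
        import lzma
        return lzma
    except ImportError:
        warnings.warn(
            "Could not import the lzma module; "
            "xz-compressed files will not be supported."
        )


def get_lzma_file(lzma_module):
    """Return the LZMAFile class from a previously imported lzma module,
    raising a clear error if the import above failed."""
    if lzma_module is None:
        raise RuntimeError("lzma is not available on this Python build")
    return lzma_module.LZMAFile
```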
4 changes: 2 additions & 2 deletions pandas/core/algorithms.py
@@ -462,7 +462,7 @@ def isin(comps: AnyArrayLike, values: AnyArrayLike) -> np.ndarray:
return f(comps, values)


def _factorize_array(
def factorize_array(
values, na_sentinel: int = -1, size_hint=None, na_value=None, mask=None
) -> Tuple[np.ndarray, np.ndarray]:
"""
@@ -671,7 +671,7 @@ def factorize(
else:
na_value = None

codes, uniques = _factorize_array(
codes, uniques = factorize_array(
values, na_sentinel=na_sentinel, size_hint=size_hint, na_value=na_value
)

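A quick usage sketch of the now-public factorize_array; the signature comes from the hunk above, the sample data is made up:

```python
import numpy as np

from pandas.core.algorithms import factorize_array  # public name after this PR

values = np.array([3, 1, 3, 2, 1], dtype="int64")

# codes are positions into uniques; the default na_sentinel of -1 would
# mark missing values.
codes, uniques = factorize_array(values)

print(codes)    # e.g. [0 1 0 2 1]
print(uniques)  # e.g. [3 1 2]
```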
4 changes: 2 additions & 2 deletions pandas/core/arrays/base.py
@@ -31,7 +31,7 @@
from pandas.core.dtypes.missing import isna

from pandas.core import ops
from pandas.core.algorithms import _factorize_array, unique
from pandas.core.algorithms import factorize_array, unique
from pandas.core.missing import backfill_1d, pad_1d
from pandas.core.sorting import nargminmax, nargsort

@@ -845,7 +845,7 @@ def factorize(self, na_sentinel: int = -1) -> Tuple[np.ndarray, "ExtensionArray"
# Complete control over factorization.
arr, na_value = self._values_for_factorize()

codes, uniques = _factorize_array(
codes, uniques = factorize_array(
arr, na_sentinel=na_sentinel, na_value=na_value
)

4 changes: 2 additions & 2 deletions pandas/core/arrays/masked.py
@@ -17,7 +17,7 @@
from pandas.core.dtypes.missing import isna, notna

from pandas.core import nanops
from pandas.core.algorithms import _factorize_array, take
from pandas.core.algorithms import factorize_array, take
from pandas.core.array_algos import masked_reductions
from pandas.core.arrays import ExtensionArray, ExtensionOpsMixin
from pandas.core.indexers import check_array_indexer
@@ -287,7 +287,7 @@ def factorize(self, na_sentinel: int = -1) -> Tuple[np.ndarray, ExtensionArray]:
arr = self._data
mask = self._mask

codes, uniques = _factorize_array(arr, na_sentinel=na_sentinel, mask=mask)
codes, uniques = factorize_array(arr, na_sentinel=na_sentinel, mask=mask)

# the hashtables don't handle all different types of bits
uniques = uniques.astype(self.dtype.numpy_dtype, copy=False)
10 changes: 5 additions & 5 deletions pandas/core/computation/check.py
@@ -1,10 +1,10 @@
from pandas.compat._optional import import_optional_dependency

ne = import_optional_dependency("numexpr", raise_on_missing=False, on_version="warn")
_NUMEXPR_INSTALLED = ne is not None
if _NUMEXPR_INSTALLED:
_NUMEXPR_VERSION = ne.__version__
NUMEXPR_INSTALLED = ne is not None
if NUMEXPR_INSTALLED:
NUMEXPR_VERSION = ne.__version__
else:
_NUMEXPR_VERSION = None
NUMEXPR_VERSION = None

__all__ = ["_NUMEXPR_INSTALLED", "_NUMEXPR_VERSION"]
__all__ = ["NUMEXPR_INSTALLED", "NUMEXPR_VERSION"]
6 changes: 3 additions & 3 deletions pandas/core/computation/eval.py
@@ -38,10 +38,10 @@ def _check_engine(engine: Optional[str]) -> str:
str
Engine name.
"""
from pandas.core.computation.check import _NUMEXPR_INSTALLED
from pandas.core.computation.check import NUMEXPR_INSTALLED

if engine is None:
engine = "numexpr" if _NUMEXPR_INSTALLED else "python"
engine = "numexpr" if NUMEXPR_INSTALLED else "python"

if engine not in _engines:
valid_engines = list(_engines.keys())
@@ -53,7 +53,7 @@ def _check_engine(engine: Optional[str]) -> str:
# that won't necessarily be import-able)
# Could potentially be done on engine instantiation
if engine == "numexpr":
if not _NUMEXPR_INSTALLED:
if not NUMEXPR_INSTALLED:
raise ImportError(
"'numexpr' is not installed or an unsupported version. Cannot use "
"engine='numexpr' for query/eval if 'numexpr' is not installed"
10 changes: 5 additions & 5 deletions pandas/core/computation/expressions.py
@@ -15,15 +15,15 @@

from pandas.core.dtypes.generic import ABCDataFrame

from pandas.core.computation.check import _NUMEXPR_INSTALLED
from pandas.core.computation.check import NUMEXPR_INSTALLED
from pandas.core.ops import roperator

if _NUMEXPR_INSTALLED:
if NUMEXPR_INSTALLED:
import numexpr as ne

_TEST_MODE = None
_TEST_RESULT: List[bool] = list()
_USE_NUMEXPR = _NUMEXPR_INSTALLED
_USE_NUMEXPR = NUMEXPR_INSTALLED
_evaluate = None
_where = None

@@ -40,7 +40,7 @@
def set_use_numexpr(v=True):
# set/unset to use numexpr
global _USE_NUMEXPR
if _NUMEXPR_INSTALLED:
if NUMEXPR_INSTALLED:
_USE_NUMEXPR = v

# choose what we are going to do
@@ -53,7 +53,7 @@ def set_use_numexpr(v=True):
def set_numexpr_threads(n=None):
# if we are using numexpr, set the threads to n
# otherwise reset
if _NUMEXPR_INSTALLED and _USE_NUMEXPR:
if NUMEXPR_INSTALLED and _USE_NUMEXPR:
if n is None:
n = ne.detect_number_of_cores()
ne.set_num_threads(n)
6 changes: 3 additions & 3 deletions pandas/core/computation/ops.py
@@ -600,11 +600,11 @@ def __repr__(self) -> str:

class FuncNode:
def __init__(self, name: str):
from pandas.core.computation.check import _NUMEXPR_INSTALLED, _NUMEXPR_VERSION
from pandas.core.computation.check import NUMEXPR_INSTALLED, NUMEXPR_VERSION

if name not in _mathops or (
_NUMEXPR_INSTALLED
and _NUMEXPR_VERSION < LooseVersion("2.6.9")
NUMEXPR_INSTALLED
and NUMEXPR_VERSION < LooseVersion("2.6.9")
and name in ("floor", "ceil")
):
raise ValueError(f'"{name}" is not a supported function')
4 changes: 2 additions & 2 deletions pandas/core/frame.py
@@ -5257,7 +5257,7 @@ def duplicated(
4 True
dtype: bool
"""
from pandas._libs.hashtable import _SIZE_HINT_LIMIT, duplicated_int64
from pandas._libs.hashtable import SIZE_HINT_LIMIT, duplicated_int64

from pandas.core.sorting import get_group_index

@@ -5266,7 +5266,7 @@

def f(vals):
labels, shape = algorithms.factorize(
vals, size_hint=min(len(self), _SIZE_HINT_LIMIT)
vals, size_hint=min(len(self), SIZE_HINT_LIMIT)
)
return labels.astype("i8", copy=False), len(shape)

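User-facing behaviour of DataFrame.duplicated is unchanged by the rename; the capped size hint only affects the internal per-column factorization. For reference:

```python
import pandas as pd

df = pd.DataFrame({"a": [1, 1, 2], "b": ["x", "x", "y"]})

# Internally each column is factorized with
# size_hint=min(len(df), SIZE_HINT_LIMIT), as in the hunk above.
print(df.duplicated())
# 0    False
# 1     True
# 2    False
# dtype: bool
```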
4 changes: 2 additions & 2 deletions pandas/core/indexes/multi.py
@@ -1342,9 +1342,9 @@ def format(
)

if adjoin:
from pandas.io.formats.format import _get_adjustment
from pandas.io.formats.format import get_adjustment

adj = _get_adjustment()
adj = get_adjustment()
return adj.adjoin(space, *result_levels).split("\n")
else:
return result_levels
4 changes: 2 additions & 2 deletions pandas/core/internals/__init__.py
@@ -10,8 +10,8 @@
IntBlock,
ObjectBlock,
TimeDeltaBlock,
_safe_reshape,
make_block,
safe_reshape,
)
from pandas.core.internals.concat import concatenate_block_managers
from pandas.core.internals.managers import (
@@ -33,7 +33,7 @@
"IntBlock",
"ObjectBlock",
"TimeDeltaBlock",
"_safe_reshape",
"safe_reshape",
"make_block",
"BlockManager",
"SingleBlockManager",
4 changes: 2 additions & 2 deletions pandas/core/internals/blocks.py
@@ -1673,7 +1673,7 @@ def putmask(
if isinstance(new, (np.ndarray, ExtensionArray)) and len(new) == len(mask):
new = new[mask]

mask = _safe_reshape(mask, new_values.shape)
mask = safe_reshape(mask, new_values.shape)

new_values[mask] = new
return [self.make_block(values=new_values)]
@@ -2815,7 +2815,7 @@ def _block_shape(values: ArrayLike, ndim: int = 1) -> ArrayLike:
return values


def _safe_reshape(arr, new_shape):
def safe_reshape(arr, new_shape):
"""
If possible, reshape `arr` to have shape `new_shape`,
with a couple of exceptions (see gh-13012):
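A rough sketch of what safe_reshape does, simplified from the real function (which special-cases ExtensionArrays per gh-13012); the function name below is illustrative, only the pandas import path in the comment is real:

```python
import numpy as np


def safe_reshape_sketch(arr, new_shape):
    """Reshape plain ndarrays; leave non-ndarray array-likes (e.g. 1-D-only
    ExtensionArrays) untouched. Simplified stand-in for
    pandas.core.internals.blocks.safe_reshape."""
    if isinstance(arr, np.ndarray):
        return arr.reshape(new_shape)
    return arr


mask = np.array([True, False, True, False])
print(safe_reshape_sketch(mask, (1, 4)).shape)  # (1, 4)
```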
6 changes: 3 additions & 3 deletions pandas/core/internals/managers.py
@@ -47,10 +47,10 @@
DatetimeTZBlock,
ExtensionBlock,
ObjectValuesExtensionBlock,
_safe_reshape,
extend_blocks,
get_block_type,
make_block,
safe_reshape,
)
from pandas.core.internals.ops import blockwise_all, operate_blockwise

@@ -1015,7 +1015,7 @@ def value_getitem(placement):

else:
if value.ndim == self.ndim - 1:
value = _safe_reshape(value, (1,) + value.shape)
value = safe_reshape(value, (1,) + value.shape)

def value_getitem(placement):
return value
@@ -1138,7 +1138,7 @@ def insert(self, loc: int, item: Label, value, allow_duplicates: bool = False):

if value.ndim == self.ndim - 1 and not is_extension_array_dtype(value.dtype):
# TODO(EA2D): special case not needed with 2D EAs
value = _safe_reshape(value, (1,) + value.shape)
value = safe_reshape(value, (1,) + value.shape)

block = make_block(values=value, ndim=self.ndim, placement=slice(loc, loc + 1))

2 changes: 1 addition & 1 deletion pandas/core/sorting.py
@@ -520,7 +520,7 @@ def compress_group_index(group_index, sort: bool = True):
space can be huge, so this function compresses it, by computing offsets
(comp_ids) into the list of unique labels (obs_group_ids).
"""
size_hint = min(len(group_index), hashtable._SIZE_HINT_LIMIT)
size_hint = min(len(group_index), hashtable.SIZE_HINT_LIMIT)
table = hashtable.Int64HashTable(size_hint)

group_index = ensure_int64(group_index)