diff --git a/pandas/_libs/hashtable.pyx b/pandas/_libs/hashtable.pyx
index ffaf6d6505955..5a0cddb0af197 100644
--- a/pandas/_libs/hashtable.pyx
+++ b/pandas/_libs/hashtable.pyx
@@ -56,7 +56,7 @@ from pandas._libs.missing cimport checknull

 cdef int64_t NPY_NAT = util.get_nat()

-_SIZE_HINT_LIMIT = (1 << 20) + 7
+SIZE_HINT_LIMIT = (1 << 20) + 7

 cdef Py_ssize_t _INIT_VEC_CAP = 128

@@ -176,7 +176,7 @@ def unique_label_indices(const int64_t[:] labels):
         ndarray[int64_t, ndim=1] arr
         Int64VectorData *ud = idx.data

-    kh_resize_int64(table, min(n, _SIZE_HINT_LIMIT))
+    kh_resize_int64(table, min(n, SIZE_HINT_LIMIT))

     with nogil:
         for i in range(n):
diff --git a/pandas/_libs/hashtable_class_helper.pxi.in b/pandas/_libs/hashtable_class_helper.pxi.in
index e0e026fe7cb5e..5e4da96d57e42 100644
--- a/pandas/_libs/hashtable_class_helper.pxi.in
+++ b/pandas/_libs/hashtable_class_helper.pxi.in
@@ -268,7 +268,7 @@ cdef class {{name}}HashTable(HashTable):
     def __cinit__(self, int64_t size_hint=1):
         self.table = kh_init_{{dtype}}()
         if size_hint is not None:
-            size_hint = min(size_hint, _SIZE_HINT_LIMIT)
+            size_hint = min(size_hint, SIZE_HINT_LIMIT)
             kh_resize_{{dtype}}(self.table, size_hint)

     def __len__(self) -> int:
@@ -603,7 +603,7 @@ cdef class StringHashTable(HashTable):
     def __init__(self, int64_t size_hint=1):
         self.table = kh_init_str()
         if size_hint is not None:
-            size_hint = min(size_hint, _SIZE_HINT_LIMIT)
+            size_hint = min(size_hint, SIZE_HINT_LIMIT)
             kh_resize_str(self.table, size_hint)

     def __dealloc__(self):
@@ -916,7 +916,7 @@ cdef class PyObjectHashTable(HashTable):
     def __init__(self, int64_t size_hint=1):
         self.table = kh_init_pymap()
         if size_hint is not None:
-            size_hint = min(size_hint, _SIZE_HINT_LIMIT)
+            size_hint = min(size_hint, SIZE_HINT_LIMIT)
             kh_resize_pymap(self.table, size_hint)

     def __dealloc__(self):
diff --git a/pandas/_libs/hashtable_func_helper.pxi.in b/pandas/_libs/hashtable_func_helper.pxi.in
index 0cc0a6b192df5..fcd081f563f92 100644
--- a/pandas/_libs/hashtable_func_helper.pxi.in
+++ b/pandas/_libs/hashtable_func_helper.pxi.in
@@ -138,7 +138,7 @@ def duplicated_{{dtype}}(const {{c_type}}[:] values, object keep='first'):
         kh_{{ttype}}_t *table = kh_init_{{ttype}}()
         ndarray[uint8_t, ndim=1, cast=True] out = np.empty(n, dtype='bool')

-    kh_resize_{{ttype}}(table, min(n, _SIZE_HINT_LIMIT))
+    kh_resize_{{ttype}}(table, min(n, SIZE_HINT_LIMIT))

    if keep not in ('last', 'first', False):
        raise ValueError('keep must be either "first", "last" or False')
diff --git a/pandas/_libs/parsers.pyx b/pandas/_libs/parsers.pyx
index fa77af6bd5a25..811e28b830921 100644
--- a/pandas/_libs/parsers.pyx
+++ b/pandas/_libs/parsers.pyx
@@ -67,7 +67,7 @@ from pandas._libs.khash cimport (
     khiter_t,
 )

-from pandas.compat import _get_lzma_file, _import_lzma
+from pandas.compat import get_lzma_file, import_lzma
 from pandas.errors import DtypeWarning, EmptyDataError, ParserError, ParserWarning

 from pandas.core.dtypes.common import (
@@ -82,7 +82,7 @@ from pandas.core.dtypes.common import (
 )
 from pandas.core.dtypes.concat import union_categoricals

-lzma = _import_lzma()
+lzma = import_lzma()

 cdef:
     float64_t INF = np.inf
@@ -638,9 +638,9 @@ cdef class TextReader:
                                      f'zip file {zip_names}')
         elif self.compression == 'xz':
             if isinstance(source, str):
-                source = _get_lzma_file(lzma)(source, 'rb')
+                source = get_lzma_file(lzma)(source, 'rb')
             else:
-                source = _get_lzma_file(lzma)(filename=source)
+                source = get_lzma_file(lzma)(filename=source)
         else:
             raise ValueError(f'Unrecognized compression type: '
                              f'{self.compression}')
diff --git a/pandas/_testing.py b/pandas/_testing.py
index 04d36749a3d8c..7dba578951deb 100644
--- a/pandas/_testing.py
+++ b/pandas/_testing.py
@@ -25,7 +25,7 @@ from pandas._libs.lib import no_default
 import pandas._libs.testing as _testing
 from pandas._typing import Dtype, FilePathOrBuffer, FrameOrSeries
-from pandas.compat import _get_lzma_file, _import_lzma
+from pandas.compat import get_lzma_file, import_lzma

 from pandas.core.dtypes.common import (
     is_bool,
@@ -70,7 +70,7 @@ from pandas.io.common import urlopen
 from pandas.io.formats.printing import pprint_thing

-lzma = _import_lzma()
+lzma = import_lzma()

 _N = 30
 _K = 4
@@ -243,7 +243,7 @@ def decompress_file(path, compression):
     elif compression == "bz2":
         f = bz2.BZ2File(path, "rb")
     elif compression == "xz":
-        f = _get_lzma_file(lzma)(path, "rb")
+        f = get_lzma_file(lzma)(path, "rb")
     elif compression == "zip":
         zip_file = zipfile.ZipFile(path)
         zip_names = zip_file.namelist()
@@ -288,7 +288,7 @@ def write_to_compressed(compression, path, data, dest="test"):
     elif compression == "bz2":
         compress_method = bz2.BZ2File
     elif compression == "xz":
-        compress_method = _get_lzma_file(lzma)
+        compress_method = get_lzma_file(lzma)
     else:
         raise ValueError(f"Unrecognized compression type: {compression}")
diff --git a/pandas/compat/__init__.py b/pandas/compat/__init__.py
index f2018a5c01711..57e378758cc78 100644
--- a/pandas/compat/__init__.py
+++ b/pandas/compat/__init__.py
@@ -77,7 +77,7 @@ def is_platform_mac() -> bool:
     return sys.platform == "darwin"


-def _import_lzma():
+def import_lzma():
     """
     Importing the `lzma` module.

@@ -97,7 +97,7 @@ def _import_lzma():
         warnings.warn(msg)


-def _get_lzma_file(lzma):
+def get_lzma_file(lzma):
     """
     Importing the `LZMAFile` class from the `lzma` module.

diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py
index 50ec3714f454b..57e63daff29e4 100644
--- a/pandas/core/algorithms.py
+++ b/pandas/core/algorithms.py
@@ -462,7 +462,7 @@ def isin(comps: AnyArrayLike, values: AnyArrayLike) -> np.ndarray:
     return f(comps, values)


-def _factorize_array(
+def factorize_array(
     values, na_sentinel: int = -1, size_hint=None, na_value=None, mask=None
 ) -> Tuple[np.ndarray, np.ndarray]:
     """
@@ -671,7 +671,7 @@ def factorize(
     else:
         na_value = None

-    codes, uniques = _factorize_array(
+    codes, uniques = factorize_array(
         values, na_sentinel=na_sentinel, size_hint=size_hint, na_value=na_value
     )

diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py
index 8193d65b3b30c..0c8efda5fc588 100644
--- a/pandas/core/arrays/base.py
+++ b/pandas/core/arrays/base.py
@@ -31,7 +31,7 @@ from pandas.core.dtypes.missing import isna

 from pandas.core import ops
-from pandas.core.algorithms import _factorize_array, unique
+from pandas.core.algorithms import factorize_array, unique
 from pandas.core.missing import backfill_1d, pad_1d
 from pandas.core.sorting import nargminmax, nargsort

@@ -845,7 +845,7 @@ def factorize(self, na_sentinel: int = -1) -> Tuple[np.ndarray, "ExtensionArray"
         # Complete control over factorization.
         arr, na_value = self._values_for_factorize()

-        codes, uniques = _factorize_array(
+        codes, uniques = factorize_array(
             arr, na_sentinel=na_sentinel, na_value=na_value
         )

diff --git a/pandas/core/arrays/masked.py b/pandas/core/arrays/masked.py
index 1237dea5c1a64..31274232e2525 100644
--- a/pandas/core/arrays/masked.py
+++ b/pandas/core/arrays/masked.py
@@ -17,7 +17,7 @@ from pandas.core.dtypes.missing import isna, notna

 from pandas.core import nanops
-from pandas.core.algorithms import _factorize_array, take
+from pandas.core.algorithms import factorize_array, take
 from pandas.core.array_algos import masked_reductions
 from pandas.core.arrays import ExtensionArray, ExtensionOpsMixin
 from pandas.core.indexers import check_array_indexer
@@ -287,7 +287,7 @@ def factorize(self, na_sentinel: int = -1) -> Tuple[np.ndarray, ExtensionArray]:
         arr = self._data
         mask = self._mask

-        codes, uniques = _factorize_array(arr, na_sentinel=na_sentinel, mask=mask)
+        codes, uniques = factorize_array(arr, na_sentinel=na_sentinel, mask=mask)

         # the hashtables don't handle all different types of bits
         uniques = uniques.astype(self.dtype.numpy_dtype, copy=False)
diff --git a/pandas/core/computation/check.py b/pandas/core/computation/check.py
index 4d205909b9e2e..6c7261b3b33c9 100644
--- a/pandas/core/computation/check.py
+++ b/pandas/core/computation/check.py
@@ -1,10 +1,10 @@
 from pandas.compat._optional import import_optional_dependency

 ne = import_optional_dependency("numexpr", raise_on_missing=False, on_version="warn")
-_NUMEXPR_INSTALLED = ne is not None
-if _NUMEXPR_INSTALLED:
-    _NUMEXPR_VERSION = ne.__version__
+NUMEXPR_INSTALLED = ne is not None
+if NUMEXPR_INSTALLED:
+    NUMEXPR_VERSION = ne.__version__
 else:
-    _NUMEXPR_VERSION = None
+    NUMEXPR_VERSION = None

-__all__ = ["_NUMEXPR_INSTALLED", "_NUMEXPR_VERSION"]
+__all__ = ["NUMEXPR_INSTALLED", "NUMEXPR_VERSION"]
diff --git a/pandas/core/computation/eval.py b/pandas/core/computation/eval.py
index b74f99fca21c7..f6a7935142a32 100644
--- a/pandas/core/computation/eval.py
+++ b/pandas/core/computation/eval.py
@@ -38,10 +38,10 @@ def _check_engine(engine: Optional[str]) -> str:
     str
         Engine name.
     """
-    from pandas.core.computation.check import _NUMEXPR_INSTALLED
+    from pandas.core.computation.check import NUMEXPR_INSTALLED

     if engine is None:
-        engine = "numexpr" if _NUMEXPR_INSTALLED else "python"
+        engine = "numexpr" if NUMEXPR_INSTALLED else "python"

     if engine not in _engines:
         valid_engines = list(_engines.keys())
@@ -53,7 +53,7 @@ def _check_engine(engine: Optional[str]) -> str:
     # that won't necessarily be import-able)
     # Could potentially be done on engine instantiation
     if engine == "numexpr":
-        if not _NUMEXPR_INSTALLED:
+        if not NUMEXPR_INSTALLED:
             raise ImportError(
                 "'numexpr' is not installed or an unsupported version. Cannot use "
                 "engine='numexpr' for query/eval if 'numexpr' is not installed"
diff --git a/pandas/core/computation/expressions.py b/pandas/core/computation/expressions.py
index a9c0cb0571446..d2c08c343ab4b 100644
--- a/pandas/core/computation/expressions.py
+++ b/pandas/core/computation/expressions.py
@@ -15,15 +15,15 @@ from pandas.core.dtypes.generic import ABCDataFrame

-from pandas.core.computation.check import _NUMEXPR_INSTALLED
+from pandas.core.computation.check import NUMEXPR_INSTALLED
 from pandas.core.ops import roperator

-if _NUMEXPR_INSTALLED:
+if NUMEXPR_INSTALLED:
     import numexpr as ne

 _TEST_MODE = None
 _TEST_RESULT: List[bool] = list()
-_USE_NUMEXPR = _NUMEXPR_INSTALLED
+_USE_NUMEXPR = NUMEXPR_INSTALLED

 _evaluate = None
 _where = None
@@ -40,7 +40,7 @@ def set_use_numexpr(v=True):
     # set/unset to use numexpr
     global _USE_NUMEXPR
-    if _NUMEXPR_INSTALLED:
+    if NUMEXPR_INSTALLED:
         _USE_NUMEXPR = v

         # choose what we are going to do
@@ -53,7 +53,7 @@ def set_numexpr_threads(n=None):
     # if we are using numexpr, set the threads to n
     # otherwise reset
-    if _NUMEXPR_INSTALLED and _USE_NUMEXPR:
+    if NUMEXPR_INSTALLED and _USE_NUMEXPR:
         if n is None:
             n = ne.detect_number_of_cores()
         ne.set_num_threads(n)
diff --git a/pandas/core/computation/ops.py b/pandas/core/computation/ops.py
index b2144c45c6323..1fb3910b8577d 100644
--- a/pandas/core/computation/ops.py
+++ b/pandas/core/computation/ops.py
@@ -600,11 +600,11 @@ def __repr__(self) -> str:

 class FuncNode:
     def __init__(self, name: str):
-        from pandas.core.computation.check import _NUMEXPR_INSTALLED, _NUMEXPR_VERSION
+        from pandas.core.computation.check import NUMEXPR_INSTALLED, NUMEXPR_VERSION

         if name not in _mathops or (
-            _NUMEXPR_INSTALLED
-            and _NUMEXPR_VERSION < LooseVersion("2.6.9")
+            NUMEXPR_INSTALLED
+            and NUMEXPR_VERSION < LooseVersion("2.6.9")
             and name in ("floor", "ceil")
         ):
             raise ValueError(f'"{name}" is not a supported function')
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 150d6e24dbb86..e1a889bf79d95 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -5257,7 +5257,7 @@ def duplicated(
         4     True
         dtype: bool
         """
-        from pandas._libs.hashtable import _SIZE_HINT_LIMIT, duplicated_int64
+        from pandas._libs.hashtable import SIZE_HINT_LIMIT, duplicated_int64

         from pandas.core.sorting import get_group_index

@@ -5266,7 +5266,7 @@ def duplicated(

         def f(vals):
             labels, shape = algorithms.factorize(
-                vals, size_hint=min(len(self), _SIZE_HINT_LIMIT)
+                vals, size_hint=min(len(self), SIZE_HINT_LIMIT)
             )
             return labels.astype("i8", copy=False), len(shape)

diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py
index 080ece8547479..e49a23935efbd 100644
--- a/pandas/core/indexes/multi.py
+++ b/pandas/core/indexes/multi.py
@@ -1342,9 +1342,9 @@ def format(
             )

         if adjoin:
-            from pandas.io.formats.format import _get_adjustment
+            from pandas.io.formats.format import get_adjustment

-            adj = _get_adjustment()
+            adj = get_adjustment()
             return adj.adjoin(space, *result_levels).split("\n")
         else:
             return result_levels
diff --git a/pandas/core/internals/__init__.py b/pandas/core/internals/__init__.py
index e12e0d7760ea7..fbccac1c2af67 100644
--- a/pandas/core/internals/__init__.py
+++ b/pandas/core/internals/__init__.py
@@ -10,8 +10,8 @@
     IntBlock,
     ObjectBlock,
     TimeDeltaBlock,
-    _safe_reshape,
     make_block,
+    safe_reshape,
 )
 from pandas.core.internals.concat import concatenate_block_managers
 from pandas.core.internals.managers import (
@@ -33,7 +33,7 @@
     "IntBlock",
     "ObjectBlock",
     "TimeDeltaBlock",
-    "_safe_reshape",
+    "safe_reshape",
     "make_block",
     "BlockManager",
     "SingleBlockManager",
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index 9f4e535dc787d..0271304442e45 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -1673,7 +1673,7 @@ def putmask(
         if isinstance(new, (np.ndarray, ExtensionArray)) and len(new) == len(mask):
             new = new[mask]

-        mask = _safe_reshape(mask, new_values.shape)
+        mask = safe_reshape(mask, new_values.shape)
         new_values[mask] = new
         return [self.make_block(values=new_values)]

@@ -2815,7 +2815,7 @@ def _block_shape(values: ArrayLike, ndim: int = 1) -> ArrayLike:
     return values


-def _safe_reshape(arr, new_shape):
+def safe_reshape(arr, new_shape):
     """
     If possible, reshape `arr` to have shape `new_shape`,
     with a couple of exceptions (see gh-13012):
diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py
index 13bc6a2e82195..3f446874ffd0e 100644
--- a/pandas/core/internals/managers.py
+++ b/pandas/core/internals/managers.py
@@ -47,10 +47,10 @@
     DatetimeTZBlock,
     ExtensionBlock,
     ObjectValuesExtensionBlock,
-    _safe_reshape,
     extend_blocks,
     get_block_type,
     make_block,
+    safe_reshape,
 )
 from pandas.core.internals.ops import blockwise_all, operate_blockwise
@@ -1015,7 +1015,7 @@ def value_getitem(placement):
         else:
             if value.ndim == self.ndim - 1:
-                value = _safe_reshape(value, (1,) + value.shape)
+                value = safe_reshape(value, (1,) + value.shape)

             def value_getitem(placement):
                 return value
@@ -1138,7 +1138,7 @@ def insert(self, loc: int, item: Label, value, allow_duplicates: bool = False):

         if value.ndim == self.ndim - 1 and not is_extension_array_dtype(value.dtype):
             # TODO(EA2D): special case not needed with 2D EAs
-            value = _safe_reshape(value, (1,) + value.shape)
+            value = safe_reshape(value, (1,) + value.shape)

         block = make_block(values=value, ndim=self.ndim, placement=slice(loc, loc + 1))
diff --git a/pandas/core/sorting.py b/pandas/core/sorting.py
index 8bdd466ae6f33..d03b2f29521b7 100644
--- a/pandas/core/sorting.py
+++ b/pandas/core/sorting.py
@@ -520,7 +520,7 @@ def compress_group_index(group_index, sort: bool = True):
     space can be huge, so this function compresses it, by computing offsets
     (comp_ids) into the list of unique labels (obs_group_ids).
     """
-    size_hint = min(len(group_index), hashtable._SIZE_HINT_LIMIT)
+    size_hint = min(len(group_index), hashtable.SIZE_HINT_LIMIT)

     table = hashtable.Int64HashTable(size_hint)

     group_index = ensure_int64(group_index)
diff --git a/pandas/core/window/common.py b/pandas/core/window/common.py
index 2f3058db4493b..df60d2dcf5e84 100644
--- a/pandas/core/window/common.py
+++ b/pandas/core/window/common.py
@@ -92,7 +92,7 @@ def f(x, name=name, *args):
         return self._groupby.apply(f)


-def _flex_binary_moment(arg1, arg2, f, pairwise=False):
+def flex_binary_moment(arg1, arg2, f, pairwise=False):

     if not (
         isinstance(arg1, (np.ndarray, ABCSeries, ABCDataFrame))
@@ -222,7 +222,7 @@ def dataframe_from_int_dict(data, frame_template):
         return dataframe_from_int_dict(results, arg1)

     else:
-        return _flex_binary_moment(arg2, arg1, f)
+        return flex_binary_moment(arg2, arg1, f)


 def zsqrt(x):
diff --git a/pandas/core/window/ewm.py b/pandas/core/window/ewm.py
index 1913b51a68c15..2bd36d8bff155 100644
--- a/pandas/core/window/ewm.py
+++ b/pandas/core/window/ewm.py
@@ -15,7 +15,7 @@ import pandas.core.common as common

 from pandas.core.window.common import _doc_template, _shared_docs, zsqrt
-from pandas.core.window.rolling import _flex_binary_moment, _Rolling
+from pandas.core.window.rolling import _Rolling, flex_binary_moment

 _bias_template = """
 Parameters
@@ -416,7 +416,7 @@ def _get_cov(X, Y):
             )
             return X._wrap_result(cov)

-        return _flex_binary_moment(
+        return flex_binary_moment(
             self._selected_obj, other._selected_obj, _get_cov, pairwise=bool(pairwise)
         )

@@ -470,6 +470,6 @@ def _cov(x, y):
             corr = cov / zsqrt(x_var * y_var)
             return X._wrap_result(corr)

-        return _flex_binary_moment(
+        return flex_binary_moment(
             self._selected_obj, other._selected_obj, _get_corr, pairwise=bool(pairwise)
         )
diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py
index 558c0eeb0ea65..4c4ec4d700b7f 100644
--- a/pandas/core/window/rolling.py
+++ b/pandas/core/window/rolling.py
@@ -54,8 +54,8 @@
 from pandas.core.window.common import (
     WindowGroupByMixin,
     _doc_template,
-    _flex_binary_moment,
     _shared_docs,
+    flex_binary_moment,
     zsqrt,
 )
 from pandas.core.window.indexers import (
@@ -1774,7 +1774,7 @@ def _get_cov(X, Y):
             bias_adj = count / (count - ddof)
             return (mean(X * Y) - mean(X) * mean(Y)) * bias_adj

-        return _flex_binary_moment(
+        return flex_binary_moment(
             self._selected_obj, other._selected_obj, _get_cov, pairwise=bool(pairwise)
         )

@@ -1913,7 +1913,7 @@ def _get_corr(a, b):

             return a.cov(b, **kwargs) / (a.std(**kwargs) * b.std(**kwargs))

-        return _flex_binary_moment(
+        return flex_binary_moment(
             self._selected_obj, other._selected_obj, _get_corr, pairwise=bool(pairwise)
         )
diff --git a/pandas/io/common.py b/pandas/io/common.py
index a80b89569f429..3f130401558dd 100644
--- a/pandas/io/common.py
+++ b/pandas/io/common.py
@@ -40,12 +40,12 @@
     ModeVar,
     StorageOptions,
 )
-from pandas.compat import _get_lzma_file, _import_lzma
+from pandas.compat import get_lzma_file, import_lzma
 from pandas.compat._optional import import_optional_dependency

 from pandas.core.dtypes.common import is_file_like

-lzma = _import_lzma()
+lzma = import_lzma()

 _VALID_URLS = set(uses_relative + uses_netloc + uses_params)
@@ -562,7 +562,7 @@ def get_handle(

     # XZ Compression
     elif compression == "xz":
-        f = _get_lzma_file(lzma)(path_or_buf, mode)
+        f = get_lzma_file(lzma)(path_or_buf, mode)

     # Unrecognized Compression
     else:
diff --git a/pandas/io/excel/_base.py b/pandas/io/excel/_base.py
index 74eb65521f5b2..87343c22ad4e9 100644
--- a/pandas/io/excel/_base.py
+++ b/pandas/io/excel/_base.py
@@ -346,7 +346,7 @@ def read_excel(
     )


-class _BaseExcelReader(metaclass=abc.ABCMeta):
+class BaseExcelReader(metaclass=abc.ABCMeta):
     def __init__(self, filepath_or_buffer, storage_options: StorageOptions = None):
         # If filepath_or_buffer is a url, load the data into a BytesIO
         if is_url(filepath_or_buffer):
diff --git a/pandas/io/excel/_odfreader.py b/pandas/io/excel/_odfreader.py
index 6cbca59aed97e..02575ab878f6e 100644
--- a/pandas/io/excel/_odfreader.py
+++ b/pandas/io/excel/_odfreader.py
@@ -7,10 +7,10 @@

 import pandas as pd

-from pandas.io.excel._base import _BaseExcelReader
+from pandas.io.excel._base import BaseExcelReader


-class _ODFReader(_BaseExcelReader):
+class _ODFReader(BaseExcelReader):
     """
     Read tables out of OpenDocument formatted files.
diff --git a/pandas/io/excel/_openpyxl.py b/pandas/io/excel/_openpyxl.py
index 89b581da6ed31..f395127902101 100644
--- a/pandas/io/excel/_openpyxl.py
+++ b/pandas/io/excel/_openpyxl.py
@@ -5,7 +5,7 @@
 from pandas._typing import FilePathOrBuffer, Scalar, StorageOptions
 from pandas.compat._optional import import_optional_dependency

-from pandas.io.excel._base import ExcelWriter, _BaseExcelReader
+from pandas.io.excel._base import BaseExcelReader, ExcelWriter
 from pandas.io.excel._util import validate_freeze_panes

 if TYPE_CHECKING:
@@ -438,7 +438,7 @@ def write_cells(
                     setattr(xcell, k, v)


-class _OpenpyxlReader(_BaseExcelReader):
+class _OpenpyxlReader(BaseExcelReader):
     def __init__(
         self,
         filepath_or_buffer: FilePathOrBuffer,
diff --git a/pandas/io/excel/_pyxlsb.py b/pandas/io/excel/_pyxlsb.py
index c15a52abe4d53..069c3a2eaa643 100644
--- a/pandas/io/excel/_pyxlsb.py
+++ b/pandas/io/excel/_pyxlsb.py
@@ -3,10 +3,10 @@
 from pandas._typing import FilePathOrBuffer, Scalar, StorageOptions
 from pandas.compat._optional import import_optional_dependency

-from pandas.io.excel._base import _BaseExcelReader
+from pandas.io.excel._base import BaseExcelReader


-class _PyxlsbReader(_BaseExcelReader):
+class _PyxlsbReader(BaseExcelReader):
     def __init__(
         self,
         filepath_or_buffer: FilePathOrBuffer,
diff --git a/pandas/io/excel/_xlrd.py b/pandas/io/excel/_xlrd.py
index a7fb519af61c6..9057106fb08e5 100644
--- a/pandas/io/excel/_xlrd.py
+++ b/pandas/io/excel/_xlrd.py
@@ -5,10 +5,10 @@
 from pandas._typing import StorageOptions
 from pandas.compat._optional import import_optional_dependency

-from pandas.io.excel._base import _BaseExcelReader
+from pandas.io.excel._base import BaseExcelReader


-class _XlrdReader(_BaseExcelReader):
+class _XlrdReader(BaseExcelReader):
     def __init__(self, filepath_or_buffer, storage_options: StorageOptions = None):
         """
         Reader using xlrd engine.
diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py
index 3dc4290953360..53b2b533215f0 100644
--- a/pandas/io/formats/format.py
+++ b/pandas/io/formats/format.py
@@ -256,7 +256,7 @@ def __init__(
             float_format = get_option("display.float_format")
         self.float_format = float_format
         self.dtype = dtype
-        self.adj = _get_adjustment()
+        self.adj = get_adjustment()

         self._chk_truncate()

@@ -439,7 +439,7 @@ def _get_pad(t):
     return [x.rjust(_get_pad(x)) for x in texts]


-def _get_adjustment() -> TextAdjustment:
+def get_adjustment() -> TextAdjustment:
     use_east_asian_width = get_option("display.unicode.east_asian_width")
     if use_east_asian_width:
         return EastAsianTextAdjustment()
@@ -628,7 +628,7 @@ def __init__(
         self.columns = frame.columns

         self._chk_truncate()
-        self.adj = _get_adjustment()
+        self.adj = get_adjustment()

     def _chk_truncate(self) -> None:
         """
@@ -1733,7 +1733,7 @@ def _make_fixed_width(
         return strings

     if adj is None:
-        adj = _get_adjustment()
+        adj = get_adjustment()

     max_len = max(adj.len(x) for x in strings)
diff --git a/pandas/io/formats/printing.py b/pandas/io/formats/printing.py
index 23daab725ec65..edc6fbfff61d7 100644
--- a/pandas/io/formats/printing.py
+++ b/pandas/io/formats/printing.py
@@ -321,7 +321,7 @@ def format_object_summary(
     summary string
     """
     from pandas.io.formats.console import get_console_size
-    from pandas.io.formats.format import _get_adjustment
+    from pandas.io.formats.format import get_adjustment

     display_width, _ = get_console_size()
     if display_width is None:
@@ -350,7 +350,7 @@ def format_object_summary(
     is_truncated = n > max_seq_items

     # adj can optionally handle unicode eastern asian width
-    adj = _get_adjustment()
+    adj = get_adjustment()

     def _extend_line(
         s: str, line: str, value: str, display_width: int, next_line_prefix: str
diff --git a/pandas/tests/computation/test_compat.py b/pandas/tests/computation/test_compat.py
index b3fbd8c17d8bf..ead102f532a20 100644
--- a/pandas/tests/computation/test_compat.py
+++ b/pandas/tests/computation/test_compat.py
@@ -12,16 +12,16 @@

 def test_compat():
     # test we have compat with our version of nu
-    from pandas.core.computation.check import _NUMEXPR_INSTALLED
+    from pandas.core.computation.check import NUMEXPR_INSTALLED

     try:
         import numexpr as ne

         ver = ne.__version__
         if LooseVersion(ver) < LooseVersion(VERSIONS["numexpr"]):
-            assert not _NUMEXPR_INSTALLED
+            assert not NUMEXPR_INSTALLED
         else:
-            assert _NUMEXPR_INSTALLED
+            assert NUMEXPR_INSTALLED

     except ImportError:
         pytest.skip("not testing numexpr version compat")
diff --git a/pandas/tests/computation/test_eval.py b/pandas/tests/computation/test_eval.py
index 853ab00853d1b..49066428eb16c 100644
--- a/pandas/tests/computation/test_eval.py
+++ b/pandas/tests/computation/test_eval.py
@@ -18,7 +18,7 @@ from pandas import DataFrame, Series, compat, date_range
 import pandas._testing as tm
 from pandas.core.computation import pytables
-from pandas.core.computation.check import _NUMEXPR_VERSION
+from pandas.core.computation.check import NUMEXPR_VERSION
 from pandas.core.computation.engines import NumExprClobberingError, _engines
 import pandas.core.computation.expr as expr
 from pandas.core.computation.expr import (
@@ -26,7 +26,7 @@
     PandasExprVisitor,
     PythonExprVisitor,
 )
-from pandas.core.computation.expressions import _NUMEXPR_INSTALLED, _USE_NUMEXPR
+from pandas.core.computation.expressions import _USE_NUMEXPR, NUMEXPR_INSTALLED
 from pandas.core.computation.ops import (
     _arith_ops_syms,
     _binary_math_ops,
@@ -43,7 +43,7 @@
             marks=pytest.mark.skipif(
                 engine == "numexpr" and not _USE_NUMEXPR,
                 reason=f"numexpr enabled->{_USE_NUMEXPR}, "
-                f"installed->{_NUMEXPR_INSTALLED}",
+                f"installed->{NUMEXPR_INSTALLED}",
             ),
         )
         for engine in _engines
@@ -60,15 +60,15 @@ def parser(request):

 @pytest.fixture
 def ne_lt_2_6_9():
-    if _NUMEXPR_INSTALLED and _NUMEXPR_VERSION >= LooseVersion("2.6.9"):
+    if NUMEXPR_INSTALLED and NUMEXPR_VERSION >= LooseVersion("2.6.9"):
         pytest.skip("numexpr is >= 2.6.9")
     return "numexpr"


 @pytest.fixture
 def unary_fns_for_ne():
-    if _NUMEXPR_INSTALLED:
-        if _NUMEXPR_VERSION >= LooseVersion("2.6.9"):
+    if NUMEXPR_INSTALLED:
+        if NUMEXPR_VERSION >= LooseVersion("2.6.9"):
             return _unary_math_ops
         else:
             return tuple(x for x in _unary_math_ops if x not in ("floor", "ceil"))
diff --git a/pandas/tests/extension/json/array.py b/pandas/tests/extension/json/array.py
index 447a6108fc3c7..e3cdeb9c1951f 100644
--- a/pandas/tests/extension/json/array.py
+++ b/pandas/tests/extension/json/array.py
@@ -189,7 +189,7 @@ def _concat_same_type(cls, to_concat):
     def _values_for_factorize(self):
         frozen = self._values_for_argsort()
         if len(frozen) == 0:
-            # _factorize_array expects 1-d array, this is a len-0 2-d array.
+            # factorize_array expects 1-d array, this is a len-0 2-d array.
             frozen = frozen.ravel()
         return frozen, ()
diff --git a/pandas/tests/frame/test_arithmetic.py b/pandas/tests/frame/test_arithmetic.py
index e17357e9845b5..70d0b4e9e835c 100644
--- a/pandas/tests/frame/test_arithmetic.py
+++ b/pandas/tests/frame/test_arithmetic.py
@@ -11,7 +11,7 @@
 from pandas import DataFrame, MultiIndex, Series
 import pandas._testing as tm
 import pandas.core.common as com
-from pandas.core.computation.expressions import _MIN_ELEMENTS, _NUMEXPR_INSTALLED
+from pandas.core.computation.expressions import _MIN_ELEMENTS, NUMEXPR_INSTALLED
 from pandas.tests.frame.common import _check_mixed_float, _check_mixed_int

 # -------------------------------------------------------------------
@@ -375,7 +375,7 @@ def test_floordiv_axis0(self):
         result2 = df.floordiv(ser.values, axis=0)
         tm.assert_frame_equal(result2, expected)

-    @pytest.mark.skipif(not _NUMEXPR_INSTALLED, reason="numexpr not installed")
+    @pytest.mark.skipif(not NUMEXPR_INSTALLED, reason="numexpr not installed")
     @pytest.mark.parametrize("opname", ["floordiv", "pow"])
     def test_floordiv_axis0_numexpr_path(self, opname):
         # case that goes through numexpr and has to fall back to masked_arith_op
diff --git a/pandas/tests/frame/test_query_eval.py b/pandas/tests/frame/test_query_eval.py
index 56d178daee7fd..2994482fa5139 100644
--- a/pandas/tests/frame/test_query_eval.py
+++ b/pandas/tests/frame/test_query_eval.py
@@ -9,7 +9,7 @@
 import pandas as pd
 from pandas import DataFrame, Index, MultiIndex, Series, date_range
 import pandas._testing as tm
-from pandas.core.computation.check import _NUMEXPR_INSTALLED
+from pandas.core.computation.check import NUMEXPR_INSTALLED

 PARSERS = "python", "pandas"
 ENGINES = "python", pytest.param("numexpr", marks=td.skip_if_no_ne)
@@ -39,7 +39,7 @@ def setup_method(self, method):

     def test_query_default(self):
         # GH 12749
-        # this should always work, whether _NUMEXPR_INSTALLED or not
+        # this should always work, whether NUMEXPR_INSTALLED or not
         df = self.df
         result = df.query("A>0")
         tm.assert_frame_equal(result, self.expected1)
@@ -65,7 +65,7 @@ def test_query_python(self):

     def test_query_numexpr(self):
         df = self.df
-        if _NUMEXPR_INSTALLED:
+        if NUMEXPR_INSTALLED:
             result = df.query("A>0", engine="numexpr")
             tm.assert_frame_equal(result, self.expected1)
             result = df.eval("A+1", engine="numexpr")
diff --git a/pandas/tests/io/formats/test_format.py b/pandas/tests/io/formats/test_format.py
index 22942ed75d0f3..1fb957505987f 100644
--- a/pandas/tests/io/formats/test_format.py
+++ b/pandas/tests/io/formats/test_format.py
@@ -226,7 +226,7 @@ def test_repr_truncation(self):
             r = repr(df)
             r = r[r.find("\n") + 1 :]

-            adj = fmt._get_adjustment()
+            adj = fmt.get_adjustment()

             for line, value in zip(r.split("\n"), df["B"]):
                 if adj.len(value) + 1 > max_len:
diff --git a/pandas/tests/io/test_pickle.py b/pandas/tests/io/test_pickle.py
index d1c6705dd7a6f..2241fe7013568 100644
--- a/pandas/tests/io/test_pickle.py
+++ b/pandas/tests/io/test_pickle.py
@@ -24,7 +24,7 @@

 import pytest

-from pandas.compat import _get_lzma_file, _import_lzma, is_platform_little_endian
+from pandas.compat import get_lzma_file, import_lzma, is_platform_little_endian
 import pandas.util._test_decorators as td

 import pandas as pd
@@ -33,7 +33,7 @@

 from pandas.tseries.offsets import Day, MonthEnd

-lzma = _import_lzma()
+lzma = import_lzma()


 @pytest.fixture(scope="module")
@@ -268,7 +268,7 @@ def compress_file(self, src_path, dest_path, compression):
             with zipfile.ZipFile(dest_path, "w", compression=zipfile.ZIP_DEFLATED) as f:
                 f.write(src_path, os.path.basename(src_path))
         elif compression == "xz":
-            f = _get_lzma_file(lzma)(dest_path, "w")
+            f = get_lzma_file(lzma)(dest_path, "w")
         else:
             msg = f"Unrecognized compression type: {compression}"
             raise ValueError(msg)
diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py
index 72a679d980641..ec7413514d430 100644
--- a/pandas/tests/test_algos.py
+++ b/pandas/tests/test_algos.py
@@ -303,7 +303,7 @@ def test_parametrized_factorize_na_value_default(self, data):
         ],
     )
     def test_parametrized_factorize_na_value(self, data, na_value):
-        codes, uniques = algos._factorize_array(data, na_value=na_value)
+        codes, uniques = algos.factorize_array(data, na_value=na_value)
         expected_uniques = data[[1, 3]]
         expected_codes = np.array([-1, 0, -1, 1], dtype=np.intp)
         tm.assert_numpy_array_equal(codes, expected_codes)
diff --git a/pandas/tests/window/moments/test_moments_consistency_rolling.py b/pandas/tests/window/moments/test_moments_consistency_rolling.py
index 158b994cf03ae..dfcbdde466d44 100644
--- a/pandas/tests/window/moments/test_moments_consistency_rolling.py
+++ b/pandas/tests/window/moments/test_moments_consistency_rolling.py
@@ -10,7 +10,7 @@
 import pandas as pd
 from pandas import DataFrame, DatetimeIndex, Index, Series
 import pandas._testing as tm
-from pandas.core.window.common import _flex_binary_moment
+from pandas.core.window.common import flex_binary_moment
 from pandas.tests.window.common import (
     check_pairwise_moment,
     moments_consistency_cov_data,
@@ -150,7 +150,7 @@ def test_flex_binary_moment():
     # don't blow the stack
     msg = "arguments to moment function must be of type np.ndarray/Series/DataFrame"
     with pytest.raises(TypeError, match=msg):
-        _flex_binary_moment(5, 6, None)
+        flex_binary_moment(5, 6, None)


 def test_corr_sanity():
diff --git a/pandas/tests/window/test_pairwise.py b/pandas/tests/window/test_pairwise.py
index 7425cc5df4c2f..7f4e85b385b2d 100644
--- a/pandas/tests/window/test_pairwise.py
+++ b/pandas/tests/window/test_pairwise.py
@@ -41,7 +41,7 @@ def compare(self, result, expected):

     @pytest.mark.parametrize("f", [lambda x: x.cov(), lambda x: x.corr()])
     def test_no_flex(self, f):
-        # DataFrame methods (which do not call _flex_binary_moment())
+        # DataFrame methods (which do not call flex_binary_moment())

         results = [f(df) for df in self.df1s]
         for (df, result) in zip(self.df1s, results):
diff --git a/pandas/util/_test_decorators.py b/pandas/util/_test_decorators.py
index 78facd6694635..94c252eca1671 100644
--- a/pandas/util/_test_decorators.py
+++ b/pandas/util/_test_decorators.py
@@ -35,7 +35,7 @@ def test_foo():
 from pandas.compat._optional import import_optional_dependency
 from pandas.compat.numpy import _np_version
-from pandas.core.computation.expressions import _NUMEXPR_INSTALLED, _USE_NUMEXPR
+from pandas.core.computation.expressions import _USE_NUMEXPR, NUMEXPR_INSTALLED


 def safe_import(mod_name: str, min_version: Optional[str] = None):
@@ -196,7 +196,7 @@ def skip_if_no(package: str, min_version: Optional[str] = None):
 )
 skip_if_no_ne = pytest.mark.skipif(
     not _USE_NUMEXPR,
-    reason=f"numexpr enabled->{_USE_NUMEXPR}, installed->{_NUMEXPR_INSTALLED}",
+    reason=f"numexpr enabled->{_USE_NUMEXPR}, installed->{NUMEXPR_INSTALLED}",
 )