From f27bf4d59de442dfd493ae4d91dff4bbb2ab64cc Mon Sep 17 00:00:00 2001 From: Brock Mendel Date: Fri, 20 Jul 2018 11:37:47 -0700 Subject: [PATCH 01/17] Move (mostly deprecated) frontend detection to io.formats.console --- pandas/core/arrays/categorical.py | 3 +- pandas/core/common.py | 72 ----------------------------- pandas/core/frame.py | 6 +-- pandas/io/formats/console.py | 77 +++++++++++++++++++++++++++++-- 4 files changed, 79 insertions(+), 79 deletions(-) diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index 973a8af76bb07..b390d3b9e6a57 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -43,6 +43,7 @@ import pandas.core.algorithms as algorithms +from pandas.io.formats import console from pandas.io.formats.terminal import get_terminal_size from pandas.util._validators import validate_bool_kwarg, validate_fillna_kwargs from pandas.core.config import get_option @@ -1887,7 +1888,7 @@ def _repr_categories_info(self): length=len(self.categories), dtype=dtype) width, height = get_terminal_size() max_width = get_option("display.width") or width - if com.in_ipython_frontend(): + if console.in_ipython_frontend(): # 0 = no breaks max_width = 0 levstring = "" diff --git a/pandas/core/common.py b/pandas/core/common.py index 0ca776b6bfa77..d2bb7f91d1ab7 100644 --- a/pandas/core/common.py +++ b/pandas/core/common.py @@ -12,7 +12,6 @@ from pandas import compat from pandas.compat import long, zip, iteritems, PY36, OrderedDict -from pandas.core.config import get_option from pandas.core.dtypes.generic import ABCSeries, ABCIndex, ABCIndexClass from pandas.core.dtypes.common import is_integer from pandas.core.dtypes.inference import _iterable_not_string @@ -464,77 +463,6 @@ class Sentinel(object): return Sentinel() -# ---------------------------------------------------------------------- -# Detect our environment - -def in_interactive_session(): - """ check if we're running in an interactive shell - - returns True if running under python/ipython interactive shell - """ - - def check_main(): - import __main__ as main - return (not hasattr(main, '__file__') or - get_option('mode.sim_interactive')) - - try: - return __IPYTHON__ or check_main() # noqa - except: - return check_main() - - -def in_qtconsole(): - """ - check if we're inside an IPython qtconsole - - .. deprecated:: 0.14.1 - This is no longer needed, or working, in IPython 3 and above. - """ - try: - ip = get_ipython() # noqa - front_end = ( - ip.config.get('KernelApp', {}).get('parent_appname', "") or - ip.config.get('IPKernelApp', {}).get('parent_appname', "")) - if 'qtconsole' in front_end.lower(): - return True - except: - return False - return False - - -def in_ipnb(): - """ - check if we're inside an IPython Notebook - - .. deprecated:: 0.14.1 - This is no longer needed, or working, in IPython 3 and above. - """ - try: - ip = get_ipython() # noqa - front_end = ( - ip.config.get('KernelApp', {}).get('parent_appname', "") or - ip.config.get('IPKernelApp', {}).get('parent_appname', "")) - if 'notebook' in front_end.lower(): - return True - except: - return False - return False - - -def in_ipython_frontend(): - """ - check if we're inside an an IPython zmq frontend - """ - try: - ip = get_ipython() # noqa - return 'zmq' in str(type(ip)).lower() - except: - pass - - return False - - def _random_state(state=None): """ Helper function for processing random_state arguments. 
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 4578d2ac08199..d0f9ac1694caf 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -616,11 +616,11 @@ def _repr_fits_horizontal_(self, ignore_width=False): # used by repr_html under IPython notebook or scripts ignore terminal # dims - if ignore_width or not com.in_interactive_session(): + if ignore_width or not console.in_interactive_session(): return True if (get_option('display.width') is not None or - com.in_ipython_frontend()): + console.in_ipython_frontend()): # check at least the column row for excessive width max_rows = 1 else: @@ -688,7 +688,7 @@ def _repr_html_(self): # XXX: In IPython 3.x and above, the Qt console will not attempt to # display HTML, so this check can be removed when support for # IPython 2.x is no longer needed. - if com.in_qtconsole(): + if console.in_qtconsole(): # 'HTML output is disabled in QtConsole' return None diff --git a/pandas/io/formats/console.py b/pandas/io/formats/console.py index 36eac8dd57fbd..45d50ea3fa073 100644 --- a/pandas/io/formats/console.py +++ b/pandas/io/formats/console.py @@ -49,7 +49,6 @@ def get_console_size(): Returns (None,None) in non-interactive session. """ from pandas import get_option - from pandas.core import common as com display_width = get_option('display.width') # deprecated. @@ -65,8 +64,8 @@ def get_console_size(): # should use Auto-Detection, But only in interactive shell-terminal. # Simple. yeah. - if com.in_interactive_session(): - if com.in_ipython_frontend(): + if in_interactive_session(): + if in_ipython_frontend(): # sane defaults for interactive non-shell terminal # match default for width,height in config_init from pandas.core.config import get_default_val @@ -82,3 +81,75 @@ def get_console_size(): # and we're in a script (non-inter), this will return (None,None) # caller needs to deal. return (display_width or terminal_width, display_height or terminal_height) + + +# ---------------------------------------------------------------------- +# Detect our environment + +def in_interactive_session(): + """ check if we're running in an interactive shell + + returns True if running under python/ipython interactive shell + """ + from pandas import get_option + + def check_main(): + import __main__ as main + return (not hasattr(main, '__file__') or + get_option('mode.sim_interactive')) + + try: + return __IPYTHON__ or check_main() # noqa + except: + return check_main() + + +def in_qtconsole(): + """ + check if we're inside an IPython qtconsole + + .. deprecated:: 0.14.1 + This is no longer needed, or working, in IPython 3 and above. + """ + try: + ip = get_ipython() # noqa + front_end = ( + ip.config.get('KernelApp', {}).get('parent_appname', "") or + ip.config.get('IPKernelApp', {}).get('parent_appname', "")) + if 'qtconsole' in front_end.lower(): + return True + except: + return False + return False + + +def in_ipnb(): + """ + check if we're inside an IPython Notebook + + .. deprecated:: 0.14.1 + This is no longer needed, or working, in IPython 3 and above. 
+ """ + try: + ip = get_ipython() # noqa + front_end = ( + ip.config.get('KernelApp', {}).get('parent_appname', "") or + ip.config.get('IPKernelApp', {}).get('parent_appname', "")) + if 'notebook' in front_end.lower(): + return True + except: + return False + return False + + +def in_ipython_frontend(): + """ + check if we're inside an an IPython zmq frontend + """ + try: + ip = get_ipython() # noqa + return 'zmq' in str(type(ip)).lower() + except: + pass + + return False From cd6d6b0d516a8a09e8fbacf1b7ae7e723a1a8582 Mon Sep 17 00:00:00 2001 From: Brock Mendel Date: Fri, 20 Jul 2018 11:38:32 -0700 Subject: [PATCH 02/17] deprivatize maybe_box --- pandas/core/base.py | 2 +- pandas/core/common.py | 6 +++--- pandas/core/frame.py | 8 ++++---- pandas/core/generic.py | 2 +- pandas/core/indexes/datetimes.py | 4 ++-- pandas/core/indexes/interval.py | 4 ++-- pandas/core/indexes/period.py | 6 +++--- pandas/core/indexes/timedeltas.py | 4 ++-- pandas/core/series.py | 2 +- 9 files changed, 19 insertions(+), 19 deletions(-) diff --git a/pandas/core/base.py b/pandas/core/base.py index 1226662824eb5..4f49010bd5b73 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -856,7 +856,7 @@ def tolist(self): numpy.ndarray.tolist """ if is_datetimelike(self._values): - return [com._maybe_box_datetimelike(x) for x in self._values] + return [com.maybe_box_datetimelike(x) for x in self._values] elif is_extension_array_dtype(self._values): return list(self._values) else: diff --git a/pandas/core/common.py b/pandas/core/common.py index d2bb7f91d1ab7..8e62969bae3ac 100644 --- a/pandas/core/common.py +++ b/pandas/core/common.py @@ -72,7 +72,7 @@ def _get_info_slice(obj, indexer): return tuple(slices) -def _maybe_box(indexer, values, obj, key): +def maybe_box(indexer, values, obj, key): # if we have multiples coming back, box em if isinstance(values, np.ndarray): @@ -82,7 +82,7 @@ def _maybe_box(indexer, values, obj, key): return values -def _maybe_box_datetimelike(value): +def maybe_box_datetimelike(value): # turn a datetime like into a Timestamp/timedelta as needed if isinstance(value, (np.datetime64, datetime)): @@ -416,7 +416,7 @@ def _dict_compat(d): dict """ - return dict((_maybe_box_datetimelike(key), value) + return dict((maybe_box_datetimelike(key), value) for key, value in iteritems(d)) diff --git a/pandas/core/frame.py b/pandas/core/frame.py index d0f9ac1694caf..4b479af3d12eb 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -1099,13 +1099,13 @@ def to_dict(self, orient='dict', into=dict): return into_c((('index', self.index.tolist()), ('columns', self.columns.tolist()), ('data', lib.map_infer(self.values.ravel(), - com._maybe_box_datetimelike) + com.maybe_box_datetimelike) .reshape(self.values.shape).tolist()))) elif orient.lower().startswith('s'): - return into_c((k, com._maybe_box_datetimelike(v)) + return into_c((k, com.maybe_box_datetimelike(v)) for k, v in compat.iteritems(self)) elif orient.lower().startswith('r'): - return [into_c((k, com._maybe_box_datetimelike(v)) + return [into_c((k, com.maybe_box_datetimelike(v)) for k, v in zip(self.columns, np.atleast_1d(row))) for row in self.values] elif orient.lower().startswith('i'): @@ -2613,7 +2613,7 @@ def _get_value(self, index, col, takeable=False): if takeable: series = self._iget_item_cache(col) - return com._maybe_box_datetimelike(series._values[index]) + return com.maybe_box_datetimelike(series._values[index]) series = self._get_item_cache(col) engine = self.index._engine diff --git a/pandas/core/generic.py 
b/pandas/core/generic.py index 38f334762fa88..866473b01d0e1 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -3155,7 +3155,7 @@ def xs(self, key, axis=0, level=None, drop_level=True): # that means that their are list/ndarrays inside the Series! # so just return them (GH 6394) if not is_list_like(new_values) or self.ndim == 1: - return com._maybe_box_datetimelike(new_values) + return com.maybe_box_datetimelike(new_values) result = self._constructor_sliced( new_values, index=self.columns, diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index 7257be421c3e1..06f533796e557 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -1311,7 +1311,7 @@ def get_value(self, series, key): return series.take(locs) try: - return com._maybe_box(self, Index.get_value(self, series, key), + return com.maybe_box(self, Index.get_value(self, series, key), series, key) except KeyError: try: @@ -1333,7 +1333,7 @@ def get_value_maybe_box(self, series, key): key = Timestamp(key) values = self._engine.get_value(com._values_from_object(series), key, tz=self.tz) - return com._maybe_box(self, values, series, key) + return com.maybe_box(self, values, series, key) def get_loc(self, key, method=None, tolerance=None): """ diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index e92f980caf3dc..f96ea62d73bb7 100644 --- a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -1120,8 +1120,8 @@ def interval_range(start=None, end=None, periods=None, freq=None, -------- IntervalIndex : an Index of intervals that are all closed on the same side. """ - start = com._maybe_box_datetimelike(start) - end = com._maybe_box_datetimelike(end) + start = com.maybe_box_datetimelike(start) + end = com.maybe_box_datetimelike(end) endpoint = start if start is not None else end if freq is None and com._any_none(periods, start, end): diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py index 4d8e57820f29d..f6ac84b65e4d2 100644 --- a/pandas/core/indexes/period.py +++ b/pandas/core/indexes/period.py @@ -527,7 +527,7 @@ def get_value(self, series, key): """ s = com._values_from_object(series) try: - return com._maybe_box(self, + return com.maybe_box(self, super(PeriodIndex, self).get_value(s, key), series, key) except (KeyError, IndexError): @@ -552,7 +552,7 @@ def get_value(self, series, key): return series[key] elif grp == freqn: key = Period(asdt, freq=self.freq).ordinal - return com._maybe_box(self, self._engine.get_value(s, key), + return com.maybe_box(self, self._engine.get_value(s, key), series, key) else: raise KeyError(key) @@ -560,7 +560,7 @@ def get_value(self, series, key): pass key = Period(key, self.freq).ordinal - return com._maybe_box(self, self._engine.get_value(s, key), + return com.maybe_box(self, self._engine.get_value(s, key), series, key) @Appender(_index_shared_docs['get_indexer'] % _index_doc_kwargs) diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py index dc26c9cc0c248..15d07508542cb 100644 --- a/pandas/core/indexes/timedeltas.py +++ b/pandas/core/indexes/timedeltas.py @@ -499,7 +499,7 @@ def get_value(self, series, key): return self.get_value_maybe_box(series, key) try: - return com._maybe_box(self, Index.get_value(self, series, key), + return com.maybe_box(self, Index.get_value(self, series, key), series, key) except KeyError: try: @@ -517,7 +517,7 @@ def get_value_maybe_box(self, series, key): if not isinstance(key, Timedelta): key = 
Timedelta(key) values = self._engine.get_value(com._values_from_object(series), key) - return com._maybe_box(self, values, series, key) + return com.maybe_box(self, values, series, key) def get_loc(self, key, method=None, tolerance=None): """ diff --git a/pandas/core/series.py b/pandas/core/series.py index 77445159129f2..71fa33fd7e2b1 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -1047,7 +1047,7 @@ def get_value(self, label, takeable=False): def _get_value(self, label, takeable=False): if takeable is True: - return com._maybe_box_datetimelike(self._values[label]) + return com.maybe_box_datetimelike(self._values[label]) return self.index.get_value(self._values, label) _get_value.__doc__ = get_value.__doc__ From 95c90a1964d810686ab146c46b56a739b52bb8e8 Mon Sep 17 00:00:00 2001 From: Brock Mendel Date: Fri, 20 Jul 2018 11:39:29 -0700 Subject: [PATCH 03/17] deprivatize asarray_tuplesafe --- pandas/core/algorithms.py | 2 +- pandas/core/arrays/interval.py | 2 +- pandas/core/frame.py | 2 +- pandas/core/groupby/grouper.py | 4 ++-- pandas/core/indexes/base.py | 6 +++--- pandas/core/indexes/category.py | 2 +- pandas/core/indexes/numeric.py | 4 ++-- pandas/core/indexing.py | 2 +- pandas/core/series.py | 4 ++-- pandas/core/window.py | 2 +- pandas/io/pytables.py | 2 +- pandas/plotting/_converter.py | 2 +- pandas/tests/groupby/test_groupby.py | 8 ++++---- pandas/tests/indexes/interval/test_construction.py | 2 +- pandas/tests/indexes/interval/test_interval.py | 4 ++-- pandas/tests/test_algos.py | 2 +- pandas/tests/test_sorting.py | 2 +- 17 files changed, 26 insertions(+), 26 deletions(-) diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py index 78c9113ce60de..0134c83e7a25a 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py @@ -262,7 +262,7 @@ def match(to_match, values, na_sentinel=-1): ------- match : ndarray of integers """ - values = com._asarray_tuplesafe(values) + values = com.asarray_tuplesafe(values) htable, _, values, dtype, ndtype = _get_hashtable_algo(values) to_match, _, _ = _ensure_data(to_match, dtype) table = htable(min(len(to_match), 1000000)) diff --git a/pandas/core/arrays/interval.py b/pandas/core/arrays/interval.py index c915b272aee8b..62a47ef76a8c6 100644 --- a/pandas/core/arrays/interval.py +++ b/pandas/core/arrays/interval.py @@ -984,7 +984,7 @@ def __array__(self, dtype=None): examples='', )) def to_tuples(self, na_tuple=True): - tuples = com._asarray_tuplesafe(zip(self.left, self.right)) + tuples = com.asarray_tuplesafe(zip(self.left, self.right)) if not na_tuple: # GH 18756 tuples = np.where(~self.isna(), tuples, np.nan) diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 4b479af3d12eb..82e499e1e8e8d 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -3488,7 +3488,7 @@ def reindexer(value): if isinstance(value, list) and len(value) > 0: value = maybe_convert_platform(value) else: - value = com._asarray_tuplesafe(value) + value = com.asarray_tuplesafe(value) elif value.ndim == 2: value = value.copy().T elif isinstance(value, Index): diff --git a/pandas/core/groupby/grouper.py b/pandas/core/groupby/grouper.py index a1511b726c705..f3621b5e828af 100644 --- a/pandas/core/groupby/grouper.py +++ b/pandas/core/groupby/grouper.py @@ -288,7 +288,7 @@ def __init__(self, index, grouper=None, obj=None, name=None, level=None, self.grouper = self.obj[self.name] elif isinstance(self.grouper, (list, tuple)): - self.grouper = com._asarray_tuplesafe(self.grouper) + self.grouper = com.asarray_tuplesafe(self.grouper) # a 
passed Categorical elif is_categorical_dtype(self.grouper): @@ -533,7 +533,7 @@ def _get_grouper(obj, key=None, axis=0, level=None, sort=True, if not any_callable and not all_in_columns_index and \ not any_arraylike and not any_groupers and \ match_axis_length and level is None: - keys = [com._asarray_tuplesafe(keys)] + keys = [com.asarray_tuplesafe(keys)] if isinstance(level, (tuple, list)): if key is None: diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 83b70baf4065b..33e3cf35c13c7 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -366,7 +366,7 @@ def __new__(cls, data=None, dtype=None, copy=False, name=None, elif issubclass(data.dtype.type, np.bool) or is_bool_dtype(data): subarr = data.astype('object') else: - subarr = com._asarray_tuplesafe(data, dtype=object) + subarr = com.asarray_tuplesafe(data, dtype=object) # _asarray_tuplesafe does not always copy underlying data, # so need to make sure that this happens @@ -434,7 +434,7 @@ def __new__(cls, data=None, dtype=None, copy=False, name=None, return MultiIndex.from_tuples( data, names=name or kwargs.get('names')) # other iterable of some kind - subarr = com._asarray_tuplesafe(data, dtype=object) + subarr = com.asarray_tuplesafe(data, dtype=object) return Index(subarr, dtype=dtype, copy=copy, name=name, **kwargs) """ @@ -1686,7 +1686,7 @@ def _convert_listlike_indexer(self, keyarr, kind=None): @Appender(_index_shared_docs['_convert_arr_indexer']) def _convert_arr_indexer(self, keyarr): - keyarr = com._asarray_tuplesafe(keyarr) + keyarr = com.asarray_tuplesafe(keyarr) return keyarr _index_shared_docs['_convert_index_indexer'] = """ diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py index a03e478f81caf..91f1f84dfbe96 100644 --- a/pandas/core/indexes/category.py +++ b/pandas/core/indexes/category.py @@ -629,7 +629,7 @@ def _convert_list_indexer(self, keyarr, kind=None): @Appender(_index_shared_docs['_convert_arr_indexer']) def _convert_arr_indexer(self, keyarr): - keyarr = com._asarray_tuplesafe(keyarr) + keyarr = com.asarray_tuplesafe(keyarr) if self.categories._defer_to_indexing: return keyarr diff --git a/pandas/core/indexes/numeric.py b/pandas/core/indexes/numeric.py index 1fe0c8fa289e6..651cf07cba5bf 100644 --- a/pandas/core/indexes/numeric.py +++ b/pandas/core/indexes/numeric.py @@ -249,9 +249,9 @@ def _convert_arr_indexer(self, keyarr): # Cast the indexer to uint64 if possible so # that the values returned from indexing are # also uint64. 
- keyarr = com._asarray_tuplesafe(keyarr) + keyarr = com.asarray_tuplesafe(keyarr) if is_integer_dtype(keyarr): - return com._asarray_tuplesafe(keyarr, dtype=np.uint64) + return com.asarray_tuplesafe(keyarr, dtype=np.uint64) return keyarr @Appender(_index_shared_docs['_convert_index_indexer']) diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index 8ffc7548059b7..da437fe0148c1 100755 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -1474,7 +1474,7 @@ def _convert_for_reindex(self, key, axis=None): keyarr = labels._convert_index_indexer(key) else: # asarray can be unsafe, NumPy strings are weird - keyarr = com._asarray_tuplesafe(key) + keyarr = com.asarray_tuplesafe(key) if is_integer_dtype(keyarr): # Cast the indexer to uint64 if possible so diff --git a/pandas/core/series.py b/pandas/core/series.py index 71fa33fd7e2b1..5a6fe07a76e34 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -995,7 +995,7 @@ def _set_labels(self, key, value): if isinstance(key, Index): key = key.values else: - key = com._asarray_tuplesafe(key) + key = com.asarray_tuplesafe(key) indexer = self.index.get_indexer(key) mask = indexer == -1 if mask.any(): @@ -4198,7 +4198,7 @@ def _try_cast(arr, take_fast_path): if isinstance(data, np.ndarray): raise Exception('Data must be 1-dimensional') else: - subarr = com._asarray_tuplesafe(data, dtype=dtype) + subarr = com.asarray_tuplesafe(data, dtype=dtype) # This is to prevent mixed-type Series getting all casted to # NumPy string type, e.g. NaN --> '-1#IND'. diff --git a/pandas/core/window.py b/pandas/core/window.py index 6b6f27bcb3863..f23135c41f333 100644 --- a/pandas/core/window.py +++ b/pandas/core/window.py @@ -625,7 +625,7 @@ def _prep_window(self, **kwargs): window = self._get_window() if isinstance(window, (list, tuple, np.ndarray)): - return com._asarray_tuplesafe(window).astype(float) + return com.asarray_tuplesafe(window).astype(float) elif is_integer(window): import scipy.signal as sig diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index f2d6fe01e0573..c57b1c3e211f6 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -3935,7 +3935,7 @@ def read(self, where=None, columns=None, **kwargs): tuple_index = long_index.values unique_tuples = unique(tuple_index) - unique_tuples = com._asarray_tuplesafe(unique_tuples) + unique_tuples = com.asarray_tuplesafe(unique_tuples) indexer = match(unique_tuples, tuple_index) indexer = ensure_platform_int(indexer) diff --git a/pandas/plotting/_converter.py b/pandas/plotting/_converter.py index beebf84b8a033..3bb0b98851234 100644 --- a/pandas/plotting/_converter.py +++ b/pandas/plotting/_converter.py @@ -324,7 +324,7 @@ def try_parse(values): if isinstance(values, Index): values = values.values if not isinstance(values, np.ndarray): - values = com._asarray_tuplesafe(values) + values = com.asarray_tuplesafe(values) if is_integer_dtype(values) or is_float_dtype(values): return values diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py index 66577d738dd28..8b2b74802556d 100644 --- a/pandas/tests/groupby/test_groupby.py +++ b/pandas/tests/groupby/test_groupby.py @@ -1260,17 +1260,17 @@ def test_groupby_sort_multi(): 'd': np.random.randn(3)}) tups = lmap(tuple, df[['a', 'b', 'c']].values) - tups = com._asarray_tuplesafe(tups) + tups = com.asarray_tuplesafe(tups) result = df.groupby(['a', 'b', 'c'], sort=True).sum() tm.assert_numpy_array_equal(result.index.values, tups[[1, 2, 0]]) tups = lmap(tuple, df[['c', 'a', 'b']].values) - tups = 
com._asarray_tuplesafe(tups) + tups = com.asarray_tuplesafe(tups) result = df.groupby(['c', 'a', 'b'], sort=True).sum() tm.assert_numpy_array_equal(result.index.values, tups) tups = lmap(tuple, df[['b', 'c', 'a']].values) - tups = com._asarray_tuplesafe(tups) + tups = com.asarray_tuplesafe(tups) result = df.groupby(['b', 'c', 'a'], sort=True).sum() tm.assert_numpy_array_equal(result.index.values, tups[[2, 1, 0]]) @@ -1282,7 +1282,7 @@ def test_groupby_sort_multi(): def _check_groupby(df, result, keys, field, f=lambda x: x.sum()): tups = lmap(tuple, df[keys].values) - tups = com._asarray_tuplesafe(tups) + tups = com.asarray_tuplesafe(tups) expected = f(df.groupby(tups)[field]) for k, v in compat.iteritems(expected): assert (result[k] == v) diff --git a/pandas/tests/indexes/interval/test_construction.py b/pandas/tests/indexes/interval/test_construction.py index d46e19ef56dd0..c8b3629a6ca6a 100644 --- a/pandas/tests/indexes/interval/test_construction.py +++ b/pandas/tests/indexes/interval/test_construction.py @@ -253,7 +253,7 @@ def get_kwargs_from_breaks(self, breaks, closed='right'): return {'data': tuples} elif is_categorical_dtype(breaks): return {'data': breaks._constructor(tuples)} - return {'data': com._asarray_tuplesafe(tuples)} + return {'data': com.asarray_tuplesafe(tuples)} def test_constructor_errors(self): # non-tuple diff --git a/pandas/tests/indexes/interval/test_interval.py b/pandas/tests/indexes/interval/test_interval.py index 0dc5970c22803..e179286e839db 100644 --- a/pandas/tests/indexes/interval/test_interval.py +++ b/pandas/tests/indexes/interval/test_interval.py @@ -947,7 +947,7 @@ def test_to_tuples(self, tuples): # GH 18756 idx = IntervalIndex.from_tuples(tuples) result = idx.to_tuples() - expected = Index(com._asarray_tuplesafe(tuples)) + expected = Index(com.asarray_tuplesafe(tuples)) tm.assert_index_equal(result, expected) @pytest.mark.parametrize('tuples', [ @@ -963,7 +963,7 @@ def test_to_tuples_na(self, tuples, na_tuple): result = idx.to_tuples(na_tuple=na_tuple) # check the non-NA portion - expected_notna = Index(com._asarray_tuplesafe(tuples[:-1])) + expected_notna = Index(com.asarray_tuplesafe(tuples[:-1])) result_notna = result[:-1] tm.assert_index_equal(result_notna, expected_notna) diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py index 25e64aa82cc36..ad8293c09b633 100644 --- a/pandas/tests/test_algos.py +++ b/pandas/tests/test_algos.py @@ -217,7 +217,7 @@ def test_factorize_tuple_list(self, data, expected_label, expected_level): tm.assert_numpy_array_equal(result[0], np.array(expected_label, dtype=np.intp)) - expected_level_array = com._asarray_tuplesafe(expected_level, + expected_level_array = com.asarray_tuplesafe(expected_level, dtype=object) tm.assert_numpy_array_equal(result[1], expected_level_array) diff --git a/pandas/tests/test_sorting.py b/pandas/tests/test_sorting.py index d0350ba252329..98026f6d4cf0e 100644 --- a/pandas/tests/test_sorting.py +++ b/pandas/tests/test_sorting.py @@ -50,7 +50,7 @@ def test_int64_overflow(self): tups = list(map(tuple, df[['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H' ]].values)) - tups = com._asarray_tuplesafe(tups) + tups = com.asarray_tuplesafe(tups) expected = df.groupby(tups).sum()['values'] From 2af7d33d9a166d35cad8ff3487b055952cda9ce5 Mon Sep 17 00:00:00 2001 From: Brock Mendel Date: Fri, 20 Jul 2018 11:40:34 -0700 Subject: [PATCH 04/17] Deprivatize apply_if_callable --- pandas/core/common.py | 2 +- pandas/core/frame.py | 8 ++++---- pandas/core/generic.py | 6 +++--- pandas/core/indexing.py | 16 
++++++++-------- pandas/core/panel.py | 4 ++-- pandas/core/series.py | 4 ++-- 6 files changed, 20 insertions(+), 20 deletions(-) diff --git a/pandas/core/common.py b/pandas/core/common.py index 8e62969bae3ac..46d2a92383a78 100644 --- a/pandas/core/common.py +++ b/pandas/core/common.py @@ -385,7 +385,7 @@ def _get_callable_name(obj): return None -def _apply_if_callable(maybe_callable, obj, **kwargs): +def apply_if_callable(maybe_callable, obj, **kwargs): """ Evaluate possibly callable input using obj and kwargs if it is callable, otherwise return as it is diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 82e499e1e8e8d..4423a8d136d42 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -2745,7 +2745,7 @@ def _ixs(self, i, axis=0): return result def __getitem__(self, key): - key = com._apply_if_callable(key, self) + key = com.apply_if_callable(key, self) # shortcut if the key is in columns try: @@ -3197,7 +3197,7 @@ def _box_col_values(self, values, items): return klass(values, index=self.index, name=items, fastpath=True) def __setitem__(self, key, value): - key = com._apply_if_callable(key, self) + key = com.apply_if_callable(key, self) # see if we can slice the rows indexer = convert_to_index_sliceable(self, key) @@ -3402,12 +3402,12 @@ def assign(self, **kwargs): # >= 3.6 preserve order of kwargs if PY36: for k, v in kwargs.items(): - data[k] = com._apply_if_callable(v, data) + data[k] = com.apply_if_callable(v, data) else: # <= 3.5: do all calculations first... results = OrderedDict() for k, v in kwargs.items(): - results[k] = com._apply_if_callable(v, data) + results[k] = com.apply_if_callable(v, data) # <= 3.5 and earlier results = sorted(results.items()) diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 866473b01d0e1..bf14e2b3f1731 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -7712,7 +7712,7 @@ def _where(self, cond, other=np.nan, inplace=False, axis=None, level=None, inplace = validate_bool_kwarg(inplace, 'inplace') # align the cond to same shape as myself - cond = com._apply_if_callable(cond, self) + cond = com.apply_if_callable(cond, self) if isinstance(cond, NDFrame): cond, _ = cond.align(self, join='right', broadcast_axis=1) else: @@ -7979,7 +7979,7 @@ def where(self, cond, other=np.nan, inplace=False, axis=None, level=None, else: errors = 'ignore' - other = com._apply_if_callable(other, self) + other = com.apply_if_callable(other, self) return self._where(cond, other, inplace, axis, level, errors=errors, try_cast=try_cast) @@ -8001,7 +8001,7 @@ def mask(self, cond, other=np.nan, inplace=False, axis=None, level=None, errors = 'ignore' inplace = validate_bool_kwarg(inplace, 'inplace') - cond = com._apply_if_callable(cond, self) + cond = com.apply_if_callable(cond, self) # see gh-21891 if not hasattr(cond, "__invert__"): diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index da437fe0148c1..d729f91fab131 100755 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -112,7 +112,7 @@ def __iter__(self): def __getitem__(self, key): if type(key) is tuple: - key = tuple(com._apply_if_callable(x, self.obj) + key = tuple(com.apply_if_callable(x, self.obj) for x in key) try: values = self.obj._get_value(*key) @@ -126,7 +126,7 @@ def __getitem__(self, key): # we by definition only have the 0th axis axis = self.axis or 0 - key = com._apply_if_callable(key, self.obj) + key = com.apply_if_callable(key, self.obj) return self._getitem_axis(key, axis=axis) def _get_label(self, label, axis=None): @@ -186,10 +186,10 
@@ def _get_setitem_indexer(self, key): def __setitem__(self, key, value): if isinstance(key, tuple): - key = tuple(com._apply_if_callable(x, self.obj) + key = tuple(com.apply_if_callable(x, self.obj) for x in key) else: - key = com._apply_if_callable(key, self.obj) + key = com.apply_if_callable(key, self.obj) indexer = self._get_setitem_indexer(key) self._setitem_with_indexer(indexer, value) @@ -1494,7 +1494,7 @@ class _LocationIndexer(_NDFrameIndexer): def __getitem__(self, key): if type(key) is tuple: - key = tuple(com._apply_if_callable(x, self.obj) + key = tuple(com.apply_if_callable(x, self.obj) for x in key) try: if self._is_scalar_access(key): @@ -1506,7 +1506,7 @@ def __getitem__(self, key): # we by definition only have the 0th axis axis = self.axis or 0 - maybe_callable = com._apply_if_callable(key, self.obj) + maybe_callable = com.apply_if_callable(key, self.obj) return self._getitem_axis(maybe_callable, axis=axis) def _is_scalar_access(self, key): @@ -2266,11 +2266,11 @@ def __getitem__(self, key): def __setitem__(self, key, value): if isinstance(key, tuple): - key = tuple(com._apply_if_callable(x, self.obj) + key = tuple(com.apply_if_callable(x, self.obj) for x in key) else: # scalar callable may return tuple - key = com._apply_if_callable(key, self.obj) + key = com.apply_if_callable(key, self.obj) if not isinstance(key, tuple): key = self._tuplify(key) diff --git a/pandas/core/panel.py b/pandas/core/panel.py index 16ade3fae90a1..2f83866c20f57 100644 --- a/pandas/core/panel.py +++ b/pandas/core/panel.py @@ -282,7 +282,7 @@ def from_dict(cls, data, intersect=False, orient='items', dtype=None): return cls(**d) def __getitem__(self, key): - key = com._apply_if_callable(key, self) + key = com.apply_if_callable(key, self) if isinstance(self._info_axis, MultiIndex): return self._getitem_multilevel(key) @@ -596,7 +596,7 @@ def _box_item_values(self, key, values): return self._constructor_sliced(values, **d) def __setitem__(self, key, value): - key = com._apply_if_callable(key, self) + key = com.apply_if_callable(key, self) shape = tuple(self.shape) if isinstance(value, self._constructor_sliced): value = value.reindex( diff --git a/pandas/core/series.py b/pandas/core/series.py index 5a6fe07a76e34..454d8902af85e 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -771,7 +771,7 @@ def _slice(self, slobj, axis=0, kind=None): return self._get_values(slobj) def __getitem__(self, key): - key = com._apply_if_callable(key, self) + key = com.apply_if_callable(key, self) try: result = self.index.get_value(self, key) @@ -889,7 +889,7 @@ def _get_values(self, indexer): return self._values[indexer] def __setitem__(self, key, value): - key = com._apply_if_callable(key, self) + key = com.apply_if_callable(key, self) def setitem(key, value): try: From f76712c5cf9844567ae36aa2c09effc2af751e6d Mon Sep 17 00:00:00 2001 From: Brock Mendel Date: Fri, 20 Jul 2018 11:41:47 -0700 Subject: [PATCH 05/17] remove unused _long_prod --- pandas/core/common.py | 7 ------- 1 file changed, 7 deletions(-) diff --git a/pandas/core/common.py b/pandas/core/common.py index 46d2a92383a78..2475bc7d574fa 100644 --- a/pandas/core/common.py +++ b/pandas/core/common.py @@ -234,13 +234,6 @@ def split_ranges(mask): yield ranges[-1] -def _long_prod(vals): - result = long(1) - for x in vals: - result *= x - return result - - class groupby(dict): """ A simple groupby different from the one in itertools. 
From 0fe4b8b6809574ec0f4972e9f501dd9a3b19864a Mon Sep 17 00:00:00 2001 From: Brock Mendel Date: Fri, 20 Jul 2018 11:42:58 -0700 Subject: [PATCH 06/17] remove unused _mut_exclusive --- pandas/core/common.py | 15 +-------------- pandas/tests/test_common.py | 9 --------- 2 files changed, 1 insertion(+), 23 deletions(-) diff --git a/pandas/core/common.py b/pandas/core/common.py index 2475bc7d574fa..56838792b82f2 100644 --- a/pandas/core/common.py +++ b/pandas/core/common.py @@ -11,7 +11,7 @@ from pandas._libs import lib, tslibs from pandas import compat -from pandas.compat import long, zip, iteritems, PY36, OrderedDict +from pandas.compat import zip, iteritems, PY36, OrderedDict from pandas.core.dtypes.generic import ABCSeries, ABCIndex, ABCIndexClass from pandas.core.dtypes.common import is_integer from pandas.core.dtypes.inference import _iterable_not_string @@ -119,19 +119,6 @@ def is_bool_indexer(key): return False -def _mut_exclusive(**kwargs): - item1, item2 = kwargs.items() - label1, val1 = item1 - label2, val2 = item2 - if val1 is not None and val2 is not None: - msg = 'mutually exclusive arguments: {label1!r} and {label2!r}' - raise TypeError(msg.format(label1=label1, label2=label2)) - elif val1 is not None: - return val1 - else: - return val2 - - def _not_none(*args): """Returns a generator consisting of the arguments that are not None""" return (arg for arg in args if arg is not None) diff --git a/pandas/tests/test_common.py b/pandas/tests/test_common.py index 61f838eeeeb30..5555d7b94aa84 100644 --- a/pandas/tests/test_common.py +++ b/pandas/tests/test_common.py @@ -15,15 +15,6 @@ import pandas.util.testing as tm -def test_mut_exclusive(): - msg = "mutually exclusive arguments: '[ab]' and '[ab]'" - with tm.assert_raises_regex(TypeError, msg): - com._mut_exclusive(a=1, b=2) - assert com._mut_exclusive(a=1, b=None) == 1 - assert com._mut_exclusive(major=None, major_axis=None) is None - assert com._mut_exclusive(a=None, b=2) == 2 - - def test_get_callable_name(): getname = com._get_callable_name From 6b7fd33b9d5861808c4442aec57f60341233a298 Mon Sep 17 00:00:00 2001 From: Brock Mendel Date: Fri, 20 Jul 2018 11:43:43 -0700 Subject: [PATCH 07/17] Fixup missed changes --- pandas/core/common.py | 4 ++-- pandas/core/indexes/base.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/pandas/core/common.py b/pandas/core/common.py index 56838792b82f2..3c52b631f026c 100644 --- a/pandas/core/common.py +++ b/pandas/core/common.py @@ -272,7 +272,7 @@ def intersection(*seqs): return type(seqs[0])(list(result)) -def _asarray_tuplesafe(values, dtype=None): +def asarray_tuplesafe(values, dtype=None): if not (isinstance(values, (list, tuple)) or hasattr(values, '__array__')): values = list(values) @@ -318,7 +318,7 @@ def _index_labels_to_array(labels, dtype=None): except TypeError: # non-iterable labels = [labels] - labels = _asarray_tuplesafe(labels, dtype=dtype) + labels = asarray_tuplesafe(labels, dtype=dtype) return labels diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 33e3cf35c13c7..24cbc1b486f20 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -368,7 +368,7 @@ def __new__(cls, data=None, dtype=None, copy=False, name=None, else: subarr = com.asarray_tuplesafe(data, dtype=object) - # _asarray_tuplesafe does not always copy underlying data, + # asarray_tuplesafe does not always copy underlying data, # so need to make sure that this happens if copy: subarr = subarr.copy() From c0e43702292aea69b70fd825b8a9584ac93fa545 Mon Sep 
17 00:00:00 2001 From: Brock Mendel Date: Fri, 20 Jul 2018 11:45:27 -0700 Subject: [PATCH 08/17] deprivatize get_callable_name --- pandas/core/base.py | 2 +- pandas/core/common.py | 4 ++-- pandas/core/groupby/generic.py | 2 +- pandas/core/groupby/ops.py | 2 +- pandas/tests/test_common.py | 2 +- 5 files changed, 6 insertions(+), 6 deletions(-) diff --git a/pandas/core/base.py b/pandas/core/base.py index 4f49010bd5b73..5382315bad32b 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -581,7 +581,7 @@ def _aggregate_multiple_funcs(self, arg, _level, _axis): results.append(colg.aggregate(a)) # make sure we find a good name - name = com._get_callable_name(a) or a + name = com.get_callable_name(a) or a keys.append(name) except (TypeError, DataError): pass diff --git a/pandas/core/common.py b/pandas/core/common.py index 3c52b631f026c..b51417cb8e739 100644 --- a/pandas/core/common.py +++ b/pandas/core/common.py @@ -348,13 +348,13 @@ def is_full_slice(obj, l): obj.step is None) -def _get_callable_name(obj): +def get_callable_name(obj): # typical case has name if hasattr(obj, '__name__'): return getattr(obj, '__name__') # some objects don't; could recurse if isinstance(obj, partial): - return _get_callable_name(obj.func) + return get_callable_name(obj.func) # fall back to class name if hasattr(obj, '__call__'): return obj.__class__.__name__ diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index 169416d6f8211..fdededc325b03 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -819,7 +819,7 @@ def _aggregate_multiple_funcs(self, arg, _level): columns.append(f) else: # protect against callables without names - columns.append(com._get_callable_name(f)) + columns.append(com.get_callable_name(f)) arg = lzip(columns, arg) results = {} diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py index f2c55a56b119d..65cdd7d401073 100644 --- a/pandas/core/groupby/ops.py +++ b/pandas/core/groupby/ops.py @@ -175,7 +175,7 @@ def apply(self, f, data, axis=0): group_keys = self._get_group_keys() # oh boy - f_name = com._get_callable_name(f) + f_name = com.get_callable_name(f) if (f_name not in base.plotting_methods and hasattr(splitter, 'fast_apply') and axis == 0): try: diff --git a/pandas/tests/test_common.py b/pandas/tests/test_common.py index 5555d7b94aa84..2f12e47eb8da2 100644 --- a/pandas/tests/test_common.py +++ b/pandas/tests/test_common.py @@ -16,7 +16,7 @@ def test_get_callable_name(): - getname = com._get_callable_name + getname = com.get_callable_name def fn(x): return x From a3f1b9dc144532a53d940c8ccfca7de79ba72df7 Mon Sep 17 00:00:00 2001 From: Brock Mendel Date: Fri, 20 Jul 2018 11:47:21 -0700 Subject: [PATCH 09/17] deprivatize try_sort, dict_keys_to_ordered_list --- pandas/core/common.py | 6 +++--- pandas/core/frame.py | 2 +- pandas/core/indexes/api.py | 2 +- pandas/core/panel.py | 2 +- pandas/core/sparse/frame.py | 2 +- pandas/plotting/_core.py | 4 ++-- 6 files changed, 9 insertions(+), 9 deletions(-) diff --git a/pandas/core/common.py b/pandas/core/common.py index b51417cb8e739..8fa1d7a7c0409 100644 --- a/pandas/core/common.py +++ b/pandas/core/common.py @@ -161,7 +161,7 @@ def _count_not_none(*args): return sum(x is not None for x in args) -def _try_sort(iterable): +def try_sort(iterable): listed = list(iterable) try: return sorted(listed) @@ -169,13 +169,13 @@ def _try_sort(iterable): return listed -def _dict_keys_to_ordered_list(mapping): +def dict_keys_to_ordered_list(mapping): # when pandas drops support for Python
< 3.6, this function # can be replaced by a simple list(mapping.keys()) if PY36 or isinstance(mapping, OrderedDict): keys = list(mapping.keys()) else: - keys = _try_sort(mapping) + keys = try_sort(mapping) return keys diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 4423a8d136d42..e6ac20f14d930 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -465,7 +465,7 @@ def _init_dict(self, data, index, columns, dtype=None): arrays.loc[missing] = [v] * missing.sum() else: - keys = com._dict_keys_to_ordered_list(data) + keys = com.dict_keys_to_ordered_list(data) columns = data_names = Index(keys) arrays = [data[k] for k in keys] diff --git a/pandas/core/indexes/api.py b/pandas/core/indexes/api.py index b409d695a73e8..018a0c4f79445 100644 --- a/pandas/core/indexes/api.py +++ b/pandas/core/indexes/api.py @@ -130,7 +130,7 @@ def _sanitize_and_check(indexes): if list in kinds: if len(kinds) > 1: - indexes = [Index(com._try_sort(x)) + indexes = [Index(com.try_sort(x)) if not isinstance(x, Index) else x for x in indexes] kinds.remove(list) diff --git a/pandas/core/panel.py b/pandas/core/panel.py index 2f83866c20f57..4ebac55eea137 100644 --- a/pandas/core/panel.py +++ b/pandas/core/panel.py @@ -204,7 +204,7 @@ def _init_dict(self, data, axes, dtype=None): for k, v in compat.iteritems(data) if k in haxis) else: - keys = com._dict_keys_to_ordered_list(data) + keys = com.dict_keys_to_ordered_list(data) haxis = Index(keys) for k, v in compat.iteritems(data): diff --git a/pandas/core/sparse/frame.py b/pandas/core/sparse/frame.py index f7071061d07ab..5cb9f4744cc58 100644 --- a/pandas/core/sparse/frame.py +++ b/pandas/core/sparse/frame.py @@ -143,7 +143,7 @@ def _init_dict(self, data, index, columns, dtype=None): columns = ensure_index(columns) data = {k: v for k, v in compat.iteritems(data) if k in columns} else: - keys = com._dict_keys_to_ordered_list(data) + keys = com.dict_keys_to_ordered_list(data) columns = Index(keys) if index is None: diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py index 06020bdfd5d1d..7ce4c23f81ad6 100644 --- a/pandas/plotting/_core.py +++ b/pandas/plotting/_core.py @@ -233,7 +233,7 @@ def _iter_data(self, data=None, keep_index=False, fillna=None): # TODO: unused? 
# if self.sort_columns: - # columns = com._try_sort(data.columns) + # columns = com.try_sort(data.columns) # else: # columns = data.columns @@ -2428,7 +2428,7 @@ def hist_frame(data, column=None, by=None, grid=True, xlabelsize=None, layout=layout) _axes = _flatten(axes) - for i, col in enumerate(com._try_sort(data.columns)): + for i, col in enumerate(com.try_sort(data.columns)): ax = _axes[i] ax.hist(data[col].dropna().values, bins=bins, **kwds) ax.set_title(col) From 072f7cb535a1634918c8ef1f51a0b7417cd8fec8 Mon Sep 17 00:00:00 2001 From: Brock Mendel Date: Fri, 20 Jul 2018 11:54:32 -0700 Subject: [PATCH 10/17] deprivatize most of the rest of com --- pandas/core/algorithms.py | 2 +- pandas/core/arrays/datetimes.py | 2 +- pandas/core/arrays/period.py | 4 ++-- pandas/core/arrays/timedeltas.py | 4 ++-- pandas/core/common.py | 20 +++++++++++--------- pandas/core/computation/expressions.py | 4 ++-- pandas/core/computation/pytables.py | 2 +- pandas/core/frame.py | 6 +++--- pandas/core/generic.py | 26 +++++++++++++------------- pandas/core/groupby/ops.py | 2 +- pandas/core/indexes/api.py | 2 +- pandas/core/indexes/base.py | 18 +++++++++--------- pandas/core/indexes/category.py | 2 +- pandas/core/indexes/datetimes.py | 4 ++-- pandas/core/indexes/interval.py | 2 +- pandas/core/indexes/multi.py | 10 +++++----- pandas/core/indexes/numeric.py | 4 ++-- pandas/core/indexes/period.py | 4 ++-- pandas/core/indexes/timedeltas.py | 2 +- pandas/core/nanops.py | 8 ++++---- pandas/core/ops.py | 6 +++--- pandas/core/reshape/concat.py | 2 +- pandas/core/reshape/merge.py | 10 +++++----- pandas/core/reshape/pivot.py | 4 ++-- pandas/core/reshape/util.py | 2 +- pandas/core/series.py | 10 +++++----- pandas/core/sparse/series.py | 2 +- pandas/core/strings.py | 4 ++-- pandas/core/window.py | 2 +- pandas/plotting/_style.py | 2 +- pandas/tests/test_common.py | 16 ++++++++-------- 31 files changed, 95 insertions(+), 93 deletions(-) diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py index 0134c83e7a25a..49705cb6d9ad2 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py @@ -412,7 +412,7 @@ def isin(comps, values): # handle categoricals return comps._values.isin(values) - comps = com._values_from_object(comps) + comps = com.values_from_object(comps) comps, dtype, _ = _ensure_data(comps) values, _, _ = _ensure_data(values, dtype=dtype) diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index c5e85cb5892f4..4e5fe1d5e3f52 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -119,7 +119,7 @@ def wrapper(self, other): self._assert_tzawareness_compat(other) result = meth(self, np.asarray(other)) - result = com._values_from_object(result) + result = com.values_from_object(result) # Make sure to pass an array to result[...]; indexing with # Series breaks with older version of numpy diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py index 66b1fb8db25c0..3183899893ed3 100644 --- a/pandas/core/arrays/period.py +++ b/pandas/core/arrays/period.py @@ -166,7 +166,7 @@ def _generate_range(cls, start, end, periods, freq, fields): freq = Period._maybe_convert_freq(freq) field_count = len(fields) - if com._count_not_none(start, end) > 0: + if com.count_not_none(start, end) > 0: if field_count > 0: raise ValueError('Can either instantiate from fields ' 'or endpoints, but not both') @@ -399,7 +399,7 @@ def _add_comparison_methods(cls): # Constructor Helpers def _get_ordinal_range(start, end, periods, freq, mult=1): - if 
com._count_not_none(start, end, periods) != 2: + if com.count_not_none(start, end, periods) != 2: raise ValueError('Of the three parameters: start, end, and periods, ' 'exactly two must be specified') diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py index a28f7fc9c32fa..0cd0466db1eaa 100644 --- a/pandas/core/arrays/timedeltas.py +++ b/pandas/core/arrays/timedeltas.py @@ -80,7 +80,7 @@ def wrapper(self, other): else: other = type(self)(other).values result = meth(self, other) - result = com._values_from_object(result) + result = com.values_from_object(result) o_mask = np.array(isna(other)) if o_mask.any(): @@ -151,7 +151,7 @@ def __new__(cls, values, freq=None, start=None, end=None, periods=None, @classmethod def _generate_range(cls, start, end, periods, freq, closed=None, **kwargs): # **kwargs are for compat with TimedeltaIndex, which includes `name` - if com._count_not_none(start, end, periods, freq) != 3: + if com.count_not_none(start, end, periods, freq) != 3: raise ValueError('Of the four parameters: start, end, periods, ' 'and freq, exactly three must be specified') diff --git a/pandas/core/common.py b/pandas/core/common.py index 8fa1d7a7c0409..2c53e9f15d850 100644 --- a/pandas/core/common.py +++ b/pandas/core/common.py @@ -51,7 +51,7 @@ def flatten(l): yield el -def _consensus_name_attr(objs): +def consensus_name_attr(objs): name = objs[0].name for obj in objs[1:]: try: @@ -62,7 +62,8 @@ def _consensus_name_attr(objs): return name -def _get_info_slice(obj, indexer): +# TODO: only used once in frame.py; belongs elsewhere? +def get_info_slice(obj, indexer): """Slice the info axis of `obj` with `indexer`.""" if not hasattr(obj, '_info_axis_number'): msg = 'object of type {typ!r} has no info axis' @@ -93,7 +94,7 @@ def maybe_box_datetimelike(value): return value -_values_from_object = lib.values_from_object +values_from_object = lib.values_from_object def is_bool_indexer(key): @@ -156,7 +157,7 @@ def _all_not_none(*args): return True -def _count_not_none(*args): +def count_not_none(*args): """Returns the count of arguments that are not None""" return sum(x is not None for x in args) @@ -296,7 +297,7 @@ def asarray_tuplesafe(values, dtype=None): return result -def _index_labels_to_array(labels, dtype=None): +def index_labels_to_array(labels, dtype=None): """ Transform label or iterable of labels to array, for use in Index. @@ -323,7 +324,7 @@ def _index_labels_to_array(labels, dtype=None): return labels -def _maybe_make_list(obj): +def maybe_make_list(obj): if obj is not None and not isinstance(obj, (tuple, list)): return [obj] return obj @@ -383,7 +384,7 @@ def apply_if_callable(maybe_callable, obj, **kwargs): return maybe_callable -def _dict_compat(d): +def dict_compat(d): """ Helper function to convert datetimelike-keyed dicts to Timestamp-keyed dict @@ -443,7 +444,7 @@ class Sentinel(object): return Sentinel() -def _random_state(state=None): +def random_state(state=None): """ Helper function for processing random_state arguments. @@ -472,7 +473,8 @@ def _random_state(state=None): "RandomState, or None") -def _get_distinct_objs(objs): +# TODO: only used once in indexes.api; belongs elsewhere? +def get_distinct_objs(objs): """ Return a list with distinct elements of "objs" (different ids). Preserves order. 
diff --git a/pandas/core/computation/expressions.py b/pandas/core/computation/expressions.py index 781101f5804e6..ac552e7b80de3 100644 --- a/pandas/core/computation/expressions.py +++ b/pandas/core/computation/expressions.py @@ -123,8 +123,8 @@ def _evaluate_numexpr(op, op_str, a, b, truediv=True, def _where_standard(cond, a, b): - return np.where(com._values_from_object(cond), com._values_from_object(a), - com._values_from_object(b)) + return np.where(com.values_from_object(cond), com.values_from_object(a), + com.values_from_object(b)) def _where_numexpr(cond, a, b): diff --git a/pandas/core/computation/pytables.py b/pandas/core/computation/pytables.py index 26eefa75b2675..2bd1b0c5b3507 100644 --- a/pandas/core/computation/pytables.py +++ b/pandas/core/computation/pytables.py @@ -190,7 +190,7 @@ def stringify(value): v = _coerce_scalar_to_timedelta_type(v, unit='s').value return TermValue(int(v), v, kind) elif meta == u('category'): - metadata = com._values_from_object(self.metadata) + metadata = com.values_from_object(self.metadata) result = metadata.searchsorted(v, side='left') # result returns 0 if v is first element or if v is not in metadata diff --git a/pandas/core/frame.py b/pandas/core/frame.py index e6ac20f14d930..60ef85cf2bbc4 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -3182,7 +3182,7 @@ def is_dtype_instance_mapper(idx, dtype): exclude_these.iloc[idx] = not any(map(f, exclude)) dtype_indexer = include_these & exclude_these - return self.loc[com._get_info_slice(self, dtype_indexer)] + return self.loc[com.get_info_slice(self, dtype_indexer)] def _box_item_values(self, key, values): items = self.columns[self.columns.get_loc(key)] @@ -7826,7 +7826,7 @@ def _list_of_series_to_arrays(data, columns, coerce_float=False, dtype=None): else: indexer = indexer_cache[id(index)] = index.get_indexer(columns) - values = com._values_from_object(s) + values = com.values_from_object(s) aligned_values.append(algorithms.take_1d(values, indexer)) values = np.vstack(aligned_values) @@ -7914,7 +7914,7 @@ def _homogenize(data, index, dtype=None): oindex = index.astype('O') if isinstance(index, (DatetimeIndex, TimedeltaIndex)): - v = com._dict_compat(v) + v = com.dict_compat(v) else: v = dict(v) v = lib.fast_multiget(v, oindex.values, default=np.nan) diff --git a/pandas/core/generic.py b/pandas/core/generic.py index bf14e2b3f1731..da76ec0843fce 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -1090,7 +1090,7 @@ def rename(self, *args, **kwargs): raise TypeError('rename() got an unexpected keyword ' 'argument "{0}"'.format(list(kwargs.keys())[0])) - if com._count_not_none(*axes.values()) == 0: + if com.count_not_none(*axes.values()) == 0: raise TypeError('must pass an index to rename') # renamer function if passed a dict @@ -1265,7 +1265,7 @@ def _indexed_same(self, other): for a in self._AXIS_ORDERS) def __neg__(self): - values = com._values_from_object(self) + values = com.values_from_object(self) if is_bool_dtype(values): arr = operator.inv(values) elif (is_numeric_dtype(values) or is_timedelta64_dtype(values) @@ -1277,7 +1277,7 @@ def __neg__(self): return self.__array_wrap__(arr) def __pos__(self): - values = com._values_from_object(self) + values = com.values_from_object(self) if (is_bool_dtype(values) or is_period_arraylike(values)): arr = values elif (is_numeric_dtype(values) or is_timedelta64_dtype(values) @@ -1290,7 +1290,7 @@ def __pos__(self): def __invert__(self): try: - arr = operator.inv(com._values_from_object(self)) + arr = 
operator.inv(com.values_from_object(self)) return self.__array_wrap__(arr) except Exception: @@ -1587,7 +1587,7 @@ def _drop_labels_or_levels(self, keys, axis=0): .format(type=type(self))) # Validate keys - keys = com._maybe_make_list(keys) + keys = com.maybe_make_list(keys) invalid_keys = [k for k in keys if not self._is_label_or_level_reference(k, axis=axis)] @@ -1753,7 +1753,7 @@ def __round__(self, decimals=0): # Array Interface def __array__(self, dtype=None): - return com._values_from_object(self) + return com.values_from_object(self) def __array_wrap__(self, result, context=None): d = self._construct_axes_dict(self._AXIS_ORDERS, copy=False) @@ -3295,7 +3295,7 @@ def _drop_axis(self, labels, axis, level=None, errors='raise'): # Case for non-unique axis else: - labels = ensure_object(com._index_labels_to_array(labels)) + labels = ensure_object(com.index_labels_to_array(labels)) if level is not None: if not isinstance(axis, MultiIndex): raise AssertionError('axis must be a MultiIndex') @@ -3860,7 +3860,7 @@ def _reindex_axes(self, axes, level, limit, tolerance, method, fill_value, def _needs_reindex_multi(self, axes, method, level): """Check if we do need a multi reindex.""" - return ((com._count_not_none(*axes.values()) == self._AXIS_LEN) and + return ((com.count_not_none(*axes.values()) == self._AXIS_LEN) and method is None and level is None and not self._is_mixed_type) def _reindex_multi(self, axes, copy, fill_value): @@ -4034,7 +4034,7 @@ def filter(self, items=None, like=None, regex=None, axis=None): """ import re - nkw = com._count_not_none(items, like, regex) + nkw = com.count_not_none(items, like, regex) if nkw > 1: raise TypeError('Keyword arguments `items`, `like`, or `regex` ' 'are mutually exclusive') @@ -4280,7 +4280,7 @@ def sample(self, n=None, frac=None, replace=False, weights=None, axis_length = self.shape[axis] # Process random_state argument - rs = com._random_state(random_state) + rs = com.random_state(random_state) # Check weights for compliance if weights is not None: @@ -7782,7 +7782,7 @@ def _where(self, cond, other=np.nan, inplace=False, axis=None, level=None, if try_quick: try: - new_other = com._values_from_object(self) + new_other = com.values_from_object(self) new_other = new_other.copy() new_other[icond] = other other = new_other @@ -8949,7 +8949,7 @@ def pct_change(self, periods=1, fill_method='pad', limit=None, freq=None, **kwargs)) - 1) rs = rs.reindex_like(data) if freq is None: - mask = isna(com._values_from_object(data)) + mask = isna(com.values_from_object(data)) np.putmask(rs.values, mask, np.nan) return rs @@ -9880,7 +9880,7 @@ def cum_func(self, axis=None, skipna=True, *args, **kwargs): else: axis = self._get_axis_number(axis) - y = com._values_from_object(self).copy() + y = com.values_from_object(self).copy() if (skipna and issubclass(y.dtype.type, (np.datetime64, np.timedelta64))): diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py index 65cdd7d401073..38ac144ac6c95 100644 --- a/pandas/core/groupby/ops.py +++ b/pandas/core/groupby/ops.py @@ -209,7 +209,7 @@ def indices(self): return self.groupings[0].indices else: label_list = [ping.labels for ping in self.groupings] - keys = [com._values_from_object(ping.group_index) + keys = [com.values_from_object(ping.group_index) for ping in self.groupings] return get_indexer_dict(label_list, keys) diff --git a/pandas/core/indexes/api.py b/pandas/core/indexes/api.py index 018a0c4f79445..3f3448d104165 100644 --- a/pandas/core/indexes/api.py +++ b/pandas/core/indexes/api.py @@ -55,7 +55,7 
@@ def _get_objs_combined_axis(objs, intersect=False, axis=0, sort=True): def _get_combined_index(indexes, intersect=False, sort=False): # TODO: handle index names! - indexes = com._get_distinct_objs(indexes) + indexes = com.get_distinct_objs(indexes) if len(indexes) == 0: index = Index([]) elif len(indexes) == 1: diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 24cbc1b486f20..dd7d578ed2bc6 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -1981,7 +1981,7 @@ def __getitem__(self, key): if com.is_bool_indexer(key): key = np.asarray(key) - key = com._values_from_object(key) + key = com.values_from_object(key) result = getitem(key) if not is_scalar(result): return promote(result) @@ -2347,8 +2347,8 @@ def equals(self, other): return other.equals(self) try: - return array_equivalent(com._values_from_object(self), - com._values_from_object(other)) + return array_equivalent(com.values_from_object(self), + com.values_from_object(other)) except Exception: return False @@ -3050,8 +3050,8 @@ def get_value(self, series, key): elif is_integer(key): return s[key] - s = com._values_from_object(series) - k = com._values_from_object(key) + s = com.values_from_object(series) + k = com.values_from_object(key) k = self._convert_scalar_indexer(k, kind='getitem') try: @@ -3084,8 +3084,8 @@ def set_value(self, arr, key, value): Fast lookup of value from 1-dimensional ndarray. Only use this if you know what you're doing """ - self._engine.set_value(com._values_from_object(arr), - com._values_from_object(key), value) + self._engine.set_value(com.values_from_object(arr), + com.values_from_object(key), value) def _get_level_values(self, level): """ @@ -4410,7 +4410,7 @@ def drop(self, labels, errors='raise'): If not all of the labels are found in the selected axis """ arr_dtype = 'object' if self.dtype == 'object' else None - labels = com._index_labels_to_array(labels, dtype=arr_dtype) + labels = com.index_labels_to_array(labels, dtype=arr_dtype) indexer = self.get_indexer(labels) mask = indexer == -1 if mask.any(): @@ -4703,7 +4703,7 @@ def _validate_for_numeric_binop(self, other, op): if len(self) != len(other): raise ValueError("cannot evaluate a numeric op with " "unequal lengths") - other = com._values_from_object(other) + other = com.values_from_object(other) if other.dtype.kind not in ['f', 'i', 'u']: raise TypeError("cannot evaluate a numeric op " "with a non-numeric dtype") diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py index 91f1f84dfbe96..d76a7ef00f625 100644 --- a/pandas/core/indexes/category.py +++ b/pandas/core/indexes/category.py @@ -440,7 +440,7 @@ def get_value(self, series, key): know what you're doing """ try: - k = com._values_from_object(key) + k = com.values_from_object(key) k = self._convert_scalar_indexer(k, kind='getitem') indexer = self.get_loc(k) return series.iloc[indexer] diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index 06f533796e557..0e1d2019c83b4 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -395,7 +395,7 @@ def __new__(cls, data=None, @classmethod def _generate_range(cls, start, end, periods, name, freq, tz=None, normalize=False, ambiguous='raise', closed=None): - if com._count_not_none(start, end, periods, freq) != 3: + if com.count_not_none(start, end, periods, freq) != 3: raise ValueError('Of the four parameters: start, end, periods, ' 'and freq, exactly three must be specified') @@ -1331,7 +1331,7 @@ def 
get_value_maybe_box(self, series, key): key = Timestamp(key, tz=self.tz) elif not isinstance(key, Timestamp): key = Timestamp(key) - values = self._engine.get_value(com._values_from_object(series), + values = self._engine.get_value(com.values_from_object(series), key, tz=self.tz) return com.maybe_box(self, values, series, key) diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index f96ea62d73bb7..246bd3d541b72 100644 --- a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -1127,7 +1127,7 @@ def interval_range(start=None, end=None, periods=None, freq=None, if freq is None and com._any_none(periods, start, end): freq = 1 if is_number(endpoint) else 'D' - if com._count_not_none(start, end, periods, freq) != 3: + if com.count_not_none(start, end, periods, freq) != 3: raise ValueError('Of the four parameters: start, end, periods, and ' 'freq, exactly three must be specified') diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index 0d4ceb2783bad..7d24a901382bb 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -950,8 +950,8 @@ def get_value(self, series, key): from pandas.core.indexing import maybe_droplevels # Label-based - s = com._values_from_object(series) - k = com._values_from_object(key) + s = com.values_from_object(series) + k = com.values_from_object(key) def _try_mi(k): # TODO: what if a level contains tuples?? @@ -1691,7 +1691,7 @@ def drop(self, labels, level=None, errors='raise'): try: if not isinstance(labels, (np.ndarray, Index)): - labels = com._index_labels_to_array(labels) + labels = com.index_labels_to_array(labels) indexer = self.get_indexer(labels) mask = indexer == -1 if mask.any(): @@ -1730,7 +1730,7 @@ def drop(self, labels, level=None, errors='raise'): return self.delete(inds) def _drop_from_level(self, labels, level): - labels = com._index_labels_to_array(labels) + labels = com.index_labels_to_array(labels) i = self._get_level_number(level) index = self.levels[i] values = index.get_indexer(labels) @@ -2628,7 +2628,7 @@ def equals(self, other): return False if not isinstance(other, MultiIndex): - other_vals = com._values_from_object(ensure_index(other)) + other_vals = com.values_from_object(ensure_index(other)) return array_equivalent(self._ndarray_values, other_vals) if self.nlevels != other.nlevels: diff --git a/pandas/core/indexes/numeric.py b/pandas/core/indexes/numeric.py index 651cf07cba5bf..ea392d0b93377 100644 --- a/pandas/core/indexes/numeric.py +++ b/pandas/core/indexes/numeric.py @@ -354,9 +354,9 @@ def get_value(self, series, key): if not is_scalar(key): raise InvalidIndexError - k = com._values_from_object(key) + k = com.values_from_object(key) loc = self.get_loc(k) - new_values = com._values_from_object(series)[loc] + new_values = com.values_from_object(series)[loc] return new_values diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py index f6ac84b65e4d2..47dd0492a8d5d 100644 --- a/pandas/core/indexes/period.py +++ b/pandas/core/indexes/period.py @@ -525,7 +525,7 @@ def get_value(self, series, key): Fast lookup of value from 1-dimensional ndarray. 
Only use this if you know what you're doing """ - s = com._values_from_object(series) + s = com.values_from_object(series) try: return com.maybe_box(self, super(PeriodIndex, self).get_value(s, key), @@ -866,7 +866,7 @@ def period_range(start=None, end=None, periods=None, freq='D', name=None): PeriodIndex(['2017-03', '2017-04', '2017-05', '2017-06'], dtype='period[M]', freq='M') """ - if com._count_not_none(start, end, periods) != 2: + if com.count_not_none(start, end, periods) != 2: raise ValueError('Of the three parameters: start, end, and periods, ' 'exactly two must be specified') diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py index 15d07508542cb..b9a0c55e947c0 100644 --- a/pandas/core/indexes/timedeltas.py +++ b/pandas/core/indexes/timedeltas.py @@ -516,7 +516,7 @@ def get_value(self, series, key): def get_value_maybe_box(self, series, key): if not isinstance(key, Timedelta): key = Timedelta(key) - values = self._engine.get_value(com._values_from_object(series), key) + values = self._engine.get_value(com.values_from_object(series), key) return com.maybe_box(self, values, series, key) def get_loc(self, key, method=None, tolerance=None): diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py index de31c6ac11c3f..32fd70bcf654d 100644 --- a/pandas/core/nanops.py +++ b/pandas/core/nanops.py @@ -205,7 +205,7 @@ def _get_values(values, skipna, fill_value=None, fill_value_typ=None, if necessary copy and mask using the specified fill_value copy = True will force the copy """ - values = com._values_from_object(values) + values = com.values_from_object(values) if isfinite: mask = _isfinite(values) else: @@ -440,7 +440,7 @@ def nanstd(values, axis=None, skipna=True, ddof=1): @bottleneck_switch(ddof=1) def nanvar(values, axis=None, skipna=True, ddof=1): - values = com._values_from_object(values) + values = com.values_from_object(values) dtype = values.dtype mask = isna(values) if is_any_int_dtype(values): @@ -549,7 +549,7 @@ def nanskew(values, axis=None, skipna=True): """ - values = com._values_from_object(values) + values = com.values_from_object(values) mask = isna(values) if not is_float_dtype(values.dtype): values = values.astype('f8') @@ -607,7 +607,7 @@ def nankurt(values, axis=None, skipna=True): central moment. 
""" - values = com._values_from_object(values) + values = com.values_from_object(values) mask = isna(values) if not is_float_dtype(values.dtype): values = values.astype('f8') diff --git a/pandas/core/ops.py b/pandas/core/ops.py index bccc5a587bd83..49c80296fdb5f 100644 --- a/pandas/core/ops.py +++ b/pandas/core/ops.py @@ -89,7 +89,7 @@ def _maybe_match_name(a, b): See also -------- - pandas.core.common._consensus_name_attr + pandas.core.common.consensus_name_attr """ a_has = hasattr(a, 'name') b_has = hasattr(b, 'name') @@ -1082,7 +1082,7 @@ def na_op(x, y): dtype = find_common_type([x.dtype, y.dtype]) result = np.empty(x.size, dtype=dtype) mask = notna(x) & notna(y) - result[mask] = op(x[mask], com._values_from_object(y[mask])) + result[mask] = op(x[mask], com.values_from_object(y[mask])) else: assert isinstance(x, np.ndarray) result = np.empty(len(x), dtype=x.dtype) @@ -1363,7 +1363,7 @@ def wrapper(self, other, axis=None): .format(typ=type(other))) # always return a full value series here - res_values = com._values_from_object(res) + res_values = com.values_from_object(res) return self._constructor(res_values, index=self.index, name=res_name, dtype='bool') diff --git a/pandas/core/reshape/concat.py b/pandas/core/reshape/concat.py index 1d6105cb68bf1..1c602a0af1ec1 100644 --- a/pandas/core/reshape/concat.py +++ b/pandas/core/reshape/concat.py @@ -385,7 +385,7 @@ def get_result(self): # stack blocks if self.axis == 0: - name = com._consensus_name_attr(self.objs) + name = com.consensus_name_attr(self.objs) mgr = self.objs[0]._data.concat([x._data for x in self.objs], self.new_axes) diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py index 25d8cb4e804a2..f54daf368822e 100644 --- a/pandas/core/reshape/merge.py +++ b/pandas/core/reshape/merge.py @@ -498,9 +498,9 @@ def __init__(self, left, right, how='inner', on=None, self.how = how self.axis = axis - self.on = com._maybe_make_list(on) - self.left_on = com._maybe_make_list(left_on) - self.right_on = com._maybe_make_list(right_on) + self.on = com.maybe_make_list(on) + self.left_on = com.maybe_make_list(left_on) + self.right_on = com.maybe_make_list(right_on) self.copy = copy self.suffixes = suffixes @@ -1557,8 +1557,8 @@ def _factorize_keys(lk, rk, sort=True): rk = ensure_int64(rk) elif is_int_or_datetime_dtype(lk) and is_int_or_datetime_dtype(rk): klass = libhashtable.Int64Factorizer - lk = ensure_int64(com._values_from_object(lk)) - rk = ensure_int64(com._values_from_object(rk)) + lk = ensure_int64(com.values_from_object(lk)) + rk = ensure_int64(com.values_from_object(rk)) else: klass = libhashtable.Factorizer lk = ensure_object(lk) diff --git a/pandas/core/reshape/pivot.py b/pandas/core/reshape/pivot.py index da5246d389817..0d1caa3d57d73 100644 --- a/pandas/core/reshape/pivot.py +++ b/pandas/core/reshape/pivot.py @@ -470,8 +470,8 @@ def crosstab(index, columns, values=None, rownames=None, colnames=None, crosstab : DataFrame """ - index = com._maybe_make_list(index) - columns = com._maybe_make_list(columns) + index = com.maybe_make_list(index) + columns = com.maybe_make_list(columns) rownames = _get_names(index, rownames, prefix='row') colnames = _get_names(columns, colnames, prefix='col') diff --git a/pandas/core/reshape/util.py b/pandas/core/reshape/util.py index 2fe82e5d6bc57..e83bcf800e949 100644 --- a/pandas/core/reshape/util.py +++ b/pandas/core/reshape/util.py @@ -60,7 +60,7 @@ def cartesian_product(X): # if any factor is empty, the cartesian product is empty b = np.zeros_like(cumprodX) - return 
[np.tile(np.repeat(np.asarray(com._values_from_object(x)), b[i]), + return [np.tile(np.repeat(np.asarray(com.values_from_object(x)), b[i]), np.product(a[i])) for i, x in enumerate(X)] diff --git a/pandas/core/series.py b/pandas/core/series.py index 454d8902af85e..ea8ee50dbfd6f 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -1423,7 +1423,7 @@ def count(self, level=None): nobs : int or Series (if level specified) """ if level is None: - return notna(com._values_from_object(self)).sum() + return notna(com.values_from_object(self)).sum() if isinstance(level, compat.string_types): level = self.index._get_level_number(level) @@ -1727,7 +1727,7 @@ def idxmin(self, axis=0, skipna=True, *args, **kwargs): nan """ skipna = nv.validate_argmin_with_skipna(skipna, args, kwargs) - i = nanops.nanargmin(com._values_from_object(self), skipna=skipna) + i = nanops.nanargmin(com.values_from_object(self), skipna=skipna) if i == -1: return np.nan return self.index[i] @@ -1797,7 +1797,7 @@ def idxmax(self, axis=0, skipna=True, *args, **kwargs): nan """ skipna = nv.validate_argmax_with_skipna(skipna, args, kwargs) - i = nanops.nanargmax(com._values_from_object(self), skipna=skipna) + i = nanops.nanargmax(com.values_from_object(self), skipna=skipna) if i == -1: return np.nan return self.index[i] @@ -1840,7 +1840,7 @@ def round(self, decimals=0, *args, **kwargs): """ nv.validate_round(args, kwargs) - result = com._values_from_object(self).round(decimals) + result = com.values_from_object(self).round(decimals) result = self._constructor(result, index=self.index).__finalize__(self) return result @@ -2008,7 +2008,7 @@ def diff(self, periods=1): 5 NaN dtype: float64 """ - result = algorithms.diff(com._values_from_object(self), periods) + result = algorithms.diff(com.values_from_object(self), periods) return self._constructor(result, index=self.index).__finalize__(self) def autocorr(self, lag=1): diff --git a/pandas/core/sparse/series.py b/pandas/core/sparse/series.py index 96ee5b7954f45..1a92a27bfb390 100644 --- a/pandas/core/sparse/series.py +++ b/pandas/core/sparse/series.py @@ -375,7 +375,7 @@ def __getitem__(self, key): # Could not hash item, must be array-like? 
pass - key = com._values_from_object(key) + key = com.values_from_object(key) if self.index.nlevels > 1 and isinstance(key, tuple): # to handle MultiIndex labels key = self.index.get_loc(key) diff --git a/pandas/core/strings.py b/pandas/core/strings.py index e4765c00f80fd..e07b2152641b3 100644 --- a/pandas/core/strings.py +++ b/pandas/core/strings.py @@ -54,7 +54,7 @@ def _get_array_list(arr, others): """ from pandas.core.series import Series - if len(others) and isinstance(com._values_from_object(others)[0], + if len(others) and isinstance(com.values_from_object(others)[0], (list, np.ndarray, Series)): arrays = [arr] + list(others) else: @@ -701,7 +701,7 @@ def rep(x, r): return compat.text_type.__mul__(x, r) repeats = np.asarray(repeats, dtype=object) - result = libops.vec_binop(com._values_from_object(arr), repeats, rep) + result = libops.vec_binop(com.values_from_object(arr), repeats, rep) return result diff --git a/pandas/core/window.py b/pandas/core/window.py index f23135c41f333..f3b4aaa74ec6b 100644 --- a/pandas/core/window.py +++ b/pandas/core/window.py @@ -2467,7 +2467,7 @@ def dataframe_from_int_dict(data, frame_template): def _get_center_of_mass(comass, span, halflife, alpha): - valid_count = com._count_not_none(comass, span, halflife, alpha) + valid_count = com.count_not_none(comass, span, halflife, alpha) if valid_count > 1: raise ValueError("comass, span, halflife, and alpha " "are mutually exclusive") diff --git a/pandas/plotting/_style.py b/pandas/plotting/_style.py index 426b29a8840f4..c72e092c73aa2 100644 --- a/pandas/plotting/_style.py +++ b/pandas/plotting/_style.py @@ -49,7 +49,7 @@ def _get_standard_colors(num_colors=None, colormap=None, color_type='default', def random_color(column): """ Returns a random color represented as a list of length 3""" # GH17525 use common._random_state to avoid resetting the seed - rs = com._random_state(column) + rs = com.random_state(column) return rs.rand(3).tolist() colors = lmap(random_color, lrange(num_colors)) diff --git a/pandas/tests/test_common.py b/pandas/tests/test_common.py index 2f12e47eb8da2..091416b9c03e6 100644 --- a/pandas/tests/test_common.py +++ b/pandas/tests/test_common.py @@ -139,22 +139,22 @@ def test_groupby(): def test_random_state(): import numpy.random as npr # Check with seed - state = com._random_state(5) + state = com.random_state(5) assert state.uniform() == npr.RandomState(5).uniform() # Check with random state object state2 = npr.RandomState(10) - assert com._random_state(state2).uniform() == npr.RandomState(10).uniform() + assert com.random_state(state2).uniform() == npr.RandomState(10).uniform() # check with no arg random state - assert com._random_state() is np.random + assert com.random_state() is np.random # Error for floats or strings with pytest.raises(ValueError): - com._random_state('test') + com.random_state('test') with pytest.raises(ValueError): - com._random_state(5.5) + com.random_state(5.5) @pytest.mark.parametrize('left, right, expected', [ @@ -173,9 +173,9 @@ def test_dict_compat(): np.datetime64('2015-03-15'): 2} data_unchanged = {1: 2, 3: 4, 5: 6} expected = {Timestamp('1990-3-15'): 1, Timestamp('2015-03-15'): 2} - assert (com._dict_compat(data_datetime64) == expected) - assert (com._dict_compat(expected) == expected) - assert (com._dict_compat(data_unchanged) == data_unchanged) + assert (com.dict_compat(data_datetime64) == expected) + assert (com.dict_compat(expected) == expected) + assert (com.dict_compat(data_unchanged) == data_unchanged) def test_standardize_mapping(): From 
beb280bf2fb7aae0b08f4a470bc28b378c8b6e59 Mon Sep 17 00:00:00 2001 From: Brock Mendel Date: Fri, 20 Jul 2018 11:57:34 -0700 Subject: [PATCH 11/17] Flake8 fixups --- pandas/core/common.py | 2 +- pandas/core/indexes/datetimes.py | 2 +- pandas/core/indexes/period.py | 8 ++++---- pandas/core/indexes/timedeltas.py | 2 +- pandas/tests/test_algos.py | 2 +- 5 files changed, 8 insertions(+), 8 deletions(-) diff --git a/pandas/core/common.py b/pandas/core/common.py index 2c53e9f15d850..604e54177314e 100644 --- a/pandas/core/common.py +++ b/pandas/core/common.py @@ -100,7 +100,7 @@ def maybe_box_datetimelike(value): def is_bool_indexer(key): if isinstance(key, (ABCSeries, np.ndarray, ABCIndex)): if key.dtype == np.object_: - key = np.asarray(_values_from_object(key)) + key = np.asarray(values_from_object(key)) if not lib.is_bool_array(key): if isna(key).any(): diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index 0e1d2019c83b4..72477c03f7b93 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -1312,7 +1312,7 @@ def get_value(self, series, key): try: return com.maybe_box(self, Index.get_value(self, series, key), - series, key) + series, key) except KeyError: try: loc = self._get_string_slice(key) diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py index 47dd0492a8d5d..5261eb587d4a8 100644 --- a/pandas/core/indexes/period.py +++ b/pandas/core/indexes/period.py @@ -528,8 +528,8 @@ def get_value(self, series, key): s = com.values_from_object(series) try: return com.maybe_box(self, - super(PeriodIndex, self).get_value(s, key), - series, key) + super(PeriodIndex, self).get_value(s, key), + series, key) except (KeyError, IndexError): try: asdt, parsed, reso = parse_time_string(key, self.freq) @@ -553,7 +553,7 @@ def get_value(self, series, key): elif grp == freqn: key = Period(asdt, freq=self.freq).ordinal return com.maybe_box(self, self._engine.get_value(s, key), - series, key) + series, key) else: raise KeyError(key) except TypeError: @@ -561,7 +561,7 @@ def get_value(self, series, key): key = Period(key, self.freq).ordinal return com.maybe_box(self, self._engine.get_value(s, key), - series, key) + series, key) @Appender(_index_shared_docs['get_indexer'] % _index_doc_kwargs) def get_indexer(self, target, method=None, limit=None, tolerance=None): diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py index b9a0c55e947c0..7e3561dc46564 100644 --- a/pandas/core/indexes/timedeltas.py +++ b/pandas/core/indexes/timedeltas.py @@ -500,7 +500,7 @@ def get_value(self, series, key): try: return com.maybe_box(self, Index.get_value(self, series, key), - series, key) + series, key) except KeyError: try: loc = self._get_string_slice(key) diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py index ad8293c09b633..62b37a35249d0 100644 --- a/pandas/tests/test_algos.py +++ b/pandas/tests/test_algos.py @@ -218,7 +218,7 @@ def test_factorize_tuple_list(self, data, expected_label, expected_level): np.array(expected_label, dtype=np.intp)) expected_level_array = com.asarray_tuplesafe(expected_level, - dtype=object) + dtype=object) tm.assert_numpy_array_equal(result[1], expected_level_array) def test_complex_sorting(self): From 5264a0d0d3fbcddad93df41ee81554e637e1552c Mon Sep 17 00:00:00 2001 From: Brock Mendel Date: Fri, 20 Jul 2018 11:59:47 -0700 Subject: [PATCH 12/17] remove unused set functions --- pandas/core/common.py | 30 ---------------------- pandas/tests/computation/test_eval.py | 3 +-- 
pandas/tests/test_common.py | 36 --------------------------- 3 files changed, 1 insertion(+), 68 deletions(-) diff --git a/pandas/core/common.py b/pandas/core/common.py index 604e54177314e..6190459785b2b 100644 --- a/pandas/core/common.py +++ b/pandas/core/common.py @@ -243,36 +243,6 @@ def __iter__(self): return iter(dict.items(self)) -def map_indices_py(arr): - """ - Returns a dictionary with (element, index) pairs for each element in the - given array/list - """ - return {x: i for i, x in enumerate(arr)} - - -def union(*seqs): - result = set([]) - for seq in seqs: - if not isinstance(seq, set): - seq = set(seq) - result |= seq - return type(seqs[0])(list(result)) - - -def difference(a, b): - return type(a)(list(set(a) - set(b))) - - -def intersection(*seqs): - result = set(seqs[0]) - for seq in seqs: - if not isinstance(seq, set): - seq = set(seq) - result &= seq - return type(seqs[0])(list(result)) - - def asarray_tuplesafe(values, dtype=None): if not (isinstance(values, (list, tuple)) or hasattr(values, '__array__')): diff --git a/pandas/tests/computation/test_eval.py b/pandas/tests/computation/test_eval.py index 07ba0b681418e..118b05d16ab09 100644 --- a/pandas/tests/computation/test_eval.py +++ b/pandas/tests/computation/test_eval.py @@ -10,7 +10,6 @@ from pandas.core.dtypes.common import is_bool, is_list_like, is_scalar import pandas as pd -from pandas.core import common as com from pandas.errors import PerformanceWarning from pandas import DataFrame, Series, Panel, date_range from pandas.util.testing import makeCustomDataframe as mkdf @@ -94,7 +93,7 @@ def _is_py3_complex_incompat(result, expected): np.isnan(result)) -_good_arith_ops = com.difference(_arith_ops_syms, _special_case_arith_ops_syms) +_good_arith_ops = set(_arith_ops_syms).difference(_special_case_arith_ops_syms) @td.skip_if_no_ne diff --git a/pandas/tests/test_common.py b/pandas/tests/test_common.py index 091416b9c03e6..850e8158fa89f 100644 --- a/pandas/tests/test_common.py +++ b/pandas/tests/test_common.py @@ -88,42 +88,6 @@ def test_locs(mask): test_locs([1]) -def test_map_indices_py(): - data = [4, 3, 2, 1] - expected = {4: 0, 3: 1, 2: 2, 1: 3} - - result = com.map_indices_py(data) - - assert (result == expected) - - -def test_union(): - a = [1, 2, 3] - b = [4, 5, 6] - - union = sorted(com.union(a, b)) - - assert ((a + b) == union) - - -def test_difference(): - a = [1, 2, 3] - b = [1, 2, 3, 4, 5, 6] - - inter = sorted(com.difference(b, a)) - - assert ([4, 5, 6] == inter) - - -def test_intersection(): - a = [1, 2, 3] - b = [1, 2, 3, 4, 5, 6] - - inter = sorted(com.intersection(a, b)) - - assert (a == inter) - - def test_groupby(): values = ['foo', 'bar', 'baz', 'baz2', 'qux', 'foo3'] expected = {'f': ['foo', 'foo3'], From 7430b96e9bd680a4ad3bd2bd803a2c11b0de5d20 Mon Sep 17 00:00:00 2001 From: Brock Mendel Date: Fri, 20 Jul 2018 12:00:34 -0700 Subject: [PATCH 13/17] remove unused groupby --- pandas/core/common.py | 21 --------------------- pandas/tests/test_common.py | 12 ------------ 2 files changed, 33 deletions(-) diff --git a/pandas/core/common.py b/pandas/core/common.py index 6190459785b2b..58044fb1d7093 100644 --- a/pandas/core/common.py +++ b/pandas/core/common.py @@ -222,27 +222,6 @@ def split_ranges(mask): yield ranges[-1] -class groupby(dict): - """ - A simple groupby different from the one in itertools. - - Does not require the sequence elements to be sorted by keys, - however it is slower. 
- """ - - def __init__(self, seq, key=lambda x: x): - for value in seq: - k = key(value) - self.setdefault(k, []).append(value) - - try: - __iter__ = dict.iteritems - except AttributeError: # pragma: no cover - # Python 3 - def __iter__(self): - return iter(dict.items(self)) - - def asarray_tuplesafe(values, dtype=None): if not (isinstance(values, (list, tuple)) or hasattr(values, '__array__')): diff --git a/pandas/tests/test_common.py b/pandas/tests/test_common.py index 850e8158fa89f..631065ca6d003 100644 --- a/pandas/tests/test_common.py +++ b/pandas/tests/test_common.py @@ -88,18 +88,6 @@ def test_locs(mask): test_locs([1]) -def test_groupby(): - values = ['foo', 'bar', 'baz', 'baz2', 'qux', 'foo3'] - expected = {'f': ['foo', 'foo3'], - 'b': ['bar', 'baz', 'baz2'], - 'q': ['qux']} - - grouped = com.groupby(values, lambda x: x[0]) - - for k, v in grouped: - assert v == expected[k] - - def test_random_state(): import numpy.random as npr # Check with seed From a34fe9fd32231da8d07220588c7371cffbcfaeac Mon Sep 17 00:00:00 2001 From: Brock Mendel Date: Fri, 20 Jul 2018 12:01:36 -0700 Subject: [PATCH 14/17] remove unused split_ranges --- pandas/core/common.py | 20 +------------------- pandas/tests/test_common.py | 30 ------------------------------ 2 files changed, 1 insertion(+), 49 deletions(-) diff --git a/pandas/core/common.py b/pandas/core/common.py index 58044fb1d7093..f729f8f667635 100644 --- a/pandas/core/common.py +++ b/pandas/core/common.py @@ -203,25 +203,6 @@ def iterpairs(seq): return zip(seq_it, seq_it_next) -def split_ranges(mask): - """ Generates tuples of ranges which cover all True value in mask - - >>> list(split_ranges([1,0,0,1,0])) - [(0, 1), (3, 4)] - """ - ranges = [(0, len(mask))] - - for pos, val in enumerate(mask): - if not val: # this pos should be omitted, split off the prefix range - r = ranges.pop() - if pos > r[0]: # yield non-zero range - yield (r[0], pos) - if pos + 1 < len(mask): # save the rest for processing - ranges.append((pos + 1, len(mask))) - if ranges: - yield ranges[-1] - - def asarray_tuplesafe(values, dtype=None): if not (isinstance(values, (list, tuple)) or hasattr(values, '__array__')): @@ -292,6 +273,7 @@ def is_true_slices(l): return [isinstance(k, slice) and not is_null_slice(k) for k in l] +# TODO: used only once in indexing; belongs elsewhere? 
def is_full_slice(obj, l): """ we have a full length slice """ return (isinstance(obj, slice) and obj.start == 0 and obj.stop == l and diff --git a/pandas/tests/test_common.py b/pandas/tests/test_common.py index 631065ca6d003..f80e1699346df 100644 --- a/pandas/tests/test_common.py +++ b/pandas/tests/test_common.py @@ -58,36 +58,6 @@ def test_iterpairs(): assert (result == expected) -def test_split_ranges(): - def _bin(x, width): - "return int(x) as a base2 string of given width" - return ''.join(str((x >> i) & 1) for i in range(width - 1, -1, -1)) - - def test_locs(mask): - nfalse = sum(np.array(mask) == 0) - - remaining = 0 - for s, e in com.split_ranges(mask): - remaining += e - s - - assert 0 not in mask[s:e] - - # make sure the total items covered by the ranges are a complete cover - assert remaining + nfalse == len(mask) - - # exhaustively test all possible mask sequences of length 8 - ncols = 8 - for i in range(2 ** ncols): - cols = lmap(int, list(_bin(i, ncols))) # count up in base2 - mask = [cols[i] == 1 for i in range(len(cols))] - test_locs(mask) - - # base cases - test_locs([]) - test_locs([0]) - test_locs([1]) - - def test_random_state(): import numpy.random as npr # Check with seed From cc28772a0566b18b4afcbe53561f0bb6748b770a Mon Sep 17 00:00:00 2001 From: Brock Mendel Date: Fri, 20 Jul 2018 12:03:18 -0700 Subject: [PATCH 15/17] remove unused iterpairs --- pandas/core/common.py | 25 +------------------------ pandas/tests/test_common.py | 9 --------- 2 files changed, 1 insertion(+), 33 deletions(-) diff --git a/pandas/core/common.py b/pandas/core/common.py index f729f8f667635..166cad1e801d2 100644 --- a/pandas/core/common.py +++ b/pandas/core/common.py @@ -11,7 +11,7 @@ from pandas._libs import lib, tslibs from pandas import compat -from pandas.compat import zip, iteritems, PY36, OrderedDict +from pandas.compat import iteritems, PY36, OrderedDict from pandas.core.dtypes.generic import ABCSeries, ABCIndex, ABCIndexClass from pandas.core.dtypes.common import is_integer from pandas.core.dtypes.inference import _iterable_not_string @@ -180,29 +180,6 @@ def dict_keys_to_ordered_list(mapping): return keys -def iterpairs(seq): - """ - Parameters - ---------- - seq : sequence - - Returns - ------- - iterator returning overlapping pairs of elements - - Examples - -------- - >>> list(iterpairs([1, 2, 3, 4])) - [(1, 2), (2, 3), (3, 4)] - """ - # input may not be sliceable - seq_it = iter(seq) - seq_it_next = iter(seq) - next(seq_it_next) - - return zip(seq_it, seq_it_next) - - def asarray_tuplesafe(values, dtype=None): if not (isinstance(values, (list, tuple)) or hasattr(values, '__array__')): diff --git a/pandas/tests/test_common.py b/pandas/tests/test_common.py index f80e1699346df..77a2bdf21c47d 100644 --- a/pandas/tests/test_common.py +++ b/pandas/tests/test_common.py @@ -49,15 +49,6 @@ def test_all_not_none(): assert (not com._all_not_none(None, None, None, None)) -def test_iterpairs(): - data = [1, 2, 3, 4] - expected = [(1, 2), (2, 3), (3, 4)] - - result = list(com.iterpairs(data)) - - assert (result == expected) - - def test_random_state(): import numpy.random as npr # Check with seed From 892ba575c99ac15e5001ba65c5fc5c1cc01dbe53 Mon Sep 17 00:00:00 2001 From: Brock Mendel Date: Fri, 20 Jul 2018 12:50:59 -0700 Subject: [PATCH 16/17] flake8 fixup unused imports --- pandas/tests/test_common.py | 1 - 1 file changed, 1 deletion(-) diff --git a/pandas/tests/test_common.py b/pandas/tests/test_common.py index 77a2bdf21c47d..e1c9202189972 100644 --- a/pandas/tests/test_common.py +++ 
b/pandas/tests/test_common.py @@ -8,7 +8,6 @@ import numpy as np from pandas import Series, DataFrame, Timestamp -from pandas.compat import range, lmap import pandas.core.common as com from pandas.core import ops from pandas.io.common import _get_handle From 00ba0d0d089d4d437783c1ae216c3011cb209b24 Mon Sep 17 00:00:00 2001 From: Brock Mendel Date: Mon, 23 Jul 2018 21:05:02 -0700 Subject: [PATCH 17/17] whatsnew note, comment in docstring --- doc/source/whatsnew/v0.24.0.txt | 2 +- pandas/core/common.py | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt index 973b75f0e1451..5e59fe6101f75 100644 --- a/doc/source/whatsnew/v0.24.0.txt +++ b/doc/source/whatsnew/v0.24.0.txt @@ -389,7 +389,7 @@ Removal of prior version deprecations/changes ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - The ``LongPanel`` and ``WidePanel`` classes have been removed (:issue:`10892`) -- +- Several private functions were removed from the (non-public) module ``pandas.core.common`` (:issue:`22001`) - - diff --git a/pandas/core/common.py b/pandas/core/common.py index 166cad1e801d2..0350b338f2bee 100644 --- a/pandas/core/common.py +++ b/pandas/core/common.py @@ -1,5 +1,7 @@ """ Misc tools for implementing data structures + +Note: pandas.core.common is *not* part of the public API. """ from datetime import datetime, timedelta
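
As a rough illustration (not part of the patch series itself), the sketch below shows what these renames amount to for internal callers, assuming pandas at this revision: the underscore-prefixed helpers in pandas.core.common lose their leading underscore, the removed set helpers are replaced by builtin set operations, and the module as a whole stays non-public, so code outside pandas should not rely on it.

    # Minimal sketch, illustrative only; pandas.core.common is internal API.
    import pandas as pd
    import pandas.core.common as com

    s = pd.Series([1, 2, 3])

    # formerly com._values_from_object(s): unwrap to the underlying ndarray
    arr = com.values_from_object(s)

    # formerly com._count_not_none(...): count the non-None arguments
    n = com.count_not_none(None, 1, "a")      # 2

    # formerly com._random_state(5): a seeded numpy RandomState
    rs = com.random_state(5)

    # removed helpers such as com.difference(a, b) are replaced by
    # builtin set operations, e.g.:
    left_over = set(["+", "-", "*"]).difference(["*"])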