From b6cb0557f74f0f6fb8458e07ed7e7af82af7c102 Mon Sep 17 00:00:00 2001 From: Brock Mendel Date: Wed, 17 Jan 2018 20:24:43 -0800 Subject: [PATCH 1/3] cleanup inconsistently used imports --- pandas/core/categorical.py | 7 ++-- pandas/core/frame.py | 44 ++++++++++------------- pandas/core/generic.py | 20 +++++------ pandas/core/indexes/datetimelike.py | 4 ++- pandas/core/panel.py | 6 ++-- pandas/core/series.py | 54 ++++++++++++----------------- pandas/core/sparse/frame.py | 12 +++---- pandas/core/sparse/series.py | 5 ++- pandas/core/strings.py | 1 - 9 files changed, 66 insertions(+), 87 deletions(-) diff --git a/pandas/core/categorical.py b/pandas/core/categorical.py index 7b11e37a14b51..be364337f8f82 100644 --- a/pandas/core/categorical.py +++ b/pandas/core/categorical.py @@ -28,7 +28,6 @@ is_list_like, is_sequence, is_scalar, is_dict_like) -from pandas.core.common import is_null_slice, _maybe_box_datetimelike from pandas.core.algorithms import factorize, take_1d, unique1d from pandas.core.accessor import PandasDelegate @@ -468,7 +467,7 @@ def tolist(self): (for Timestamp/Timedelta/Interval/Period) """ if is_datetimelike(self.categories): - return [_maybe_box_datetimelike(x) for x in self] + return [com._maybe_box_datetimelike(x) for x in self] return np.array(self).tolist() @property @@ -1686,7 +1685,7 @@ def _slice(self, slicer): # only allow 1 dimensional slicing, but can # in a 2-d case be passd (slice(None),....) if isinstance(slicer, tuple) and len(slicer) == 2: - if not is_null_slice(slicer[0]): + if not com.is_null_slice(slicer[0]): raise AssertionError("invalid slicing for a 1-ndim " "categorical") slicer = slicer[1] @@ -1847,7 +1846,7 @@ def __setitem__(self, key, value): # only allow 1 dimensional slicing, but can # in a 2-d case be passd (slice(None),....)
if len(key) == 2: - if not is_null_slice(key[0]): + if not com.is_null_slice(key[0]): raise AssertionError("invalid slicing for a 1-ndim " "categorical") key = key[1] diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 35cc7a2a34acb..e46edab1a7386 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -62,12 +62,6 @@ from pandas.core.dtypes.missing import isna, notna -from pandas.core.common import (_try_sort, - _default_index, - _values_from_object, - _maybe_box_datetimelike, - _dict_compat, - standardize_mapping) from pandas.core.generic import NDFrame, _shared_docs from pandas.core.index import (Index, MultiIndex, _ensure_index, _ensure_index_from_sequences) @@ -387,9 +381,9 @@ def __init__(self, data=None, index=None, columns=None, dtype=None, if isinstance(data[0], Series): index = _get_names_from_index(data) elif isinstance(data[0], Categorical): - index = _default_index(len(data[0])) + index = com._default_index(len(data[0])) else: - index = _default_index(len(data)) + index = com._default_index(len(data)) mgr = _arrays_to_mgr(arrays, columns, index, columns, dtype=dtype) @@ -466,7 +460,7 @@ def _init_dict(self, data, index, columns, dtype=None): else: keys = list(data.keys()) if not isinstance(data, OrderedDict): - keys = _try_sort(keys) + keys = com._try_sort(keys) columns = data_names = Index(keys) arrays = [data[k] for k in keys] @@ -493,12 +487,12 @@ def _get_axes(N, K, index=index, columns=columns): # return axes or defaults if index is None: - index = _default_index(N) + index = com._default_index(N) else: index = _ensure_index(index) if columns is None: - columns = _default_index(K) + columns = com._default_index(K) else: columns = _ensure_index(columns) return index, columns @@ -990,7 +984,7 @@ def to_dict(self, orient='dict', into=dict): "columns will be omitted.", UserWarning, stacklevel=2) # GH16122 - into_c = standardize_mapping(into) + into_c = com.standardize_mapping(into) if orient.lower().startswith('d'): return into_c( (k, v.to_dict(into)) for k, v in compat.iteritems(self)) @@ -1000,13 +994,13 @@ def to_dict(self, orient='dict', into=dict): return into_c((('index', self.index.tolist()), ('columns', self.columns.tolist()), ('data', lib.map_infer(self.values.ravel(), - _maybe_box_datetimelike) + com._maybe_box_datetimelike) .reshape(self.values.shape).tolist()))) elif orient.lower().startswith('s'): - return into_c((k, _maybe_box_datetimelike(v)) + return into_c((k, com._maybe_box_datetimelike(v)) for k, v in compat.iteritems(self)) elif orient.lower().startswith('r'): - return [into_c((k, _maybe_box_datetimelike(v)) + return [into_c((k, com._maybe_box_datetimelike(v)) for k, v in zip(self.columns, np.atleast_1d(row))) for row in self.values] elif orient.lower().startswith('i'): @@ -2006,7 +2000,7 @@ def _get_value(self, index, col, takeable=False): if takeable: series = self._iget_item_cache(col) - return _maybe_box_datetimelike(series._values[index]) + return com._maybe_box_datetimelike(series._values[index]) series = self._get_item_cache(col) engine = self.index._engine @@ -3371,7 +3365,7 @@ def _maybe_casted_values(index, labels=None): values, mask, np.nan) return values - new_index = _default_index(len(new_obj)) + new_index = com._default_index(len(new_obj)) if level is not None: if not isinstance(level, (tuple, list)): level = [level] @@ -6084,7 +6078,7 @@ def extract_index(data): (lengths[0], len(index))) raise ValueError(msg) else: - index = _default_index(lengths[0]) + index = com._default_index(lengths[0]) return _ensure_index(index) @@ 
-6155,7 +6149,7 @@ def _to_arrays(data, columns, coerce_float=False, dtype=None): dtype=dtype) elif isinstance(data[0], Categorical): if columns is None: - columns = _default_index(len(data)) + columns = com._default_index(len(data)) return data, columns elif (isinstance(data, (np.ndarray, Series, Index)) and data.dtype.names is not None): @@ -6179,7 +6173,7 @@ def _masked_rec_array_to_mgr(data, index, columns, dtype, copy): if index is None: index = _get_names_from_index(fdata) if index is None: - index = _default_index(len(data)) + index = com._default_index(len(data)) index = _ensure_index(index) if columns is not None: @@ -6239,14 +6233,14 @@ def _list_of_series_to_arrays(data, columns, coerce_float=False, dtype=None): for s in data: index = getattr(s, 'index', None) if index is None: - index = _default_index(len(s)) + index = com._default_index(len(s)) if id(index) in indexer_cache: indexer = indexer_cache[id(index)] else: indexer = indexer_cache[id(index)] = index.get_indexer(columns) - values = _values_from_object(s) + values = com._values_from_object(s) aligned_values.append(algorithms.take_1d(values, indexer)) values = np.vstack(aligned_values) @@ -6276,7 +6270,7 @@ def _list_of_dict_to_arrays(data, columns, coerce_float=False, dtype=None): def _convert_object_array(content, columns, coerce_float=False, dtype=None): if columns is None: - columns = _default_index(len(content)) + columns = com._default_index(len(content)) else: if len(columns) != len(content): # pragma: no cover # caller's responsibility to check for this... @@ -6298,7 +6292,7 @@ def convert(arr): def _get_names_from_index(data): has_some_name = any(getattr(s, 'name', None) is not None for s in data) if not has_some_name: - return _default_index(len(data)) + return com._default_index(len(data)) index = lrange(len(data)) count = 0 @@ -6333,7 +6327,7 @@ def _homogenize(data, index, dtype=None): oindex = index.astype('O') if isinstance(index, (DatetimeIndex, TimedeltaIndex)): - v = _dict_compat(v) + v = com._dict_compat(v) else: v = dict(v) v = lib.fast_multiget(v, oindex.values, default=np.nan) diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 7ffef9c8a86d7..063cb8912016f 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -30,9 +30,7 @@ from pandas.core.dtypes.inference import is_hashable from pandas.core.dtypes.missing import isna, notna from pandas.core.dtypes.generic import ABCSeries, ABCPanel, ABCDataFrame -from pandas.core.common import (_count_not_none, - _maybe_box_datetimelike, _values_from_object, - AbstractMethodError, SettingWithCopyError, +from pandas.core.common import (AbstractMethodError, SettingWithCopyError, SettingWithCopyWarning) from pandas.core.base import PandasObject, SelectionMixin @@ -1026,7 +1024,7 @@ def _indexed_same(self, other): for a in self._AXIS_ORDERS) def __neg__(self): - values = _values_from_object(self) + values = com._values_from_object(self) if values.dtype == np.bool_: arr = operator.inv(values) else: @@ -1035,7 +1033,7 @@ def __neg__(self): def __invert__(self): try: - arr = operator.inv(_values_from_object(self)) + arr = operator.inv(com._values_from_object(self)) return self.__array_wrap__(arr) except Exception: @@ -1490,7 +1488,7 @@ def __round__(self, decimals=0): # Array Interface def __array__(self, dtype=None): - return _values_from_object(self) + return com._values_from_object(self) def __array_wrap__(self, result, context=None): d = self._construct_axes_dict(self._AXIS_ORDERS, copy=False) @@ -2696,7 +2694,7 @@ def xs(self, key, 
axis=0, level=None, drop_level=True): # that means that their are list/ndarrays inside the Series! # so just return them (GH 6394) if not is_list_like(new_values) or self.ndim == 1: - return _maybe_box_datetimelike(new_values) + return com._maybe_box_datetimelike(new_values) result = self._constructor_sliced( new_values, index=self.columns, @@ -3557,7 +3555,7 @@ def filter(self, items=None, like=None, regex=None, axis=None): """ import re - nkw = _count_not_none(items, like, regex) + nkw = com._count_not_none(items, like, regex) if nkw > 1: raise TypeError('Keyword arguments `items`, `like`, or `regex` ' 'are mutually exclusive') @@ -6357,7 +6355,7 @@ def _where(self, cond, other=np.nan, inplace=False, axis=None, level=None, if try_quick: try: - new_other = _values_from_object(self).copy() + new_other = com._values_from_object(self).copy() new_other[icond] = other other = new_other except Exception: @@ -7318,7 +7316,7 @@ def pct_change(self, periods=1, fill_method='pad', limit=None, freq=None, rs = (data.div(data.shift(periods=periods, freq=freq, axis=axis, **kwargs)) - 1) if freq is None: - mask = isna(_values_from_object(self)) + mask = isna(com._values_from_object(self)) np.putmask(rs.values, mask, np.nan) return rs @@ -7778,7 +7776,7 @@ def cum_func(self, axis=None, skipna=True, *args, **kwargs): else: axis = self._get_axis_number(axis) - y = _values_from_object(self).copy() + y = com._values_from_object(self).copy() if (skipna and issubclass(y.dtype.type, (np.datetime64, np.timedelta64))): diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index 7bb6708e03421..6d28ab8308a3f 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -10,6 +10,7 @@ from pandas.core.tools.timedeltas import to_timedelta import numpy as np + from pandas.core.dtypes.common import ( _ensure_int64, is_dtype_equal, @@ -29,6 +30,8 @@ from pandas.core.dtypes.generic import ( ABCIndex, ABCSeries, ABCPeriodIndex, ABCIndexClass) from pandas.core.dtypes.missing import isna +import pandas.core.dtypes.concat as _concat + from pandas.core import common as com, algorithms from pandas.core.algorithms import checked_add_with_arr from pandas.core.common import AbstractMethodError @@ -41,7 +44,6 @@ from pandas.core.indexes.base import Index, _index_shared_docs from pandas.util._decorators import Appender, cache_readonly -import pandas.core.dtypes.concat as _concat import pandas.tseries.frequencies as frequencies import pandas.core.indexes.base as ibase diff --git a/pandas/core/panel.py b/pandas/core/panel.py index 1df69576e6ff2..1a19a6b6e7cfe 100644 --- a/pandas/core/panel.py +++ b/pandas/core/panel.py @@ -31,7 +31,7 @@ from pandas.core.internals import (BlockManager, create_block_manager_from_arrays, create_block_manager_from_blocks) -from pandas.core.ops import _op_descriptions + from pandas.core.series import Series from pandas.core.reshape.util import cartesian_product from pandas.util._decorators import Appender @@ -1545,9 +1545,9 @@ def na_op(x, y): result = missing.fill_zeros(result, x, y, name, fill_zeros) return result - if name in _op_descriptions: + if name in ops._op_descriptions: op_name = name.replace('__', '') - op_desc = _op_descriptions[op_name] + op_desc = ops._op_descriptions[op_name] if op_desc['reversed']: equiv = 'other ' + op_desc['op'] + ' panel' else: diff --git a/pandas/core/series.py b/pandas/core/series.py index 73a7fe1fd89e9..5b6795083557f 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -39,19 +39,11 @@ 
construct_1d_arraylike_from_scalar) from pandas.core.dtypes.missing import isna, notna, remove_na_arraylike -from pandas.core.common import (is_bool_indexer, - _default_index, - _asarray_tuplesafe, - _values_from_object, - _maybe_match_name, - SettingWithCopyError, - _maybe_box_datetimelike, - standardize_mapping, - _any_none) from pandas.core.index import (Index, MultiIndex, InvalidIndexError, Float64Index, _ensure_index) from pandas.core.indexing import check_bool_indexer, maybe_convert_indices -from pandas.core import generic, base + +from pandas.core import generic, base, algorithms, ops, nanops from pandas.core.internals import SingleBlockManager from pandas.core.categorical import Categorical, CategoricalAccessor from pandas.core.indexes.accessors import CombinedDatetimelikeProperties @@ -64,11 +56,9 @@ zip, u, OrderedDict, StringIO, range, get_range_parameters) from pandas.compat.numpy import function as nv -import pandas.core.ops as ops -import pandas.core.algorithms as algorithms import pandas.core.common as com -import pandas.core.nanops as nanops + import pandas.io.formats.format as fmt from pandas.util._decorators import ( Appender, deprecate, deprecate_kwarg, Substitution) @@ -230,7 +220,7 @@ def __init__(self, data=None, index=None, dtype=None, name=None, if index is None: if not is_list_like(data): data = [data] - index = _default_index(len(data)) + index = com._default_index(len(data)) # create/copy the manager if isinstance(data, SingleBlockManager): @@ -688,7 +678,7 @@ def __getitem__(self, key): pass elif key is Ellipsis: return self - elif is_bool_indexer(key): + elif com.is_bool_indexer(key): pass else: @@ -762,7 +752,7 @@ def _get_with(self, key): def _get_values_tuple(self, key): # mpl hackaround - if _any_none(*key): + if com._any_none(*key): return self._get_values(key) if not isinstance(self.index, MultiIndex): @@ -787,7 +777,7 @@ def setitem(key, value): try: self._set_with_engine(key, value) return - except (SettingWithCopyError): + except com.SettingWithCopyError: raise except (KeyError, ValueError): values = self._values @@ -887,7 +877,7 @@ def _set_labels(self, key, value): if isinstance(key, Index): key = key.values else: - key = _asarray_tuplesafe(key) + key = com._asarray_tuplesafe(key) indexer = self.index.get_indexer(key) mask = indexer == -1 if mask.any(): @@ -939,7 +929,7 @@ def get_value(self, label, takeable=False): def _get_value(self, label, takeable=False): if takeable is True: - return _maybe_box_datetimelike(self._values[label]) + return com._maybe_box_datetimelike(self._values[label]) return self.index.get_value(self._values, label) _get_value.__doc__ = get_value.__doc__ @@ -1039,7 +1029,7 @@ def reset_index(self, level=None, drop=False, name=None, inplace=False): """ inplace = validate_bool_kwarg(inplace, 'inplace') if drop: - new_index = _default_index(len(self)) + new_index = com._default_index(len(self)) if level is not None and isinstance(self.index, MultiIndex): if not isinstance(level, (tuple, list)): level = [level] @@ -1182,7 +1172,7 @@ def to_dict(self, into=dict): defaultdict(, {0: 1, 1: 2, 2: 3, 3: 4}) """ # GH16122 - into_c = standardize_mapping(into) + into_c = com.standardize_mapping(into) return into_c(compat.iteritems(self)) def to_frame(self, name=None): @@ -1260,7 +1250,7 @@ def count(self, level=None): from pandas.core.index import _get_na_value if level is None: - return notna(_values_from_object(self)).sum() + return notna(com._values_from_object(self)).sum() if isinstance(level, compat.string_types): level = 
self.index._get_level_number(level) @@ -1342,7 +1332,7 @@ def idxmin(self, axis=None, skipna=True, *args, **kwargs): numpy.ndarray.argmin """ skipna = nv.validate_argmin_with_skipna(skipna, args, kwargs) - i = nanops.nanargmin(_values_from_object(self), skipna=skipna) + i = nanops.nanargmin(com._values_from_object(self), skipna=skipna) if i == -1: return np.nan return self.index[i] @@ -1378,7 +1368,7 @@ def idxmax(self, axis=None, skipna=True, *args, **kwargs): numpy.ndarray.argmax """ skipna = nv.validate_argmax_with_skipna(skipna, args, kwargs) - i = nanops.nanargmax(_values_from_object(self), skipna=skipna) + i = nanops.nanargmax(com._values_from_object(self), skipna=skipna) if i == -1: return np.nan return self.index[i] @@ -1419,7 +1409,7 @@ def round(self, decimals=0, *args, **kwargs): """ nv.validate_round(args, kwargs) - result = _values_from_object(self).round(decimals) + result = com._values_from_object(self).round(decimals) result = self._constructor(result, index=self.index).__finalize__(self) return result @@ -1536,7 +1526,7 @@ def diff(self, periods=1): ------- diffed : Series """ - result = algorithms.diff(_values_from_object(self), periods) + result = algorithms.diff(com._values_from_object(self), periods) return self._constructor(result, index=self.index).__finalize__(self) def autocorr(self, lag=1): @@ -1737,7 +1727,7 @@ def _binop(self, other, func, level=None, fill_value=None): with np.errstate(all='ignore'): result = func(this_vals, other_vals) - name = _maybe_match_name(self, other) + name = com._maybe_match_name(self, other) result = self._constructor(result, index=new_index, name=name) result = result.__finalize__(self) if name is None: @@ -1778,7 +1768,7 @@ def combine(self, other, func, fill_value=np.nan): """ if isinstance(other, Series): new_index = self.index.union(other.index) - new_name = _maybe_match_name(self, other) + new_name = com._maybe_match_name(self, other) new_values = np.empty(len(new_index), dtype=self.dtype) for i, idx in enumerate(new_index): lv = self.get(idx, fill_value) @@ -1823,7 +1813,7 @@ def combine_first(self, other): this = self.reindex(new_index, copy=False) other = other.reindex(new_index, copy=False) # TODO: do we need name? - name = _maybe_match_name(self, other) # noqa + name = com._maybe_match_name(self, other) # noqa rs_vals = com._where_compat(isna(this), other._values, this._values) return self._constructor(rs_vals, index=new_index).__finalize__(self) @@ -1911,7 +1901,7 @@ def _try_kind_sort(arr): bad = isna(arr) good = ~bad - idx = _default_index(len(self)) + idx = com._default_index(len(self)) argsorted = _try_kind_sort(arr[good]) @@ -2784,7 +2774,7 @@ def isin(self, values): dtype: bool """ - result = algorithms.isin(_values_from_object(self), values) + result = algorithms.isin(com._values_from_object(self), values) return self._constructor(result, index=self.index).__finalize__(self) def between(self, left, right, inclusive=True): @@ -3253,7 +3243,7 @@ def _try_cast(arr, take_fast_path): if isinstance(data, np.ndarray): raise Exception('Data must be 1-dimensional') else: - subarr = _asarray_tuplesafe(data, dtype=dtype) + subarr = com._asarray_tuplesafe(data, dtype=dtype) # This is to prevent mixed-type Series getting all casted to # NumPy string type, e.g. NaN --> '-1#IND'. 
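Illustration (not part of the patch): the convention being standardized in this commit is a single aliased module import, with every helper qualified at the call site, instead of importing individual (often private) names from pandas.core.common. A minimal sketch of the two styles, using helpers that appear in the hunks above; note that pandas.core.common is private API, so these names can change between pandas versions.

# Discouraged style (this is exactly what the lint rule added in the
# second patch greps for):
#   from pandas.core.common import is_null_slice, _maybe_box_datetimelike

# Preferred style: one aliased module import, every helper qualified with `com.`
import pandas.core.common as com

key = slice(None)
print(com.is_null_slice(key))  # True: slice(None) is the "null slice"

The patch applies the same treatment even to exception classes (AbstractMethodError, SettingWithCopyError), which keeps the grep-based lint rule added in the second patch free of special cases.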
diff --git a/pandas/core/sparse/frame.py b/pandas/core/sparse/frame.py index 49a0b8d86ad31..c7f5b0ba67c19 100644 --- a/pandas/core/sparse/frame.py +++ b/pandas/core/sparse/frame.py @@ -14,12 +14,10 @@ from pandas.core.dtypes.cast import maybe_upcast, find_common_type from pandas.core.dtypes.common import _ensure_platform_int, is_scipy_sparse -from pandas.core.common import _try_sort from pandas.compat.numpy import function as nv from pandas.core.index import Index, MultiIndex, _ensure_index from pandas.core.series import Series -from pandas.core.frame import (DataFrame, extract_index, _prep_ndarray, - _default_index) +from pandas.core.frame import DataFrame, extract_index, _prep_ndarray import pandas.core.algorithms as algos from pandas.core.internals import (BlockManager, create_block_manager_from_arrays) @@ -28,7 +26,7 @@ from pandas._libs.sparse import BlockIndex, get_blocks from pandas.util._decorators import Appender import pandas.core.ops as ops - +import pandas.core.common as com _shared_doc_kwargs = dict(klass='SparseDataFrame') @@ -133,7 +131,7 @@ def _init_dict(self, data, index, columns, dtype=None): columns = _ensure_index(columns) data = {k: v for k, v in compat.iteritems(data) if k in columns} else: - columns = Index(_try_sort(list(data.keys()))) + columns = Index(com._try_sort(list(data.keys()))) if index is None: index = extract_index(list(data.values())) @@ -208,9 +206,9 @@ def _init_spmatrix(self, data, index, columns, dtype=None, def _prep_index(self, data, index, columns): N, K = data.shape if index is None: - index = _default_index(N) + index = com._default_index(N) if columns is None: - columns = _default_index(K) + columns = com._default_index(K) if len(columns) != K: raise ValueError('Column length mismatch: {columns} vs. {K}' diff --git a/pandas/core/sparse/series.py b/pandas/core/sparse/series.py index b5d2c0b607444..4b649927f8f72 100644 --- a/pandas/core/sparse/series.py +++ b/pandas/core/sparse/series.py @@ -10,7 +10,6 @@ from pandas.core.dtypes.missing import isna, notna from pandas.core.dtypes.common import is_scalar -from pandas.core.common import _values_from_object, _maybe_match_name from pandas.compat.numpy import function as nv from pandas.core.index import Index, _ensure_index, InvalidIndexError @@ -80,7 +79,7 @@ def wrapper(self, other): def _sparse_series_op(left, right, op, name): left, right = left.align(right, join='outer', copy=False) new_index = left.index - new_name = _maybe_match_name(left, right) + new_name = com._maybe_match_name(left, right) result = _sparse_array_op(left.values, right.values, op, name, series=True) @@ -423,7 +422,7 @@ def __getitem__(self, key): # Could not hash item, must be array-like? pass - key = _values_from_object(key) + key = com._values_from_object(key) if self.index.nlevels > 1 and isinstance(key, tuple): # to handle MultiIndex labels key = self.index.get_loc(key) diff --git a/pandas/core/strings.py b/pandas/core/strings.py index 278b220753196..99b53f85d0cf2 100644 --- a/pandas/core/strings.py +++ b/pandas/core/strings.py @@ -1235,7 +1235,6 @@ def str_translate(arr, table, deletechars=None): if deletechars is None: f = lambda x: x.translate(table) else: - from pandas import compat if compat.PY3: raise ValueError("deletechars is not a valid argument for " "str.translate in python 3. 
You should simply " From e87a9fb0441ae5f43c85c11e452c220146f25043 Mon Sep 17 00:00:00 2001 From: Brock Mendel Date: Thu, 18 Jan 2018 21:20:52 -0800 Subject: [PATCH 2/3] Add linting for com, implement over all files --- ci/lint.sh | 9 ++++ pandas/core/base.py | 11 ++-- pandas/core/computation/align.py | 6 ++- pandas/core/computation/expressions.py | 8 +-- pandas/core/frame.py | 12 ++--- pandas/core/generic.py | 16 +++--- pandas/core/groupby.py | 33 ++++++------ pandas/core/indexes/base.py | 35 +++++++------ pandas/core/indexes/category.py | 7 ++- pandas/core/indexes/datetimelike.py | 8 +-- pandas/core/indexes/datetimes.py | 12 ++--- pandas/core/indexes/interval.py | 23 +++++---- pandas/core/indexes/multi.py | 31 +++++------ pandas/core/indexes/numeric.py | 11 ++-- pandas/core/indexes/range.py | 6 ++- pandas/core/indexes/timedeltas.py | 11 ++-- pandas/core/indexing.py | 51 +++++++++---------- pandas/core/internals.py | 9 ++-- pandas/core/nanops.py | 12 ++--- pandas/core/ops.py | 15 +++--- pandas/core/panel.py | 17 ++++--- pandas/core/resample.py | 8 +-- pandas/core/strings.py | 6 +-- pandas/core/window.py | 27 +++++----- pandas/io/common.py | 5 +- pandas/io/formats/excel.py | 6 ++- pandas/io/formats/format.py | 9 ++-- pandas/io/formats/style.py | 8 +-- pandas/io/html.py | 19 +++---- pandas/io/json/json.py | 9 ++-- pandas/io/json/table_schema.py | 5 +- pandas/io/parquet.py | 7 +-- pandas/io/parsers.py | 5 +- pandas/io/pytables.py | 9 ++-- pandas/plotting/_core.py | 13 ++--- pandas/plotting/_style.py | 4 +- pandas/tests/frame/test_to_csv.py | 6 ++- .../tests/indexes/interval/test_interval.py | 8 +-- pandas/tests/io/parser/test_parsers.py | 5 +- pandas/tests/scalar/test_interval.py | 5 +- pandas/tests/test_algos.py | 6 ++- pandas/tests/test_resample.py | 5 +- pandas/tests/util/test_util.py | 6 ++- pandas/tseries/offsets.py | 8 +-- pandas/util/testing.py | 4 +- 45 files changed, 283 insertions(+), 253 deletions(-) diff --git a/ci/lint.sh b/ci/lint.sh index 35b39e2abb3c6..a96e0961304e7 100755 --- a/ci/lint.sh +++ b/ci/lint.sh @@ -91,6 +91,15 @@ if [ "$LINT" ]; then fi echo "Check for invalid testing DONE" + # Check for imports from pandas.core.common instead + # of `import pandas.core.common as com` + echo "Check for non-standard imports" + grep -R --include="*.py*" -E "from pandas.core.common import " pandas + if [ $? 
= "0" ]; then + RET=1 + fi + echo "Check for non-standard imports DONE" + echo "Check for use of lists instead of generators in built-in Python functions" # Example: Avoid `any([i for i in some_iterator])` in favor of `any(i for i in some_iterator)` diff --git a/pandas/core/base.py b/pandas/core/base.py index 4b3e74eae36b8..54d25a16a10a3 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -24,7 +24,6 @@ from pandas.compat import PYPY from pandas.util._decorators import (Appender, cache_readonly, deprecate_kwarg, Substitution) -from pandas.core.common import AbstractMethodError, _maybe_box_datetimelike from pandas.core.accessor import DirNamesMixin @@ -46,7 +45,7 @@ class StringMixin(object): # Formatting def __unicode__(self): - raise AbstractMethodError(self) + raise com.AbstractMethodError(self) def __str__(self): """ @@ -278,10 +277,10 @@ def _gotitem(self, key, ndim, subset=None): subset to act on """ - raise AbstractMethodError(self) + raise com.AbstractMethodError(self) def aggregate(self, func, *args, **kwargs): - raise AbstractMethodError(self) + raise com.AbstractMethodError(self) agg = aggregate @@ -815,7 +814,7 @@ def tolist(self): """ if is_datetimelike(self): - return [_maybe_box_datetimelike(x) for x in self._values] + return [com._maybe_box_datetimelike(x) for x in self._values] else: return self._values.tolist() @@ -1238,4 +1237,4 @@ def duplicated(self, keep='first'): # abstracts def _update_inplace(self, result, **kwargs): - raise AbstractMethodError(self) + raise com.AbstractMethodError(self) diff --git a/pandas/core/computation/align.py b/pandas/core/computation/align.py index 2e912b0075bfd..6a20205644c6c 100644 --- a/pandas/core/computation/align.py +++ b/pandas/core/computation/align.py @@ -10,7 +10,9 @@ import pandas as pd from pandas import compat from pandas.errors import PerformanceWarning -from pandas.core.common import flatten + +import pandas.core.common as com + from pandas.core.computation.common import _result_type_many @@ -117,7 +119,7 @@ def _align(terms): """Align a set of terms""" try: # flatten the parse tree (a nested list, really) - terms = list(flatten(terms)) + terms = list(com.flatten(terms)) except TypeError: # can't iterate so it must just be a constant or single variable if isinstance(terms.value, pd.core.generic.NDFrame): diff --git a/pandas/core/computation/expressions.py b/pandas/core/computation/expressions.py index 1dc19d33f3365..14548c1514805 100644 --- a/pandas/core/computation/expressions.py +++ b/pandas/core/computation/expressions.py @@ -8,7 +8,9 @@ import warnings import numpy as np -from pandas.core.common import _values_from_object + +import pandas.core.common as com + from pandas.core.computation.check import _NUMEXPR_INSTALLED from pandas.core.config import get_option @@ -122,8 +124,8 @@ def _evaluate_numexpr(op, op_str, a, b, truediv=True, def _where_standard(cond, a, b): - return np.where(_values_from_object(cond), _values_from_object(a), - _values_from_object(b)) + return np.where(com._values_from_object(cond), com._values_from_object(a), + com._values_from_object(b)) def _where_numexpr(cond, a, b): diff --git a/pandas/core/frame.py b/pandas/core/frame.py index e46edab1a7386..09c3bfc9c4a42 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -1941,30 +1941,28 @@ def transpose(self, *args, **kwargs): # legacy pickle formats def _unpickle_frame_compat(self, state): # pragma: no cover - from pandas.core.common import _unpickle_array if len(state) == 2: # pragma: no cover series, idx = state columns = 
sorted(series) else: series, cols, idx = state - columns = _unpickle_array(cols) + columns = com._unpickle_array(cols) - index = _unpickle_array(idx) + index = com._unpickle_array(idx) self._data = self._init_dict(series, index, columns, None) def _unpickle_matrix_compat(self, state): # pragma: no cover - from pandas.core.common import _unpickle_array # old unpickling (vals, idx, cols), object_state = state - index = _unpickle_array(idx) - dm = DataFrame(vals, index=index, columns=_unpickle_array(cols), + index = com._unpickle_array(idx) + dm = DataFrame(vals, index=index, columns=com._unpickle_array(cols), copy=False) if object_state is not None: ovals, _, ocols = object_state objects = DataFrame(ovals, index=index, - columns=_unpickle_array(ocols), copy=False) + columns=com._unpickle_array(ocols), copy=False) dm = dm.join(objects) diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 063cb8912016f..6e777281b11e1 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -30,8 +30,6 @@ from pandas.core.dtypes.inference import is_hashable from pandas.core.dtypes.missing import isna, notna from pandas.core.dtypes.generic import ABCSeries, ABCPanel, ABCDataFrame -from pandas.core.common import (AbstractMethodError, SettingWithCopyError, - SettingWithCopyWarning) from pandas.core.base import PandasObject, SelectionMixin from pandas.core.index import (Index, MultiIndex, _ensure_index, @@ -196,7 +194,7 @@ def _constructor(self): """Used when a manipulation result has the same dimensions as the original. """ - raise AbstractMethodError(self) + raise com.AbstractMethodError(self) def __unicode__(self): # unicode representation based upon iterating over self @@ -218,7 +216,7 @@ def _constructor_sliced(self): """Used when a manipulation result has one lower dimension(s) as the original, such as DataFrame single columns slicing. """ - raise AbstractMethodError(self) + raise com.AbstractMethodError(self) @property def _constructor_expanddim(self): @@ -2202,7 +2200,7 @@ def _iget_item_cache(self, item): return lower def _box_item_values(self, key, values): - raise AbstractMethodError(self) + raise com.AbstractMethodError(self) def _maybe_cache_changed(self, item, value): """The object has called back to us saying maybe it has changed. 
@@ -2395,9 +2393,10 @@ def _check_setitem_copy(self, stacklevel=4, t='setting', force=False): ) if value == 'raise': - raise SettingWithCopyError(t) + raise com.SettingWithCopyError(t) elif value == 'warn': - warnings.warn(t, SettingWithCopyWarning, stacklevel=stacklevel) + warnings.warn(t, com.SettingWithCopyWarning, + stacklevel=stacklevel) def __delitem__(self, key): """ @@ -6355,7 +6354,8 @@ def _where(self, cond, other=np.nan, inplace=False, axis=None, level=None, if try_quick: try: - new_other = com._values_from_object(self).copy() + new_other = com._values_from_object(self) + new_other = new_other.copy() new_other[icond] = other other = new_other except Exception: diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py index 082b6e2a8b1a0..29c7b037ae97c 100644 --- a/pandas/core/groupby.py +++ b/pandas/core/groupby.py @@ -39,10 +39,6 @@ from pandas.core.dtypes.cast import maybe_downcast_to_dtype from pandas.core.dtypes.missing import isna, notna, _maybe_fill -from pandas.core.common import (_values_from_object, AbstractMethodError, - _default_index, _not_none, _get_callable_name, - _asarray_tuplesafe, _pipe) - from pandas.core.base import (PandasObject, SelectionMixin, GroupByError, DataError, SpecificationError) from pandas.core.index import (Index, MultiIndex, @@ -61,6 +57,7 @@ from pandas.io.formats.printing import pprint_thing from pandas.util._validators import validate_kwargs +import pandas.core.common as com import pandas.core.algorithms as algorithms from pandas.core.config import option_context @@ -751,7 +748,7 @@ def __getattr__(self, attr): b 2""") @Appender(_pipe_template) def pipe(self, func, *args, **kwargs): - return _pipe(self, func, *args, **kwargs) + return com._pipe(self, func, *args, **kwargs) plot = property(GroupByPlot) @@ -895,7 +892,7 @@ def _iterate_slices(self): yield self._selection_name, self._selected_obj def transform(self, func, *args, **kwargs): - raise AbstractMethodError(self) + raise com.AbstractMethodError(self) def _cumcount_array(self, ascending=True): """ @@ -1037,7 +1034,7 @@ def _python_agg_general(self, func, *args, **kwargs): return self._wrap_aggregated_output(output) def _wrap_applied_output(self, *args, **kwargs): - raise AbstractMethodError(self) + raise com.AbstractMethodError(self) def _concat_objects(self, keys, values, not_indexed_same=False): from pandas.core.reshape.concat import concat @@ -1045,7 +1042,7 @@ def _concat_objects(self, keys, values, not_indexed_same=False): def reset_identity(values): # reset the identities of the components # of the values to prevent aliasing - for v in _not_none(*values): + for v in com._not_none(*values): ax = v._get_axis(self.axis) ax._reset_identity() return values @@ -1975,7 +1972,7 @@ def apply(self, f, data, axis=0): group_keys = self._get_group_keys() # oh boy - f_name = _get_callable_name(f) + f_name = com._get_callable_name(f) if (f_name not in _plotting_methods and hasattr(splitter, 'fast_apply') and axis == 0): try: @@ -2009,7 +2006,7 @@ def indices(self): return self.groupings[0].indices else: label_list = [ping.labels for ping in self.groupings] - keys = [_values_from_object(ping.group_index) + keys = [com._values_from_object(ping.group_index) for ping in self.groupings] return get_indexer_dict(label_list, keys) @@ -2707,7 +2704,7 @@ def __init__(self, index, grouper=None, obj=None, name=None, level=None, self.grouper = self.obj[self.name] elif isinstance(self.grouper, (list, tuple)): - self.grouper = _asarray_tuplesafe(self.grouper) + self.grouper = 
com._asarray_tuplesafe(self.grouper) # a passed Categorical elif is_categorical_dtype(self.grouper): @@ -2934,7 +2931,7 @@ def _get_grouper(obj, key=None, axis=0, level=None, sort=True, if not any_callable and not all_in_columns_index and \ not any_arraylike and not any_groupers and \ match_axis_length and level is None: - keys = [_asarray_tuplesafe(keys)] + keys = [com._asarray_tuplesafe(keys)] if isinstance(level, (tuple, list)): if key is None: @@ -3229,7 +3226,7 @@ def _aggregate_multiple_funcs(self, arg, _level): columns.append(f) else: # protect against callables without names - columns.append(_get_callable_name(f)) + columns.append(com._get_callable_name(f)) arg = lzip(columns, arg) results = {} @@ -3829,7 +3826,7 @@ def _aggregate_generic(self, func, *args, **kwargs): return self._wrap_generic_output(result, obj) def _wrap_aggregated_output(self, output, names=None): - raise AbstractMethodError(self) + raise com.AbstractMethodError(self) def _aggregate_item_by_item(self, func, *args, **kwargs): # only for axis==0 @@ -3891,7 +3888,7 @@ def _wrap_applied_output(self, keys, values, not_indexed_same=False): # GH12824. def first_not_none(values): try: - return next(_not_none(*values)) + return next(com._not_none(*values)) except StopIteration: return None @@ -4585,7 +4582,7 @@ def groupby_series(obj, col=None): results = concat(results, axis=1) if not self.as_index: - results.index = _default_index(len(results)) + results.index = com._default_index(len(results)) return results boxplot = boxplot_frame_groupby @@ -4675,7 +4672,7 @@ def _aggregate_item_by_item(self, func, *args, **kwargs): raise ValueError("axis value must be greater than 0") def _wrap_aggregated_output(self, output, names=None): - raise AbstractMethodError(self) + raise com.AbstractMethodError(self) class NDArrayGroupBy(GroupBy): @@ -4731,7 +4728,7 @@ def _chop(self, sdata, slice_obj): return sdata.iloc[slice_obj] def apply(self, f): - raise AbstractMethodError(self) + raise com.AbstractMethodError(self) class ArraySplitter(DataSplitter): diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 6d0a415f5b420..569403bd0206e 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -41,11 +41,10 @@ needs_i8_conversion, is_iterator, is_list_like, is_scalar) -from pandas.core.common import (is_bool_indexer, _values_from_object, - _asarray_tuplesafe, _not_none, - _index_labels_to_array) from pandas.core.base import PandasObject, IndexOpsMixin + +import pandas.core.common as com import pandas.core.base as base from pandas.util._decorators import ( Appender, Substitution, cache_readonly, deprecate_kwarg) @@ -292,7 +291,7 @@ def __new__(cls, data=None, dtype=None, copy=False, name=None, elif issubclass(data.dtype.type, np.bool) or is_bool_dtype(data): subarr = data.astype('object') else: - subarr = _asarray_tuplesafe(data, dtype=object) + subarr = com._asarray_tuplesafe(data, dtype=object) # _asarray_tuplesafe does not always copy underlying data, # so need to make sure that this happens @@ -361,7 +360,7 @@ def __new__(cls, data=None, dtype=None, copy=False, name=None, return MultiIndex.from_tuples( data, names=name or kwargs.get('names')) # other iterable of some kind - subarr = _asarray_tuplesafe(data, dtype=object) + subarr = com._asarray_tuplesafe(data, dtype=object) return Index(subarr, dtype=dtype, copy=copy, name=name, **kwargs) """ @@ -1498,7 +1497,7 @@ def _convert_listlike_indexer(self, keyarr, kind=None): @Appender(_index_shared_docs['_convert_arr_indexer']) def 
_convert_arr_indexer(self, keyarr): - keyarr = _asarray_tuplesafe(keyarr) + keyarr = com._asarray_tuplesafe(keyarr) return keyarr _index_shared_docs['_convert_index_indexer'] = """ @@ -1736,10 +1735,10 @@ def __getitem__(self, key): # pessimization of basic indexing. return promote(getitem(key)) - if is_bool_indexer(key): + if com.is_bool_indexer(key): key = np.asarray(key) - key = _values_from_object(key) + key = com._values_from_object(key) result = getitem(key) if not is_scalar(result): return promote(result) @@ -2022,8 +2021,8 @@ def equals(self, other): return other.equals(self) try: - return array_equivalent(_values_from_object(self), - _values_from_object(other)) + return array_equivalent(com._values_from_object(self), + com._values_from_object(other)) except Exception: return False @@ -2539,8 +2538,8 @@ def get_value(self, series, key): # invalid type as an indexer pass - s = _values_from_object(series) - k = _values_from_object(key) + s = com._values_from_object(series) + k = com._values_from_object(key) k = self._convert_scalar_indexer(k, kind='getitem') try: @@ -2573,8 +2572,8 @@ def set_value(self, arr, key, value): Fast lookup of value from 1-dimensional ndarray. Only use this if you know what you're doing """ - self._engine.set_value(_values_from_object(arr), - _values_from_object(key), value) + self._engine.set_value(com._values_from_object(arr), + com._values_from_object(key), value) def _get_level_values(self, level): """ @@ -3193,8 +3192,8 @@ def _join_multi(self, other, how, return_indexers=True): other_is_mi = isinstance(other, MultiIndex) # figure out join names - self_names = _not_none(*self.names) - other_names = _not_none(*other.names) + self_names = com._not_none(*self.names) + other_names = com._not_none(*other.names) overlap = list(set(self_names) & set(other_names)) # need at least 1 in common, but not more than 1 @@ -3766,7 +3765,7 @@ def drop(self, labels, errors='raise'): If none of the labels are found in the selected axis """ arr_dtype = 'object' if self.dtype == 'object' else None - labels = _index_labels_to_array(labels, dtype=arr_dtype) + labels = com._index_labels_to_array(labels, dtype=arr_dtype) indexer = self.get_indexer(labels) mask = indexer == -1 if mask.any(): @@ -4001,7 +4000,7 @@ def _validate_for_numeric_binop(self, other, op, opstr): if len(self) != len(other): raise ValueError("cannot evaluate a numeric op with " "unequal lengths") - other = _values_from_object(other) + other = com._values_from_object(other) if other.dtype.kind not in ['f', 'i', 'u']: raise TypeError("cannot evaluate a numeric op " "with a non-numeric dtype") diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py index ac7cb30fa823d..1c38f96491422 100644 --- a/pandas/core/indexes/category.py +++ b/pandas/core/indexes/category.py @@ -11,8 +11,6 @@ is_list_like, is_interval_dtype, is_scalar) -from pandas.core.common import (_asarray_tuplesafe, - _values_from_object) from pandas.core.dtypes.missing import array_equivalent, isna from pandas.core.algorithms import take_1d @@ -21,6 +19,7 @@ from pandas.core.config import get_option from pandas.core.indexes.base import Index, _index_shared_docs from pandas.core import accessor +import pandas.core.common as com import pandas.core.base as base import pandas.core.missing as missing import pandas.core.indexes.base as ibase @@ -442,7 +441,7 @@ def get_value(self, series, key): know what you're doing """ try: - k = _values_from_object(key) + k = com._values_from_object(key) k = self._convert_scalar_indexer(k, 
kind='getitem') indexer = self.get_loc(k) return series.iloc[indexer] @@ -620,7 +619,7 @@ def _convert_list_indexer(self, keyarr, kind=None): @Appender(_index_shared_docs['_convert_arr_indexer']) def _convert_arr_indexer(self, keyarr): - keyarr = _asarray_tuplesafe(keyarr) + keyarr = com._asarray_tuplesafe(keyarr) if self.categories._defer_to_indexing: return keyarr diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index 6d28ab8308a3f..424753543b866 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -34,7 +34,7 @@ from pandas.core import common as com, algorithms from pandas.core.algorithms import checked_add_with_arr -from pandas.core.common import AbstractMethodError + from pandas.errors import NullFrequencyError import pandas.io.formats.printing as printing @@ -247,7 +247,7 @@ def _box_func(self): """ box function to get object from internal representation """ - raise AbstractMethodError(self) + raise com.AbstractMethodError(self) def _box_values(self, values): """ @@ -591,7 +591,7 @@ def argmax(self, axis=None, *args, **kwargs): @property def _formatter_func(self): - raise AbstractMethodError(self) + raise com.AbstractMethodError(self) def _format_attrs(self): """ @@ -649,7 +649,7 @@ def _add_datelike(self, other): type(other).__name__)) def _sub_datelike(self, other): - raise AbstractMethodError(self) + raise com.AbstractMethodError(self) def _sub_period(self, other): return NotImplemented diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index 0349e5c0a448f..7d58d22e5c0f1 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -34,7 +34,7 @@ import pandas.core.dtypes.concat as _concat from pandas.errors import PerformanceWarning -from pandas.core.common import _values_from_object, _maybe_box + from pandas.core.algorithms import checked_add_with_arr from pandas.core.indexes.base import Index, _index_shared_docs @@ -126,7 +126,7 @@ def wrapper(self, other): self._assert_tzawareness_compat(other) result = func(np.asarray(other)) - result = _values_from_object(result) + result = com._values_from_object(result) if isinstance(other, Index): o_mask = other.values.view('i8') == libts.iNaT @@ -1488,8 +1488,8 @@ def get_value(self, series, key): return series.take(locs) try: - return _maybe_box(self, Index.get_value(self, series, key), - series, key) + return com._maybe_box(self, Index.get_value(self, series, key), + series, key) except KeyError: try: loc = self._get_string_slice(key) @@ -1508,9 +1508,9 @@ def get_value_maybe_box(self, series, key): key = Timestamp(key, tz=self.tz) elif not isinstance(key, Timestamp): key = Timestamp(key) - values = self._engine.get_value(_values_from_object(series), + values = self._engine.get_value(com._values_from_object(series), key, tz=self.tz) - return _maybe_box(self, values, series, key) + return com._maybe_box(self, values, series, key) def get_loc(self, key, method=None, tolerance=None): """ diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index 58b1bdb3f55ea..b6fcce0addff4 100644 --- a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -35,9 +35,9 @@ from pandas.core.indexes.timedeltas import timedelta_range from pandas.core.indexes.multi import MultiIndex from pandas.compat.numpy import function as nv -from pandas.core.common import ( - _all_not_none, _any_none, _asarray_tuplesafe, _count_not_none, - is_bool_indexer, _maybe_box_datetimelike, _not_none) + 
+import pandas.core.common as com + from pandas.util._decorators import cache_readonly, Appender from pandas.core.config import get_option from pandas.tseries.frequencies import to_offset @@ -237,7 +237,8 @@ def __new__(cls, data, closed=None, data = maybe_convert_platform_interval(data) left, right, infer_closed = intervals_to_interval_bounds(data) - if _all_not_none(closed, infer_closed) and closed != infer_closed: + if (com._all_not_none(closed, infer_closed) and + closed != infer_closed): # GH 18421 msg = ("conflicting values for closed: constructor got " "'{closed}', inferred from data '{infer_closed}'" @@ -602,7 +603,7 @@ def to_tuples(self, na_tuple=True): >>> idx.to_tuples(na_tuple=False) Index([(0.0, 1.0), nan, (2.0, 3.0)], dtype='object') """ - tuples = _asarray_tuplesafe(zip(self.left, self.right)) + tuples = com._asarray_tuplesafe(zip(self.left, self.right)) if not na_tuple: # GH 18756 tuples = np.where(~self._isnan, tuples, np.nan) @@ -975,7 +976,7 @@ def get_loc(self, key, method=None): return self._engine.get_loc(key) def get_value(self, series, key): - if is_bool_indexer(key): + if com.is_bool_indexer(key): loc = key elif is_list_like(key): loc = self.get_indexer(key) @@ -1347,7 +1348,7 @@ def _is_type_compatible(a, b): return ((is_number(a) and is_number(b)) or (is_ts_compat(a) and is_ts_compat(b)) or (is_td_compat(a) and is_td_compat(b)) or - _any_none(a, b)) + com._any_none(a, b)) def interval_range(start=None, end=None, periods=None, freq=None, @@ -1426,13 +1427,13 @@ def interval_range(start=None, end=None, periods=None, freq=None, -------- IntervalIndex : an Index of intervals that are all closed on the same side. """ - if _count_not_none(start, end, periods) != 2: + if com._count_not_none(start, end, periods) != 2: raise ValueError('Of the three parameters: start, end, and periods, ' 'exactly two must be specified') - start = _maybe_box_datetimelike(start) - end = _maybe_box_datetimelike(end) - endpoint = next(_not_none(start, end)) + start = com._maybe_box_datetimelike(start) + end = com._maybe_box_datetimelike(end) + endpoint = next(com._not_none(start, end)) if not _is_valid_endpoint(start): msg = 'start must be numeric or datetime-like, got {start}' diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index 5739c8dfd8b53..3bf1f79f6dceb 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -22,11 +22,6 @@ is_scalar) from pandas.core.dtypes.missing import isna, array_equivalent from pandas.errors import PerformanceWarning, UnsortedIndexError -from pandas.core.common import (_any_not_none, - _values_from_object, - is_bool_indexer, - is_null_slice, - is_true_slices) import pandas.core.base as base from pandas.util._decorators import Appender, cache_readonly, deprecate_kwarg @@ -539,7 +534,7 @@ def _format_attrs(self): max_seq_items=False)), ('labels', ibase.default_pprint(self._labels, max_seq_items=False))] - if _any_not_none(*self.names): + if com._any_not_none(*self.names): attrs.append(('names', ibase.default_pprint(self.names))) if self.sortorder is not None: attrs.append(('sortorder', ibase.default_pprint(self.sortorder))) @@ -863,8 +858,8 @@ def get_value(self, series, key): from pandas.core.indexing import maybe_droplevels # Label-based - s = _values_from_object(series) - k = _values_from_object(key) + s = com._values_from_object(series) + k = com._values_from_object(key) def _try_mi(k): # TODO: what if a level contains tuples?? 
@@ -1474,7 +1469,7 @@ def __getitem__(self, key): return tuple(retval) else: - if is_bool_indexer(key): + if com.is_bool_indexer(key): key = np.asarray(key) sortorder = self.sortorder else: @@ -1612,7 +1607,7 @@ def drop(self, labels, level=None, errors='raise'): inds.append(loc) elif isinstance(loc, slice): inds.extend(lrange(loc.start, loc.stop)) - elif is_bool_indexer(loc): + elif com.is_bool_indexer(loc): if self.lexsort_depth == 0: warnings.warn('dropping on a non-lexsorted multi-index' ' without a level parameter may impact ' @@ -2145,7 +2140,7 @@ def _maybe_str_to_time_stamp(key, lev): pass return key - key = _values_from_object(key) + key = com._values_from_object(key) key = tuple(map(_maybe_str_to_time_stamp, key, self.levels)) return self._engine.get_loc(key) @@ -2303,7 +2298,7 @@ def partial_selection(key, indexer=None): key = tuple(self[indexer].tolist()[0]) return (self._engine.get_loc( - _values_from_object(key)), None) + com._values_from_object(key)), None) else: return partial_selection(key) @@ -2463,7 +2458,7 @@ def get_locs(self, seq): """ # must be lexsorted to at least as many levels - true_slices = [i for (i, s) in enumerate(is_true_slices(seq)) if s] + true_slices = [i for (i, s) in enumerate(com.is_true_slices(seq)) if s] if true_slices and true_slices[-1] >= self.lexsort_depth: raise UnsortedIndexError('MultiIndex slicing requires the index ' 'to be lexsorted: slicing on levels {0}, ' @@ -2480,7 +2475,7 @@ def _convert_to_indexer(r): m = np.zeros(n, dtype=bool) m[r] = True r = m.nonzero()[0] - elif is_bool_indexer(r): + elif com.is_bool_indexer(r): if len(r) != n: raise ValueError("cannot index with a boolean indexer " "that is not the same length as the " @@ -2498,7 +2493,7 @@ def _update_indexer(idxr, indexer=indexer): for i, k in enumerate(seq): - if is_bool_indexer(k): + if com.is_bool_indexer(k): # a boolean indexer, must be the same length! k = np.asarray(k) indexer = _update_indexer(_convert_to_indexer(k), @@ -2527,7 +2522,7 @@ def _update_indexer(idxr, indexer=indexer): # no matches we are done return Int64Index([])._values - elif is_null_slice(k): + elif com.is_null_slice(k): # empty slice indexer = _update_indexer(None, indexer=indexer) @@ -2594,8 +2589,8 @@ def equals(self, other): return False if not isinstance(other, MultiIndex): - return array_equivalent(self._values, - _values_from_object(_ensure_index(other))) + other_vals = com._values_from_object(_ensure_index(other)) + return array_equivalent(self._values, other_vals) if self.nlevels != other.nlevels: return False diff --git a/pandas/core/indexes/numeric.py b/pandas/core/indexes/numeric.py index 6337c2f73d5ec..8cf4f6625d1f3 100644 --- a/pandas/core/indexes/numeric.py +++ b/pandas/core/indexes/numeric.py @@ -9,10 +9,11 @@ is_bool, is_bool_dtype, is_scalar) -from pandas.core.common import _asarray_tuplesafe, _values_from_object from pandas import compat from pandas.core import algorithms +import pandas.core.common as com + from pandas.core.indexes.base import ( Index, InvalidIndexError, _index_shared_docs) from pandas.util._decorators import Appender, cache_readonly @@ -251,9 +252,9 @@ def _convert_arr_indexer(self, keyarr): # Cast the indexer to uint64 if possible so # that the values returned from indexing are # also uint64. 
- keyarr = _asarray_tuplesafe(keyarr) + keyarr = com._asarray_tuplesafe(keyarr) if is_integer_dtype(keyarr): - return _asarray_tuplesafe(keyarr, dtype=np.uint64) + return com._asarray_tuplesafe(keyarr, dtype=np.uint64) return keyarr @Appender(_index_shared_docs['_convert_index_indexer']) @@ -357,9 +358,9 @@ def get_value(self, series, key): if not is_scalar(key): raise InvalidIndexError - k = _values_from_object(key) + k = com._values_from_object(key) loc = self.get_loc(k) - new_values = _values_from_object(series)[loc] + new_values = com._values_from_object(series)[loc] return new_values diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py index 10a923c056be2..fa15f66a0e741 100644 --- a/pandas/core/indexes/range.py +++ b/pandas/core/indexes/range.py @@ -10,10 +10,12 @@ is_int64_dtype) from pandas.core.dtypes.generic import ABCSeries +import pandas.core.common as com + from pandas import compat from pandas.compat import lrange, range, get_range_parameters from pandas.compat.numpy import function as nv -from pandas.core.common import _all_none + from pandas.core.indexes.base import Index, _index_shared_docs from pandas.util._decorators import Appender, cache_readonly import pandas.core.dtypes.concat as _concat @@ -89,7 +91,7 @@ def _ensure_int(value, field): return new_value - if _all_none(start, stop, step): + if com._all_none(start, stop, step): msg = "RangeIndex(...) must be called with integers" raise TypeError(msg) elif start is None: diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py index 866329b16c830..9339ef9f0948a 100644 --- a/pandas/core/indexes/timedeltas.py +++ b/pandas/core/indexes/timedeltas.py @@ -17,7 +17,6 @@ _ensure_int64) from pandas.core.dtypes.missing import isna from pandas.core.dtypes.generic import ABCSeries -from pandas.core.common import _maybe_box, _values_from_object from pandas.core.indexes.base import Index from pandas.core.indexes.numeric import Int64Index @@ -77,7 +76,7 @@ def wrapper(self, other): other = TimedeltaIndex(other).values result = func(other) - result = _values_from_object(result) + result = com._values_from_object(result) if isinstance(other, Index): o_mask = other.values.view('i8') == iNaT @@ -710,8 +709,8 @@ def get_value(self, series, key): return self.get_value_maybe_box(series, key) try: - return _maybe_box(self, Index.get_value(self, series, key), - series, key) + return com._maybe_box(self, Index.get_value(self, series, key), + series, key) except KeyError: try: loc = self._get_string_slice(key) @@ -727,8 +726,8 @@ def get_value(self, series, key): def get_value_maybe_box(self, series, key): if not isinstance(key, Timedelta): key = Timedelta(key) - values = self._engine.get_value(_values_from_object(series), key) - return _maybe_box(self, values, series, key) + values = self._engine.get_value(com._values_from_object(series), key) + return com._maybe_box(self, values, series, key) def get_loc(self, key, method=None, tolerance=None): """ diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index e2c4043f0508d..3ca150cda83c7 100755 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -20,9 +20,6 @@ from pandas.core.index import Index, MultiIndex import pandas.core.common as com -from pandas.core.common import (is_bool_indexer, _asarray_tuplesafe, - is_null_slice, is_full_slice, - _values_from_object) from pandas._libs.indexing import _NDFrameIndexerBase @@ -314,7 +311,7 @@ def _setitem_with_indexer(self, indexer, value): # (not null slices) then we must take the split 
path, xref # GH 10360 if (isinstance(ax, MultiIndex) and - not (is_integer(i) or is_null_slice(i))): + not (is_integer(i) or com.is_null_slice(i))): take_split_path = True break @@ -519,8 +516,8 @@ def setter(item, v): # multi-dim object # GH6149 (null slice), GH10408 (full bounds) if (isinstance(pi, tuple) and - all(is_null_slice(idx) or - is_full_slice(idx, len(self.obj)) + all(com.is_null_slice(idx) or + com.is_full_slice(idx, len(self.obj)) for idx in pi)): s = v else: @@ -613,8 +610,10 @@ def can_do_equal_len(): # logic here if (len(indexer) > info_axis and is_integer(indexer[info_axis]) and - all(is_null_slice(idx) for i, idx in enumerate(indexer) - if i != info_axis) and item_labels.is_unique): + all(com.is_null_slice(idx) + for i, idx in enumerate(indexer) + if i != info_axis) and + item_labels.is_unique): self.obj[item_labels[indexer[info_axis]]] = value return @@ -667,7 +666,7 @@ def _align_series(self, indexer, ser, multiindex_indexer=False): ravel = lambda i: i.ravel() if isinstance(i, np.ndarray) else i indexer = tuple(map(ravel, indexer)) - aligners = [not is_null_slice(idx) for idx in indexer] + aligners = [not com.is_null_slice(idx) for idx in indexer] sum_aligners = sum(aligners) single_aligner = sum_aligners == 1 is_frame = self.obj.ndim == 2 @@ -706,7 +705,7 @@ def _align_series(self, indexer, ser, multiindex_indexer=False): # multiple aligners (or null slices) if is_sequence(idx) or isinstance(idx, slice): - if single_aligner and is_null_slice(idx): + if single_aligner and com.is_null_slice(idx): continue new_ix = ax[idx] if not is_list_like_indexer(new_ix): @@ -767,7 +766,7 @@ def _align_frame(self, indexer, df): if isinstance(indexer, tuple): - aligners = [not is_null_slice(idx) for idx in indexer] + aligners = [not com.is_null_slice(idx) for idx in indexer] sum_aligners = sum(aligners) # TODO: single_aligner is not used single_aligner = sum_aligners == 1 # noqa @@ -869,7 +868,7 @@ def _getitem_tuple(self, tup): if i >= self.obj.ndim: raise IndexingError('Too many indexers') - if is_null_slice(key): + if com.is_null_slice(key): continue retval = getattr(retval, self.name)._getitem_axis(key, axis=i) @@ -890,7 +889,7 @@ def _multi_take_opportunity(self, tup): for indexer, ax in zip(tup, self.obj._data.axes): if isinstance(ax, MultiIndex): return False - elif is_bool_indexer(indexer): + elif com.is_bool_indexer(indexer): return False elif not ax.is_unique: return False @@ -915,7 +914,7 @@ def _convert_for_reindex(self, key, axis=None): axis = self.axis or 0 labels = self.obj._get_axis(axis) - if is_bool_indexer(key): + if com.is_bool_indexer(key): key = check_bool_indexer(labels, key) return labels[key] else: @@ -923,7 +922,7 @@ def _convert_for_reindex(self, key, axis=None): keyarr = labels._convert_index_indexer(key) else: # asarray can be unsafe, NumPy strings are weird - keyarr = _asarray_tuplesafe(key) + keyarr = com._asarray_tuplesafe(key) if is_integer_dtype(keyarr): # Cast the indexer to uint64 if possible so @@ -1011,7 +1010,7 @@ def _getitem_lowerdim(self, tup): # Slices should return views, but calling iloc/loc with a null # slice returns a new object. 
- if is_null_slice(new_key): + if com.is_null_slice(new_key): return section # This is an elided recursive call to iloc/loc/etc' return getattr(section, self.name)[new_key] @@ -1040,7 +1039,7 @@ def _getitem_nested_tuple(self, tup): axis = 0 for i, key in enumerate(tup): - if is_null_slice(key): + if com.is_null_slice(key): axis += 1 continue @@ -1113,7 +1112,7 @@ def _getitem_iterable(self, key, axis=None): labels = self.obj._get_axis(axis) - if is_bool_indexer(key): + if com.is_bool_indexer(key): key = check_bool_indexer(labels, key) inds, = key.nonzero() return self.obj._take(inds, axis=axis, convert=False) @@ -1235,7 +1234,7 @@ def _convert_to_indexer(self, obj, axis=None, is_setter=False): elif is_list_like_indexer(obj): - if is_bool_indexer(obj): + if com.is_bool_indexer(obj): obj = check_bool_indexer(labels, obj) inds, = obj.nonzero() return inds @@ -1265,7 +1264,7 @@ def _convert_to_indexer(self, obj, axis=None, is_setter=False): raise KeyError('{mask} not in index' .format(mask=objarr[mask])) - return _values_from_object(indexer) + return com._values_from_object(indexer) else: try: @@ -1336,7 +1335,7 @@ def _has_valid_type(self, key, axis): if isinstance(key, slice): return True - elif is_bool_indexer(key): + elif com.is_bool_indexer(key): return True elif is_list_like_indexer(key): @@ -1448,7 +1447,7 @@ def _has_valid_type(self, key, axis): if isinstance(key, slice): return True - elif is_bool_indexer(key): + elif com.is_bool_indexer(key): return True elif is_list_like_indexer(key): @@ -1576,7 +1575,7 @@ def _getitem_axis(self, key, axis=None): if isinstance(key, slice): self._has_valid_type(key, axis) return self._get_slice_axis(key, axis=axis) - elif is_bool_indexer(key): + elif com.is_bool_indexer(key): return self._getbool_axis(key, axis=axis) elif is_list_like_indexer(key): @@ -1653,7 +1652,7 @@ class _iLocIndexer(_LocationIndexer): _exception = IndexError def _has_valid_type(self, key, axis): - if is_bool_indexer(key): + if com.is_bool_indexer(key): if hasattr(key, 'index') and isinstance(key.index, Index): if key.index.inferred_type == 'integer': raise NotImplementedError("iLocation based boolean " @@ -1743,7 +1742,7 @@ def _getitem_tuple(self, tup): if i >= self.obj.ndim: raise IndexingError('Too many indexers') - if is_null_slice(key): + if com.is_null_slice(key): axis += 1 continue @@ -1807,7 +1806,7 @@ def _getitem_axis(self, key, axis=None): except TypeError: # pragma: no cover pass - if is_bool_indexer(key): + if com.is_bool_indexer(key): self._has_valid_type(key, axis) return self._getbool_axis(key, axis=axis) diff --git a/pandas/core/internals.py b/pandas/core/internals.py index bc75a110354c0..fd64f68db3ba5 100644 --- a/pandas/core/internals.py +++ b/pandas/core/internals.py @@ -54,7 +54,8 @@ import pandas.core.dtypes.concat as _concat from pandas.core.dtypes.generic import ABCSeries, ABCDatetimeIndex -from pandas.core.common import is_null_slice, _any_not_none + +import pandas.core.common as com import pandas.core.algorithms as algos from pandas.core.index import Index, MultiIndex, _ensure_index @@ -589,7 +590,7 @@ def _astype(self, dtype, copy=False, errors='raise', values=None, categories = kwargs.get('categories', None) ordered = kwargs.get('ordered', None) - if _any_not_none(categories, ordered): + if com._any_not_none(categories, ordered): dtype = CategoricalDtype(categories, ordered) if is_categorical_dtype(self.values): @@ -1731,7 +1732,7 @@ def iget(self, col): if self.ndim == 2 and isinstance(col, tuple): col, loc = col - if not is_null_slice(col) and col 
!= 0: + if not com.is_null_slice(col) and col != 0: raise IndexError("{0} only contains one item".format(self)) return self.values[loc] else: @@ -2643,7 +2644,7 @@ def _slice(self, slicer): """ return a slice of my values """ if isinstance(slicer, tuple): col, loc = slicer - if not is_null_slice(col) and col != 0: + if not com.is_null_slice(col) and col != 0: raise IndexError("{0} only contains one item".format(self)) return self.values[loc] return self.values[slicer] diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py index d1a355021f388..63989304bb5f9 100644 --- a/pandas/core/nanops.py +++ b/pandas/core/nanops.py @@ -20,7 +20,7 @@ from pandas.core.dtypes.cast import _int64_max, maybe_upcast_putmask from pandas.core.dtypes.missing import isna, notna, na_value_for_dtype from pandas.core.config import get_option -from pandas.core.common import _values_from_object +import pandas.core.common as com _BOTTLENECK_INSTALLED = False _MIN_BOTTLENECK_VERSION = '1.0.0' @@ -205,7 +205,7 @@ def _get_values(values, skipna, fill_value=None, fill_value_typ=None, if necessary copy and mask using the specified fill_value copy = True will force the copy """ - values = _values_from_object(values) + values = com._values_from_object(values) if isfinite: mask = _isfinite(values) else: @@ -376,7 +376,7 @@ def get_median(x): mask = notna(x) if not skipna and not mask.all(): return np.nan - return algos.median(_values_from_object(x[mask])) + return algos.median(com._values_from_object(x[mask])) if not is_float_dtype(values): values = values.astype('f8') @@ -437,7 +437,7 @@ def nanstd(values, axis=None, skipna=True, ddof=1): @bottleneck_switch(ddof=1) def nanvar(values, axis=None, skipna=True, ddof=1): - values = _values_from_object(values) + values = com._values_from_object(values) dtype = values.dtype mask = isna(values) if is_any_int_dtype(values): @@ -546,7 +546,7 @@ def nanskew(values, axis=None, skipna=True): """ - values = _values_from_object(values) + values = com._values_from_object(values) mask = isna(values) if not is_float_dtype(values.dtype): values = values.astype('f8') @@ -604,7 +604,7 @@ def nankurt(values, axis=None, skipna=True): central moment. 
""" - values = _values_from_object(values) + values = com._values_from_object(values) mask = isna(values) if not is_float_dtype(values.dtype): values = values.astype('f8') diff --git a/pandas/core/ops.py b/pandas/core/ops.py index fc3ea106252db..d78dbfe355d19 100644 --- a/pandas/core/ops.py +++ b/pandas/core/ops.py @@ -19,9 +19,10 @@ from pandas.compat import bind_method import pandas.core.missing as missing +import pandas.core.common as com from pandas.errors import PerformanceWarning, NullFrequencyError -from pandas.core.common import _values_from_object, _maybe_match_name + from pandas.core.dtypes.missing import notna, isna from pandas.core.dtypes.common import ( needs_i8_conversion, @@ -634,7 +635,7 @@ def na_op(x, y): dtype = find_common_type([x.dtype, y.dtype]) result = np.empty(x.size, dtype=dtype) mask = notna(x) & notna(y) - result[mask] = op(x[mask], _values_from_object(y[mask])) + result[mask] = op(x[mask], com._values_from_object(y[mask])) elif isinstance(x, np.ndarray): result = np.empty(len(x), dtype=x.dtype) mask = notna(x) @@ -739,7 +740,7 @@ def dispatch_to_index_op(op, left, right, index_class): def _get_series_op_result_name(left, right): # `left` is always a pd.Series if isinstance(right, (ABCSeries, pd.Index)): - name = _maybe_match_name(left, right) + name = com._maybe_match_name(left, right) else: name = left.name return name @@ -802,7 +803,7 @@ def na_op(x, y): if is_scalar(y): mask = isna(x) - y = libindex.convert_scalar(x, _values_from_object(y)) + y = libindex.convert_scalar(x, com._values_from_object(y)) else: mask = isna(x) | isna(y) y = y.view('i8') @@ -827,7 +828,7 @@ def wrapper(self, other, axis=None): self._get_axis_number(axis) if isinstance(other, ABCSeries): - name = _maybe_match_name(self, other) + name = com._maybe_match_name(self, other) if not self._indexed_same(other): msg = 'Can only compare identically-labeled Series objects' raise ValueError(msg) @@ -879,7 +880,7 @@ def wrapper(self, other, axis=None): .format(typ=type(other))) # always return a full value series here - res = _values_from_object(res) + res = com._values_from_object(res) res = pd.Series(res, index=self.index, name=self.name, dtype='bool') return res @@ -931,7 +932,7 @@ def wrapper(self, other): self, other = _align_method_SERIES(self, other, align_asobject=True) if isinstance(other, ABCSeries): - name = _maybe_match_name(self, other) + name = com._maybe_match_name(self, other) is_other_int_dtype = is_integer_dtype(other.dtype) other = fill_int(other) if is_other_int_dtype else fill_bool(other) diff --git a/pandas/core/panel.py b/pandas/core/panel.py index 1a19a6b6e7cfe..2f293e1a58c89 100644 --- a/pandas/core/panel.py +++ b/pandas/core/panel.py @@ -17,11 +17,12 @@ import pandas.core.ops as ops import pandas.core.missing as missing +import pandas.core.common as com + from pandas import compat from pandas.compat import (map, zip, range, u, OrderedDict) from pandas.compat.numpy import function as nv -from pandas.core.common import (_try_sort, _default_index, _all_not_none, - _any_not_none, _apply_if_callable) + from pandas.core.frame import DataFrame from pandas.core.generic import NDFrame, _shared_docs from pandas.core.index import (Index, MultiIndex, _ensure_index, @@ -174,7 +175,7 @@ def _init_data(self, data, copy, dtype, **kwargs): axes = None if isinstance(data, BlockManager): - if _any_not_none(*passed_axes): + if com._any_not_none(*passed_axes): axes = [x if x is not None else y for x, y in zip(passed_axes, data.axes)] mgr = data @@ -186,7 +187,7 @@ def _init_data(self, data, 
copy, dtype, **kwargs): mgr = self._init_matrix(data, passed_axes, dtype=dtype, copy=copy) copy = False dtype = None - elif is_scalar(data) and _all_not_none(*passed_axes): + elif is_scalar(data) and com._all_not_none(*passed_axes): values = cast_scalar_to_array([len(x) for x in passed_axes], data, dtype=dtype) mgr = self._init_matrix(values, passed_axes, dtype=values.dtype, @@ -209,7 +210,7 @@ def _init_dict(self, data, axes, dtype=None): else: ks = list(data.keys()) if not isinstance(data, OrderedDict): - ks = _try_sort(ks) + ks = com._try_sort(ks) haxis = Index(ks) for k, v in compat.iteritems(data): @@ -287,7 +288,7 @@ def from_dict(cls, data, intersect=False, orient='items', dtype=None): return cls(**d) def __getitem__(self, key): - key = _apply_if_callable(key, self) + key = com._apply_if_callable(key, self) if isinstance(self._info_axis, MultiIndex): return self._getitem_multilevel(key) @@ -325,7 +326,7 @@ def _init_matrix(self, data, axes, dtype=None, copy=False): fixed_axes = [] for i, ax in enumerate(axes): if ax is None: - ax = _default_index(shape[i]) + ax = com._default_index(shape[i]) else: ax = _ensure_index(ax) fixed_axes.append(ax) @@ -601,7 +602,7 @@ def _box_item_values(self, key, values): return self._constructor_sliced(values, **d) def __setitem__(self, key, value): - key = _apply_if_callable(key, self) + key = com._apply_if_callable(key, self) shape = tuple(self.shape) if isinstance(value, self._constructor_sliced): value = value.reindex( diff --git a/pandas/core/resample.py b/pandas/core/resample.py index 5447ce7470b9d..c215d9d5cffcc 100644 --- a/pandas/core/resample.py +++ b/pandas/core/resample.py @@ -5,7 +5,7 @@ from textwrap import dedent import pandas as pd -from pandas.core.base import AbstractMethodError, GroupByMixin +from pandas.core.base import GroupByMixin from pandas.core.groupby import (BinGrouper, Grouper, _GroupBy, GroupBy, SeriesGroupBy, groupby, PanelGroupBy, @@ -233,7 +233,7 @@ def _convert_obj(self, obj): return obj def _get_binner_for_time(self): - raise AbstractMethodError(self) + raise com.AbstractMethodError(self) def _set_binner(self): """ @@ -372,10 +372,10 @@ def transform(self, arg, *args, **kwargs): arg, *args, **kwargs) def _downsample(self, f): - raise AbstractMethodError(self) + raise com.AbstractMethodError(self) def _upsample(self, f, limit=None, fill_value=None): - raise AbstractMethodError(self) + raise com.AbstractMethodError(self) def _gotitem(self, key, ndim, subset=None): """ diff --git a/pandas/core/strings.py b/pandas/core/strings.py index 99b53f85d0cf2..5c31b9a5668ff 100644 --- a/pandas/core/strings.py +++ b/pandas/core/strings.py @@ -12,8 +12,8 @@ is_scalar, is_integer, is_re) -from pandas.core.common import _values_from_object +import pandas.core.common as com from pandas.core.algorithms import take_1d import pandas.compat as compat from pandas.core.base import NoNewAttributesMixin @@ -37,7 +37,7 @@ def _get_array_list(arr, others): from pandas.core.series import Series - if len(others) and isinstance(_values_from_object(others)[0], + if len(others) and isinstance(com._values_from_object(others)[0], (list, np.ndarray, Series)): arrays = [arr] + list(others) else: @@ -461,7 +461,7 @@ def rep(x, r): return compat.text_type.__mul__(x, r) repeats = np.asarray(repeats, dtype=object) - result = lib.vec_binop(_values_from_object(arr), repeats, rep) + result = lib.vec_binop(com._values_from_object(arr), repeats, rep) return result diff --git a/pandas/core/window.py b/pandas/core/window.py index 5d2fa16876c11..51222b8ef5f92 100644 
--- a/pandas/core/window.py +++ b/pandas/core/window.py @@ -32,7 +32,8 @@ from pandas.core.base import (PandasObject, SelectionMixin, GroupByMixin) -from pandas.core.common import _asarray_tuplesafe, _count_not_none +import pandas.core.common as com + import pandas._libs.window as _window from pandas import compat @@ -508,7 +509,7 @@ def _prep_window(self, **kwargs): window = self._get_window() if isinstance(window, (list, tuple, np.ndarray)): - return _asarray_tuplesafe(window).astype(float) + return com._asarray_tuplesafe(window).astype(float) elif is_integer(window): import scipy.signal as sig @@ -1908,33 +1909,33 @@ def dataframe_from_int_dict(data, frame_template): return _flex_binary_moment(arg2, arg1, f) -def _get_center_of_mass(com, span, halflife, alpha): - valid_count = _count_not_none(com, span, halflife, alpha) +def _get_center_of_mass(comass, span, halflife, alpha): + valid_count = com._count_not_none(comass, span, halflife, alpha) if valid_count > 1: - raise ValueError("com, span, halflife, and alpha " + raise ValueError("comass, span, halflife, and alpha " "are mutually exclusive") # Convert to center of mass; domain checks ensure 0 < alpha <= 1 - if com is not None: - if com < 0: - raise ValueError("com must satisfy: com >= 0") + if comass is not None: + if comass < 0: + raise ValueError("comass must satisfy: comass >= 0") elif span is not None: if span < 1: raise ValueError("span must satisfy: span >= 1") - com = (span - 1) / 2. + comass = (span - 1) / 2. elif halflife is not None: if halflife <= 0: raise ValueError("halflife must satisfy: halflife > 0") decay = 1 - np.exp(np.log(0.5) / halflife) - com = 1 / decay - 1 + comass = 1 / decay - 1 elif alpha is not None: if alpha <= 0 or alpha > 1: raise ValueError("alpha must satisfy: 0 < alpha <= 1") - com = (1.0 - alpha) / alpha + comass = (1.0 - alpha) / alpha else: - raise ValueError("Must pass one of com, span, halflife, or alpha") + raise ValueError("Must pass one of comass, span, halflife, or alpha") - return float(com) + return float(comass) def _offset(window, center): diff --git a/pandas/io/common.py b/pandas/io/common.py index c2d1da5a1035d..ce7bc0debd993 100644 --- a/pandas/io/common.py +++ b/pandas/io/common.py @@ -9,7 +9,8 @@ from pandas.compat import StringIO, BytesIO, string_types, text_type from pandas import compat from pandas.io.formats.printing import pprint_thing -from pandas.core.common import AbstractMethodError + +import pandas.core.common as com from pandas.core.dtypes.common import is_number, is_file_like # compat @@ -66,7 +67,7 @@ def __iter__(self): return self def __next__(self): - raise AbstractMethodError(self) + raise com.AbstractMethodError(self) if not compat.PY3: diff --git a/pandas/io/formats/excel.py b/pandas/io/formats/excel.py index aff3e35861434..d1b9974b18c5d 100644 --- a/pandas/io/formats/excel.py +++ b/pandas/io/formats/excel.py @@ -10,7 +10,9 @@ from pandas.compat import reduce from pandas.io.formats.css import CSSResolver, CSSWarning from pandas.io.formats.printing import pprint_thing -from pandas.core.common import _any_not_none + +import pandas.core.common as com + from pandas.core.dtypes.common import is_float, is_scalar from pandas.core.dtypes import missing from pandas import Index, MultiIndex, PeriodIndex @@ -549,7 +551,7 @@ def _format_hierarchical_rows(self): self.rowcounter += 1 # if index labels are not empty go ahead and dump - if _any_not_none(*index_labels) and self.header is not False: + if com._any_not_none(*index_labels) and self.header is not False: for cidx, name 
in enumerate(index_labels): yield ExcelCell(self.rowcounter - 1, cidx, name, diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py index 886a887568d69..29780d63b5888 100644 --- a/pandas/io/formats/format.py +++ b/pandas/io/formats/format.py @@ -27,7 +27,8 @@ is_list_like) from pandas.core.dtypes.generic import ABCSparseArray from pandas.core.base import PandasObject -from pandas.core.common import _any_not_none, sentinel_factory +import pandas.core.common as com + from pandas.core.index import Index, MultiIndex, _ensure_index from pandas import compat from pandas.compat import (StringIO, lzip, range, map, zip, u, @@ -1277,7 +1278,7 @@ def _column_header(): if self.fmt.sparsify: # GH3547 - sentinel = sentinel_factory() + sentinel = com.sentinel_factory() else: sentinel = None levels = self.columns.format(sparsify=sentinel, adjoin=False, @@ -1446,7 +1447,7 @@ def _write_hierarchical_rows(self, fmt_values, indent): if self.fmt.sparsify: # GH3547 - sentinel = sentinel_factory() + sentinel = com.sentinel_factory() levels = frame.index.format(sparsify=sentinel, adjoin=False, names=False) @@ -2372,7 +2373,7 @@ def single_row_table(row): # pragma: no cover def _has_names(index): if isinstance(index, MultiIndex): - return _any_not_none(*index.names) + return com._any_not_none(*index.names) else: return index.name is not None diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py index 2c3d92cea0ad8..097226861a7a1 100644 --- a/pandas/io/formats/style.py +++ b/pandas/io/formats/style.py @@ -27,7 +27,8 @@ from pandas.compat import range from pandas.core.config import get_option from pandas.core.generic import _shared_docs -from pandas.core.common import _any_not_none, sentinel_factory + +import pandas.core.common as com from pandas.core.indexing import _maybe_numeric_slice, _non_reducing_slice from pandas.util._decorators import Appender try: @@ -257,7 +258,8 @@ def format_attr(pair): row_es.append(es) head.append(row_es) - if (self.data.index.names and _any_not_none(*self.data.index.names) and + if (self.data.index.names and + com._any_not_none(*self.data.index.names) and not hidden_index): index_header_row = [] @@ -1207,7 +1209,7 @@ def _get_level_lengths(index, hidden_elements=None): Result is a dictionary of (level, inital_position): span """ - sentinel = sentinel_factory() + sentinel = com.sentinel_factory() levels = index.format(sparsify=sentinel, adjoin=False, names=False) if hidden_elements is None: diff --git a/pandas/io/html.py b/pandas/io/html.py index e7794864ccb3e..fe1f5e5fe3181 100644 --- a/pandas/io/html.py +++ b/pandas/io/html.py @@ -20,7 +20,8 @@ from pandas.compat import (lrange, lmap, u, string_types, iteritems, raise_with_traceback, binary_type) from pandas import Series -from pandas.core.common import AbstractMethodError + +import pandas.core.common as com from pandas.io.formats.printing import pprint_thing _IMPORTS = False @@ -234,7 +235,7 @@ def _text_getter(self, obj): text : str or unicode The text from an individual DOM node. """ - raise AbstractMethodError(self) + raise com.AbstractMethodError(self) def _parse_td(self, obj): """Return the td elements from a row element. @@ -248,7 +249,7 @@ def _parse_td(self, obj): columns : list of node-like These are the elements of each row, i.e., the columns. """ - raise AbstractMethodError(self) + raise com.AbstractMethodError(self) def _parse_tables(self, doc, match, attrs): """Return all tables from the parsed DOM. 
@@ -275,7 +276,7 @@ def _parse_tables(self, doc, match, attrs): tables : list of node-like A list of elements to be parsed into raw data. """ - raise AbstractMethodError(self) + raise com.AbstractMethodError(self) def _parse_tr(self, table): """Return the list of row elements from the parsed table element. @@ -290,7 +291,7 @@ def _parse_tr(self, table): rows : list of node-like A list row elements of a table, usually or ... element. """ - raise AbstractMethodError(self) + raise com.AbstractMethodError(self) def _parse_tbody(self, table): """Return the body of the table. @@ -320,7 +321,7 @@ def _parse_tbody(self, table): tbody : node-like A ... element. """ - raise AbstractMethodError(self) + raise com.AbstractMethodError(self) def _parse_tfoot(self, table): """Return the footer of the table if any. @@ -335,7 +336,7 @@ def _parse_tfoot(self, table): tfoot : node-like A ... element. """ - raise AbstractMethodError(self) + raise com.AbstractMethodError(self) def _build_doc(self): """Return a tree-like object that can be used to iterate over the DOM. @@ -344,7 +345,7 @@ def _build_doc(self): ------- obj : tree-like """ - raise AbstractMethodError(self) + raise com.AbstractMethodError(self) def _build_table(self, table): header = self._parse_raw_thead(table) diff --git a/pandas/io/json/json.py b/pandas/io/json/json.py index 6d35fc5769331..49978dc073199 100644 --- a/pandas/io/json/json.py +++ b/pandas/io/json/json.py @@ -12,7 +12,8 @@ _infer_compression, _stringify_path, BaseIterator) from pandas.io.parsers import _validate_integer -from pandas.core.common import AbstractMethodError + +import pandas.core.common as com from pandas.core.reshape.concat import concat from pandas.io.formats.printing import pprint_thing from .normalize import _convert_to_line_delimits @@ -93,7 +94,7 @@ def __init__(self, obj, orient, date_format, double_precision, self._format_axes() def _format_axes(self): - raise AbstractMethodError(self) + raise com.AbstractMethodError(self) def write(self): return self._write(self.obj, self.orient, self.double_precision, @@ -648,7 +649,7 @@ def _convert_axes(self): setattr(self.obj, axis, new_axis) def _try_convert_types(self): - raise AbstractMethodError(self) + raise com.AbstractMethodError(self) def _try_convert_data(self, name, data, use_dtypes=True, convert_dates=True): @@ -761,7 +762,7 @@ def _try_convert_to_date(self, data): return data, False def _try_convert_dates(self): - raise AbstractMethodError(self) + raise com.AbstractMethodError(self) class SeriesParser(Parser): diff --git a/pandas/io/json/table_schema.py b/pandas/io/json/table_schema.py index 89b7a1de8acfc..ada9ca7233bff 100644 --- a/pandas/io/json/table_schema.py +++ b/pandas/io/json/table_schema.py @@ -8,7 +8,8 @@ import pandas._libs.json as json from pandas import DataFrame from pandas.api.types import CategoricalDtype -from pandas.core.common import _all_not_none + +import pandas.core.common as com from pandas.core.dtypes.common import ( is_integer_dtype, is_timedelta64_dtype, is_numeric_dtype, is_bool_dtype, is_datetime64_dtype, is_datetime64tz_dtype, @@ -69,7 +70,7 @@ def as_json_table_type(x): def set_default_names(data): """Sets index names to 'index' for regular, or 'level_x' for Multi""" - if _all_not_none(*data.index.names): + if com._all_not_none(*data.index.names): nms = data.index.names if len(nms) == 1 and data.index.name == 'index': warnings.warn("Index name of 'index' is not round-trippable") diff --git a/pandas/io/parquet.py b/pandas/io/parquet.py index 1d3fd8552eeb7..773ac45392929 100644 --- 
a/pandas/io/parquet.py +++ b/pandas/io/parquet.py @@ -4,7 +4,8 @@ from distutils.version import LooseVersion from pandas import DataFrame, RangeIndex, Int64Index, get_option from pandas.compat import string_types -from pandas.core.common import AbstractMethodError +import pandas.core.common as com + from pandas.io.common import get_filepath_or_buffer, is_s3_url @@ -64,10 +65,10 @@ def validate_dataframe(df): raise ValueError("Index level names must be strings") def write(self, df, path, compression, **kwargs): - raise AbstractMethodError(self) + raise com.AbstractMethodError(self) def read(self, path, columns=None, **kwargs): - raise AbstractMethodError(self) + raise com.AbstractMethodError(self) class PyArrowImpl(BaseImpl): diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py index 1ba687541eecf..b5a49cec97733 100755 --- a/pandas/io/parsers.py +++ b/pandas/io/parsers.py @@ -30,7 +30,8 @@ from pandas.core.frame import DataFrame from pandas.core.categorical import Categorical from pandas.core import algorithms -from pandas.core.common import AbstractMethodError +import pandas.core.common as com + from pandas.io.date_converters import generic_parser from pandas.errors import ParserWarning, ParserError, EmptyDataError from pandas.io.common import (get_filepath_or_buffer, is_file_like, @@ -1010,7 +1011,7 @@ def _make_engine(self, engine='c'): self._engine = klass(self.f, **self.options) def _failover_to_python(self): - raise AbstractMethodError(self) + raise com.AbstractMethodError(self) def read(self, nrows=None): nrows = _validate_integer('nrows', nrows) diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index 72543bb6f825e..c0dd26b5ec974 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -34,7 +34,8 @@ from pandas.core.base import StringMixin from pandas.io.formats.printing import adjoin, pprint_thing from pandas.errors import PerformanceWarning -from pandas.core.common import _asarray_tuplesafe, _all_none + +import pandas.core.common as com from pandas.core.algorithms import match, unique from pandas.core.categorical import Categorical, _factorize_from_iterables from pandas.core.internals import (BlockManager, make_block, @@ -902,7 +903,7 @@ def remove(self, key, where=None, start=None, stop=None): raise KeyError('No object named %s in the file' % key) # remove the node - if _all_none(where, start, stop): + if com._all_none(where, start, stop): s.group._f_remove(recursive=True) # delete from the table @@ -2367,7 +2368,7 @@ def delete(self, where=None, start=None, stop=None, **kwargs): support fully deleting the node in its entirety (only) - where specification must be None """ - if _all_none(where, start, stop): + if com._all_none(where, start, stop): self._handle.remove_node(self.group, recursive=True) return None @@ -3843,7 +3844,7 @@ def read(self, where=None, columns=None, **kwargs): tuple_index = long_index.values unique_tuples = lib.fast_unique(tuple_index) - unique_tuples = _asarray_tuplesafe(unique_tuples) + unique_tuples = com._asarray_tuplesafe(unique_tuples) indexer = match(unique_tuples, tuple_index) indexer = _ensure_platform_int(indexer) diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py index 3094d7d0ab1c6..dff55b36afb4d 100644 --- a/pandas/plotting/_core.py +++ b/pandas/plotting/_core.py @@ -10,6 +10,8 @@ import numpy as np from pandas.util._decorators import cache_readonly + +import pandas.core.common as com from pandas.core.base import PandasObject from pandas.core.config import get_option from pandas.core.dtypes.missing import 
isna, notna, remove_na_arraylike @@ -21,7 +23,6 @@ is_iterator) from pandas.core.dtypes.generic import ABCSeries, ABCDataFrame -from pandas.core.common import AbstractMethodError, _try_sort, _any_not_none from pandas.core.generic import _shared_docs, _shared_doc_kwargs from pandas.core.index import Index, MultiIndex @@ -225,7 +226,7 @@ def _iter_data(self, data=None, keep_index=False, fillna=None): # TODO: unused? # if self.sort_columns: - # columns = _try_sort(data.columns) + # columns = com._try_sort(data.columns) # else: # columns = data.columns @@ -367,7 +368,7 @@ def _compute_plot_data(self): self.data = numeric_data def _make_plot(self): - raise AbstractMethodError(self) + raise com.AbstractMethodError(self) def _add_table(self): if self.table is False: @@ -609,7 +610,7 @@ def _plot(cls, ax, x, y, style=None, is_errorbar=False, **kwds): def _get_index_name(self): if isinstance(self.data.index, MultiIndex): name = self.data.index.names - if _any_not_none(*name): + if com._any_not_none(*name): name = ','.join(pprint_thing(x) for x in name) else: name = None @@ -957,7 +958,7 @@ def _make_plot(self): it = self._iter_data() stacking_id = self._get_stacking_id() - is_errorbar = _any_not_none(*self.errors.values()) + is_errorbar = com._any_not_none(*self.errors.values()) colors = self._get_colors() for i, (label, y) in enumerate(it): @@ -2182,7 +2183,7 @@ def hist_frame(data, column=None, by=None, grid=True, xlabelsize=None, layout=layout) _axes = _flatten(axes) - for i, col in enumerate(_try_sort(data.columns)): + for i, col in enumerate(com._try_sort(data.columns)): ax = _axes[i] ax.hist(data[col].dropna().values, bins=bins, **kwds) ax.set_title(col) diff --git a/pandas/plotting/_style.py b/pandas/plotting/_style.py index 887202e22b4e0..426b29a8840f4 100644 --- a/pandas/plotting/_style.py +++ b/pandas/plotting/_style.py @@ -44,12 +44,12 @@ def _get_standard_colors(num_colors=None, colormap=None, color_type='default', if isinstance(colors, compat.string_types): colors = list(colors) elif color_type == 'random': - from pandas.core.common import _random_state + import pandas.core.common as com def random_color(column): """ Returns a random color represented as a list of length 3""" # GH17525 use common._random_state to avoid resetting the seed - rs = _random_state(column) + rs = com._random_state(column) return rs.rand(3).tolist() colors = lmap(random_color, lrange(num_colors)) diff --git a/pandas/tests/frame/test_to_csv.py b/pandas/tests/frame/test_to_csv.py index 0ca25735fc03f..f9e6d31def622 100644 --- a/pandas/tests/frame/test_to_csv.py +++ b/pandas/tests/frame/test_to_csv.py @@ -9,7 +9,9 @@ import numpy as np from pandas.compat import (lmap, range, lrange, StringIO, u) -from pandas.core.common import _all_none + +import pandas.core.common as com + from pandas.errors import ParserError from pandas import (DataFrame, Index, Series, MultiIndex, Timestamp, date_range, read_csv, compat, to_datetime) @@ -572,7 +574,7 @@ def _make_frame(names=None): df = _make_frame(True) df.to_csv(path, index=False) result = read_csv(path, header=[0, 1]) - assert _all_none(*result.columns.names) + assert com._all_none(*result.columns.names) result.columns.names = df.columns.names assert_frame_equal(df, result) diff --git a/pandas/tests/indexes/interval/test_interval.py b/pandas/tests/indexes/interval/test_interval.py index 9895ee06a22c0..57e7d7e8a06d2 100644 --- a/pandas/tests/indexes/interval/test_interval.py +++ b/pandas/tests/indexes/interval/test_interval.py @@ -6,7 +6,9 @@ Interval, IntervalIndex, Index, 
isna, notna, interval_range, Timestamp, Timedelta, date_range, timedelta_range, Categorical) from pandas.compat import lzip -from pandas.core.common import _asarray_tuplesafe + +import pandas.core.common as com + from pandas.tests.indexes.common import Base import pandas.util.testing as tm import pandas as pd @@ -1177,7 +1179,7 @@ def test_to_tuples(self, tuples): # GH 18756 idx = IntervalIndex.from_tuples(tuples) result = idx.to_tuples() - expected = Index(_asarray_tuplesafe(tuples)) + expected = Index(com._asarray_tuplesafe(tuples)) tm.assert_index_equal(result, expected) @pytest.mark.parametrize('tuples', [ @@ -1193,7 +1195,7 @@ def test_to_tuples_na(self, tuples, na_tuple): result = idx.to_tuples(na_tuple=na_tuple) # check the non-NA portion - expected_notna = Index(_asarray_tuplesafe(tuples[:-1])) + expected_notna = Index(com._asarray_tuplesafe(tuples[:-1])) result_notna = result[:-1] tm.assert_index_equal(result_notna, expected_notna) diff --git a/pandas/tests/io/parser/test_parsers.py b/pandas/tests/io/parser/test_parsers.py index 0ea4757b10e94..85aca9e59e219 100644 --- a/pandas/tests/io/parser/test_parsers.py +++ b/pandas/tests/io/parser/test_parsers.py @@ -4,7 +4,8 @@ import pandas.util.testing as tm from pandas import read_csv, read_table, DataFrame -from pandas.core.common import AbstractMethodError +import pandas.core.common as com + from pandas._libs.lib import Timestamp from pandas.compat import StringIO @@ -43,7 +44,7 @@ def read_table(self, *args, **kwargs): raise NotImplementedError def float_precision_choices(self): - raise AbstractMethodError(self) + raise com.AbstractMethodError(self) def setup_method(self, method): self.dirpath = tm.get_data_path() diff --git a/pandas/tests/scalar/test_interval.py b/pandas/tests/scalar/test_interval.py index 23dad9736dac5..b51bc88d705c0 100644 --- a/pandas/tests/scalar/test_interval.py +++ b/pandas/tests/scalar/test_interval.py @@ -2,7 +2,8 @@ import numpy as np from pandas import Interval, Timestamp, Timedelta -from pandas.core.common import _any_none + +import pandas.core.common as com import pytest import pandas.util.testing as tm @@ -197,6 +198,6 @@ def test_constructor_errors_tz(self, tz_left, tz_right): # GH 18538 left = Timestamp('2017-01-01', tz=tz_left) right = Timestamp('2017-01-02', tz=tz_right) - error = TypeError if _any_none(tz_left, tz_right) else ValueError + error = TypeError if com._any_none(tz_left, tz_right) else ValueError with pytest.raises(error): Interval(left, right) diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py index 6b3b519d49f7f..6cd9da0fe718a 100644 --- a/pandas/tests/test_algos.py +++ b/pandas/tests/test_algos.py @@ -17,7 +17,8 @@ from pandas._libs.hashtable import unique_label_indices from pandas.compat import lrange, range import pandas.core.algorithms as algos -from pandas.core.common import _asarray_tuplesafe +import pandas.core.common as com + import pandas.util.testing as tm import pandas.util._test_decorators as td from pandas.core.dtypes.dtypes import CategoricalDtype as CDT @@ -217,7 +218,8 @@ def test_factorize_tuple_list(self, data, expected_label, expected_level): tm.assert_numpy_array_equal(result[0], np.array(expected_label, dtype=np.intp)) - expected_level_array = _asarray_tuplesafe(expected_level, dtype=object) + expected_level_array = com._asarray_tuplesafe(expected_level, + dtype=object) tm.assert_numpy_array_equal(result[1], expected_level_array) def test_complex_sorting(self): diff --git a/pandas/tests/test_resample.py b/pandas/tests/test_resample.py index 
e9a517605020a..515850c14ecd6 100644 --- a/pandas/tests/test_resample.py +++ b/pandas/tests/test_resample.py @@ -20,9 +20,10 @@ from pandas.core.dtypes.generic import ABCSeries, ABCDataFrame from pandas.compat import range, lrange, zip, product, OrderedDict -from pandas.core.base import SpecificationError, AbstractMethodError +from pandas.core.base import SpecificationError from pandas.errors import UnsupportedFunctionCall from pandas.core.groupby import DataError +import pandas.core.common as com from pandas.tseries.frequencies import to_offset from pandas.core.indexes.datetimes import date_range @@ -726,7 +727,7 @@ def index(self, _index_start, _index_end, _index_freq): @pytest.fixture def _series_name(self): - raise AbstractMethodError(self) + raise com.AbstractMethodError(self) @pytest.fixture def _static_values(self, index): diff --git a/pandas/tests/util/test_util.py b/pandas/tests/util/test_util.py index 8da2b401fc848..5fd66bb3f2b85 100644 --- a/pandas/tests/util/test_util.py +++ b/pandas/tests/util/test_util.py @@ -8,7 +8,9 @@ import pytest from pandas.compat import intern -from pandas.core.common import _all_none + +import pandas.core.common as com + from pandas.util._move import move_into_mutable_buffer, BadMove, stolenbuf from pandas.util._decorators import deprecate_kwarg, make_signature from pandas.util._validators import (validate_args, validate_kwargs, @@ -438,7 +440,7 @@ def test_set_locale(self): pytest.skip("Only a single locale found, no point in " "trying to test setting another locale") - if _all_none(*self.current_locale): + if com._all_none(*self.current_locale): # Not sure why, but on some travis runs with pytest, # getlocale() returned (None, None). pytest.skip("Current locale is not set.") diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py index e6b9f66c094c1..ec206e0997d0b 100644 --- a/pandas/tseries/offsets.py +++ b/pandas/tseries/offsets.py @@ -9,7 +9,7 @@ from pandas.core.dtypes.generic import ABCSeries, ABCDatetimeIndex, ABCPeriod from pandas.core.tools.datetimes import to_datetime -from pandas.core.common import AbstractMethodError +import pandas.core.common as com # import after tools, dateutil check from dateutil.easter import easter @@ -1148,7 +1148,7 @@ def apply(self, other): def _apply(self, n, other): """Handle specific apply logic for child classes""" - raise AbstractMethodError(self) + raise com.AbstractMethodError(self) @apply_index_wraps def apply_index(self, i): @@ -1182,11 +1182,11 @@ def _get_roll(self, i, before_day_of_month, after_day_of_month): The roll array is based on the fact that i gets rolled back to the first day of the month. 
""" - raise AbstractMethodError(self) + raise com.AbstractMethodError(self) def _apply_index_days(self, i, roll): """Apply the correct day for each date in i""" - raise AbstractMethodError(self) + raise com.AbstractMethodError(self) class SemiMonthEnd(SemiMonthOffset): diff --git a/pandas/util/testing.py b/pandas/util/testing.py index 1bea25a16ca1e..30915f7891c8c 100644 --- a/pandas/util/testing.py +++ b/pandas/util/testing.py @@ -32,7 +32,7 @@ is_list_like) from pandas.io.formats.printing import pprint_thing from pandas.core.algorithms import take_1d -from pandas.core.common import _all_not_none +import pandas.core.common as com import pandas.compat as compat from pandas.compat import ( @@ -484,7 +484,7 @@ def set_locale(new_locale, lc_var=locale.LC_ALL): except ValueError: yield new_locale else: - if _all_not_none(*normalized_locale): + if com._all_not_none(*normalized_locale): yield '.'.join(normalized_locale) else: yield new_locale From 7872f332794b082ae70e6b937697b0e7e3abb86d Mon Sep 17 00:00:00 2001 From: Brock Mendel Date: Fri, 19 Jan 2018 11:17:45 -0800 Subject: [PATCH 3/3] revert whitespace changes --- pandas/core/computation/align.py | 2 -- pandas/core/computation/expressions.py | 1 - pandas/core/indexes/base.py | 1 - pandas/core/indexes/datetimelike.py | 6 +----- pandas/core/indexes/datetimes.py | 1 - pandas/core/indexes/interval.py | 2 -- pandas/core/indexes/numeric.py | 1 - pandas/core/indexes/range.py | 3 +-- pandas/core/internals.py | 1 - pandas/core/ops.py | 1 - pandas/core/panel.py | 3 --- pandas/core/series.py | 7 ++++--- pandas/core/window.py | 1 - pandas/io/common.py | 1 - pandas/io/formats/excel.py | 2 -- pandas/io/formats/format.py | 1 - pandas/io/formats/style.py | 1 - pandas/io/html.py | 1 - pandas/io/json/json.py | 1 - pandas/io/json/table_schema.py | 1 - pandas/io/parquet.py | 1 - pandas/io/parsers.py | 1 - pandas/io/pytables.py | 1 - pandas/plotting/_core.py | 1 - pandas/tests/frame/test_to_csv.py | 2 -- pandas/tests/indexes/interval/test_interval.py | 2 -- pandas/tests/io/parser/test_parsers.py | 1 - pandas/tests/scalar/test_interval.py | 1 - pandas/tests/test_algos.py | 1 - pandas/tests/util/test_util.py | 2 -- 30 files changed, 6 insertions(+), 45 deletions(-) diff --git a/pandas/core/computation/align.py b/pandas/core/computation/align.py index 6a20205644c6c..22c8b641cf974 100644 --- a/pandas/core/computation/align.py +++ b/pandas/core/computation/align.py @@ -10,9 +10,7 @@ import pandas as pd from pandas import compat from pandas.errors import PerformanceWarning - import pandas.core.common as com - from pandas.core.computation.common import _result_type_many diff --git a/pandas/core/computation/expressions.py b/pandas/core/computation/expressions.py index 14548c1514805..781101f5804e6 100644 --- a/pandas/core/computation/expressions.py +++ b/pandas/core/computation/expressions.py @@ -10,7 +10,6 @@ import numpy as np import pandas.core.common as com - from pandas.core.computation.check import _NUMEXPR_INSTALLED from pandas.core.config import get_option diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 569403bd0206e..34578d7a717b1 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -43,7 +43,6 @@ is_scalar) from pandas.core.base import PandasObject, IndexOpsMixin - import pandas.core.common as com import pandas.core.base as base from pandas.util._decorators import ( diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index 424753543b866..f43c6dc567f69 100644 --- 
a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -10,7 +10,6 @@ from pandas.core.tools.timedeltas import to_timedelta import numpy as np - from pandas.core.dtypes.common import ( _ensure_int64, is_dtype_equal, @@ -30,13 +29,9 @@ from pandas.core.dtypes.generic import ( ABCIndex, ABCSeries, ABCPeriodIndex, ABCIndexClass) from pandas.core.dtypes.missing import isna -import pandas.core.dtypes.concat as _concat - from pandas.core import common as com, algorithms from pandas.core.algorithms import checked_add_with_arr - from pandas.errors import NullFrequencyError - import pandas.io.formats.printing as printing from pandas._libs import lib, iNaT, NaT from pandas._libs.tslibs.period import Period @@ -44,6 +39,7 @@ from pandas.core.indexes.base import Index, _index_shared_docs from pandas.util._decorators import Appender, cache_readonly +import pandas.core.dtypes.concat as _concat import pandas.tseries.frequencies as frequencies import pandas.core.indexes.base as ibase diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index 7d58d22e5c0f1..afc86a51c02b4 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -34,7 +34,6 @@ import pandas.core.dtypes.concat as _concat from pandas.errors import PerformanceWarning - from pandas.core.algorithms import checked_add_with_arr from pandas.core.indexes.base import Index, _index_shared_docs diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index b6fcce0addff4..0e087c40cfef3 100644 --- a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -35,9 +35,7 @@ from pandas.core.indexes.timedeltas import timedelta_range from pandas.core.indexes.multi import MultiIndex from pandas.compat.numpy import function as nv - import pandas.core.common as com - from pandas.util._decorators import cache_readonly, Appender from pandas.core.config import get_option from pandas.tseries.frequencies import to_offset diff --git a/pandas/core/indexes/numeric.py b/pandas/core/indexes/numeric.py index 8cf4f6625d1f3..5e6ebb7588ab9 100644 --- a/pandas/core/indexes/numeric.py +++ b/pandas/core/indexes/numeric.py @@ -13,7 +13,6 @@ from pandas import compat from pandas.core import algorithms import pandas.core.common as com - from pandas.core.indexes.base import ( Index, InvalidIndexError, _index_shared_docs) from pandas.util._decorators import Appender, cache_readonly diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py index fa15f66a0e741..1a18b86acf57f 100644 --- a/pandas/core/indexes/range.py +++ b/pandas/core/indexes/range.py @@ -10,12 +10,11 @@ is_int64_dtype) from pandas.core.dtypes.generic import ABCSeries -import pandas.core.common as com - from pandas import compat from pandas.compat import lrange, range, get_range_parameters from pandas.compat.numpy import function as nv +import pandas.core.common as com from pandas.core.indexes.base import Index, _index_shared_docs from pandas.util._decorators import Appender, cache_readonly import pandas.core.dtypes.concat as _concat diff --git a/pandas/core/internals.py b/pandas/core/internals.py index fb495e2ce4ffc..516b58a26510c 100644 --- a/pandas/core/internals.py +++ b/pandas/core/internals.py @@ -56,7 +56,6 @@ import pandas.core.dtypes.concat as _concat from pandas.core.dtypes.generic import ABCSeries, ABCDatetimeIndex - import pandas.core.common as com import pandas.core.algorithms as algos diff --git a/pandas/core/ops.py b/pandas/core/ops.py index 2d31f9c5845ed..343b62940173e 
100644 --- a/pandas/core/ops.py +++ b/pandas/core/ops.py @@ -21,7 +21,6 @@ import pandas.core.common as com from pandas.errors import NullFrequencyError - from pandas.core.dtypes.missing import notna, isna from pandas.core.dtypes.common import ( needs_i8_conversion, diff --git a/pandas/core/panel.py b/pandas/core/panel.py index 2f293e1a58c89..ae86074ce2d05 100644 --- a/pandas/core/panel.py +++ b/pandas/core/panel.py @@ -18,11 +18,9 @@ import pandas.core.ops as ops import pandas.core.missing as missing import pandas.core.common as com - from pandas import compat from pandas.compat import (map, zip, range, u, OrderedDict) from pandas.compat.numpy import function as nv - from pandas.core.frame import DataFrame from pandas.core.generic import NDFrame, _shared_docs from pandas.core.index import (Index, MultiIndex, _ensure_index, @@ -32,7 +30,6 @@ from pandas.core.internals import (BlockManager, create_block_manager_from_arrays, create_block_manager_from_blocks) - from pandas.core.series import Series from pandas.core.reshape.util import cartesian_product from pandas.util._decorators import Appender diff --git a/pandas/core/series.py b/pandas/core/series.py index 954af8ef3da84..470dd23f26316 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -42,8 +42,7 @@ from pandas.core.index import (Index, MultiIndex, InvalidIndexError, Float64Index, _ensure_index) from pandas.core.indexing import check_bool_indexer, maybe_convert_indices - -from pandas.core import generic, base, algorithms, ops, nanops +from pandas.core import generic, base from pandas.core.internals import SingleBlockManager from pandas.core.arrays.categorical import Categorical, CategoricalAccessor from pandas.core.indexes.accessors import CombinedDatetimelikeProperties @@ -56,9 +55,11 @@ zip, u, OrderedDict, StringIO, range, get_range_parameters) from pandas.compat.numpy import function as nv +import pandas.core.ops as ops +import pandas.core.algorithms as algorithms import pandas.core.common as com - +import pandas.core.nanops as nanops import pandas.io.formats.format as fmt from pandas.util._decorators import ( Appender, deprecate, deprecate_kwarg, Substitution) diff --git a/pandas/core/window.py b/pandas/core/window.py index 51222b8ef5f92..4d6a1de60f59b 100644 --- a/pandas/core/window.py +++ b/pandas/core/window.py @@ -33,7 +33,6 @@ from pandas.core.base import (PandasObject, SelectionMixin, GroupByMixin) import pandas.core.common as com - import pandas._libs.window as _window from pandas import compat diff --git a/pandas/io/common.py b/pandas/io/common.py index ce7bc0debd993..4ba969f0abac4 100644 --- a/pandas/io/common.py +++ b/pandas/io/common.py @@ -9,7 +9,6 @@ from pandas.compat import StringIO, BytesIO, string_types, text_type from pandas import compat from pandas.io.formats.printing import pprint_thing - import pandas.core.common as com from pandas.core.dtypes.common import is_number, is_file_like diff --git a/pandas/io/formats/excel.py b/pandas/io/formats/excel.py index d1b9974b18c5d..2fc648d2952c4 100644 --- a/pandas/io/formats/excel.py +++ b/pandas/io/formats/excel.py @@ -10,9 +10,7 @@ from pandas.compat import reduce from pandas.io.formats.css import CSSResolver, CSSWarning from pandas.io.formats.printing import pprint_thing - import pandas.core.common as com - from pandas.core.dtypes.common import is_float, is_scalar from pandas.core.dtypes import missing from pandas import Index, MultiIndex, PeriodIndex diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py index 29780d63b5888..2293032ebb8a1 
100644 --- a/pandas/io/formats/format.py +++ b/pandas/io/formats/format.py @@ -28,7 +28,6 @@ from pandas.core.dtypes.generic import ABCSparseArray from pandas.core.base import PandasObject import pandas.core.common as com - from pandas.core.index import Index, MultiIndex, _ensure_index from pandas import compat from pandas.compat import (StringIO, lzip, range, map, zip, u, diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py index 097226861a7a1..58796aa30f0bf 100644 --- a/pandas/io/formats/style.py +++ b/pandas/io/formats/style.py @@ -27,7 +27,6 @@ from pandas.compat import range from pandas.core.config import get_option from pandas.core.generic import _shared_docs - import pandas.core.common as com from pandas.core.indexing import _maybe_numeric_slice, _non_reducing_slice from pandas.util._decorators import Appender diff --git a/pandas/io/html.py b/pandas/io/html.py index fe1f5e5fe3181..be4854bc19cc6 100644 --- a/pandas/io/html.py +++ b/pandas/io/html.py @@ -20,7 +20,6 @@ from pandas.compat import (lrange, lmap, u, string_types, iteritems, raise_with_traceback, binary_type) from pandas import Series - import pandas.core.common as com from pandas.io.formats.printing import pprint_thing diff --git a/pandas/io/json/json.py b/pandas/io/json/json.py index 49978dc073199..e3a1321336fb3 100644 --- a/pandas/io/json/json.py +++ b/pandas/io/json/json.py @@ -12,7 +12,6 @@ _infer_compression, _stringify_path, BaseIterator) from pandas.io.parsers import _validate_integer - import pandas.core.common as com from pandas.core.reshape.concat import concat from pandas.io.formats.printing import pprint_thing diff --git a/pandas/io/json/table_schema.py b/pandas/io/json/table_schema.py index ada9ca7233bff..01f7db7d68664 100644 --- a/pandas/io/json/table_schema.py +++ b/pandas/io/json/table_schema.py @@ -8,7 +8,6 @@ import pandas._libs.json as json from pandas import DataFrame from pandas.api.types import CategoricalDtype - import pandas.core.common as com from pandas.core.dtypes.common import ( is_integer_dtype, is_timedelta64_dtype, is_numeric_dtype, diff --git a/pandas/io/parquet.py b/pandas/io/parquet.py index 773ac45392929..4508d5c1e1781 100644 --- a/pandas/io/parquet.py +++ b/pandas/io/parquet.py @@ -5,7 +5,6 @@ from pandas import DataFrame, RangeIndex, Int64Index, get_option from pandas.compat import string_types import pandas.core.common as com - from pandas.io.common import get_filepath_or_buffer, is_s3_url diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py index ecc9d1353c552..5135bb01fb378 100755 --- a/pandas/io/parsers.py +++ b/pandas/io/parsers.py @@ -31,7 +31,6 @@ from pandas.core.arrays import Categorical from pandas.core import algorithms import pandas.core.common as com - from pandas.io.date_converters import generic_parser from pandas.errors import ParserWarning, ParserError, EmptyDataError from pandas.io.common import (get_filepath_or_buffer, is_file_like, diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index 0e70e77b73263..106823199ee93 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -34,7 +34,6 @@ from pandas.core.base import StringMixin from pandas.io.formats.printing import adjoin, pprint_thing from pandas.errors import PerformanceWarning - import pandas.core.common as com from pandas.core.algorithms import match, unique from pandas.core.arrays.categorical import (Categorical, diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py index dff55b36afb4d..8b03d6ddde4ec 100644 --- a/pandas/plotting/_core.py +++ 
b/pandas/plotting/_core.py @@ -10,7 +10,6 @@ import numpy as np from pandas.util._decorators import cache_readonly - import pandas.core.common as com from pandas.core.base import PandasObject from pandas.core.config import get_option diff --git a/pandas/tests/frame/test_to_csv.py b/pandas/tests/frame/test_to_csv.py index f9e6d31def622..da881e6f29bc9 100644 --- a/pandas/tests/frame/test_to_csv.py +++ b/pandas/tests/frame/test_to_csv.py @@ -9,9 +9,7 @@ import numpy as np from pandas.compat import (lmap, range, lrange, StringIO, u) - import pandas.core.common as com - from pandas.errors import ParserError from pandas import (DataFrame, Index, Series, MultiIndex, Timestamp, date_range, read_csv, compat, to_datetime) diff --git a/pandas/tests/indexes/interval/test_interval.py b/pandas/tests/indexes/interval/test_interval.py index 57e7d7e8a06d2..e8a7bc50d8e3c 100644 --- a/pandas/tests/indexes/interval/test_interval.py +++ b/pandas/tests/indexes/interval/test_interval.py @@ -6,9 +6,7 @@ Interval, IntervalIndex, Index, isna, notna, interval_range, Timestamp, Timedelta, date_range, timedelta_range, Categorical) from pandas.compat import lzip - import pandas.core.common as com - from pandas.tests.indexes.common import Base import pandas.util.testing as tm import pandas as pd diff --git a/pandas/tests/io/parser/test_parsers.py b/pandas/tests/io/parser/test_parsers.py index 85aca9e59e219..ec240531925e3 100644 --- a/pandas/tests/io/parser/test_parsers.py +++ b/pandas/tests/io/parser/test_parsers.py @@ -5,7 +5,6 @@ from pandas import read_csv, read_table, DataFrame import pandas.core.common as com - from pandas._libs.lib import Timestamp from pandas.compat import StringIO diff --git a/pandas/tests/scalar/test_interval.py b/pandas/tests/scalar/test_interval.py index b51bc88d705c0..c9e6e84d226a8 100644 --- a/pandas/tests/scalar/test_interval.py +++ b/pandas/tests/scalar/test_interval.py @@ -2,7 +2,6 @@ import numpy as np from pandas import Interval, Timestamp, Timedelta - import pandas.core.common as com import pytest diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py index 6cd9da0fe718a..b1e3177547ac6 100644 --- a/pandas/tests/test_algos.py +++ b/pandas/tests/test_algos.py @@ -18,7 +18,6 @@ from pandas.compat import lrange, range import pandas.core.algorithms as algos import pandas.core.common as com - import pandas.util.testing as tm import pandas.util._test_decorators as td from pandas.core.dtypes.dtypes import CategoricalDtype as CDT diff --git a/pandas/tests/util/test_util.py b/pandas/tests/util/test_util.py index 5fd66bb3f2b85..3b0a428218771 100644 --- a/pandas/tests/util/test_util.py +++ b/pandas/tests/util/test_util.py @@ -8,9 +8,7 @@ import pytest from pandas.compat import intern - import pandas.core.common as com - from pandas.util._move import move_into_mutable_buffer, BadMove, stolenbuf from pandas.util._decorators import deprecate_kwarg, make_signature from pandas.util._validators import (validate_args, validate_kwargs,
elements. """ - raise AbstractMethodError(self) + raise com.AbstractMethodError(self) def _parse_thead(self, table): """Return the header of a table. @@ -305,7 +306,7 @@ def _parse_thead(self, table): thead : node-like A
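
For reference, the _get_center_of_mass hunk in pandas/core/window.py renames the local argument com to comass (and the matching error messages) so that it no longer shadows the pandas.core.common namespace import; the conversion logic itself is unchanged. Below is a minimal standalone sketch of those conversions, not the patched function itself -- the helper name center_of_mass_from and the example values in the comments are illustrative assumptions.

    import numpy as np

    def center_of_mass_from(comass=None, span=None, halflife=None, alpha=None):
        # Illustrative re-statement of the decay-parameter conversions used by
        # _get_center_of_mass; exactly one parameter may be supplied.
        if sum(x is not None for x in (comass, span, halflife, alpha)) != 1:
            raise ValueError("pass exactly one of comass, span, halflife, alpha")
        if comass is not None:
            if comass < 0:
                raise ValueError("comass must satisfy: comass >= 0")
            return float(comass)
        if span is not None:
            if span < 1:
                raise ValueError("span must satisfy: span >= 1")
            return (span - 1) / 2.0                # e.g. span=20 -> 9.5
        if halflife is not None:
            if halflife <= 0:
                raise ValueError("halflife must satisfy: halflife > 0")
            decay = 1 - np.exp(np.log(0.5) / halflife)
            return 1.0 / decay - 1                 # e.g. halflife=10 -> ~13.93
        if alpha <= 0 or alpha > 1:
            raise ValueError("alpha must satisfy: 0 < alpha <= 1")
        return (1.0 - alpha) / alpha               # e.g. alpha=0.1 -> 9.0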
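
Many of the hunks above re-point raise AbstractMethodError(self) at the namespaced com.AbstractMethodError. A small usage sketch of that pattern, assuming the pandas version targeted by this patch; the DummyBaseParser/DummyParser classes are made up for illustration and are not part of the patch.

    import pandas.core.common as com

    class DummyBaseParser(object):
        def _build_doc(self):
            # Concrete subclasses are expected to override this method.
            raise com.AbstractMethodError(self)

    class DummyParser(DummyBaseParser):
        pass

    try:
        DummyParser()._build_doc()
    except com.AbstractMethodError as err:
        print(err)  # the message names the concrete class that failed to override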
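
The null-checking helpers that the hunks switch to the com namespace (_all_none, _any_not_none, _all_not_none, _count_not_none) are simple reductions over their arguments. A hedged sketch of their expected behaviour, with illustrative values only, assuming the pandas version this patch targets:

    import pandas.core.common as com

    # All arguments are None.
    assert com._all_none(None, None, None)
    # At least one argument is not None (0 counts as "not None").
    assert com._any_not_none(None, 0)
    # Every argument is not None.
    assert com._all_not_none(1, 2, 3)
    assert not com._all_not_none(1, None)
    # Number of non-None arguments, as used when validating EW parameters.
    assert com._count_not_none(None, 5, None, 0.1) == 2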