Skip to content

CLN: move unique1d to algorithms from nanops #14919

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 1 commit into from
Dec 19, 2016
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
32 changes: 32 additions & 0 deletions pandas/core/algorithms.py
Original file line number Diff line number Diff line change
Expand Up @@ -113,6 +113,38 @@ def _unique_generic(values, table_type, type_caster):
return type_caster(uniques)


def unique1d(values):
    """
    Compute the unique values of ``values`` via a hash table.

    Dispatches to a dtype-specific hash table; unlike ``np.unique``
    the result is not sorted.
    """
    dtype = values.dtype

    if np.issubdtype(dtype, np.floating):
        ht = htable.Float64HashTable(len(values))
        return np.array(ht.unique(_ensure_float64(values)),
                        dtype=np.float64)

    if np.issubdtype(dtype, np.datetime64):
        ht = htable.Int64HashTable(len(values))
        return ht.unique(_ensure_int64(values)).view('M8[ns]')

    if np.issubdtype(dtype, np.timedelta64):
        ht = htable.Int64HashTable(len(values))
        return ht.unique(_ensure_int64(values)).view('m8[ns]')

    if np.issubdtype(dtype, np.integer):
        ht = htable.Int64HashTable(len(values))
        return ht.unique(_ensure_int64(values))

    # object fallback: a String hash table is cheaper than an Object one,
    # so prefer it when the values are inferred to be strings
    if lib.infer_dtype(values) in ['string']:
        ht = htable.StringHashTable(len(values))
    else:
        ht = htable.PyObjectHashTable(len(values))

    return ht.unique(_ensure_object(values))


def isin(comps, values):
"""
Compute the isin boolean array
Expand Down
2 changes: 1 addition & 1 deletion pandas/core/base.py
Original file line number Diff line number Diff line change
Expand Up @@ -969,7 +969,7 @@ def unique(self):
if hasattr(values, 'unique'):
result = values.unique()
else:
from pandas.core.nanops import unique1d
from pandas.core.algorithms import unique1d
result = unique1d(values)
return result

Expand Down
3 changes: 1 addition & 2 deletions pandas/core/categorical.py
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,7 @@
is_scalar)
from pandas.core.common import is_null_slice

from pandas.core.algorithms import factorize, take_1d
from pandas.core.algorithms import factorize, take_1d, unique1d
from pandas.core.base import (PandasObject, PandasDelegate,
NoNewAttributesMixin, _shared_docs)
import pandas.core.common as com
Expand Down Expand Up @@ -1834,7 +1834,6 @@ def unique(self):
unique values : ``Categorical``
"""

from pandas.core.nanops import unique1d
# unlike np.unique, unique1d does not sort
unique_codes = unique1d(self.codes)
cat = self.copy()
Expand Down
29 changes: 1 addition & 28 deletions pandas/core/nanops.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,10 +9,8 @@
except ImportError: # pragma: no cover
_USE_BOTTLENECK = False

import pandas.hashtable as _hash
from pandas import compat, lib, algos, tslib
from pandas.types.common import (_ensure_int64, _ensure_object,
_ensure_float64, _get_dtype,
from pandas.types.common import (_get_dtype,
is_float, is_scalar,
is_integer, is_complex, is_float_dtype,
is_complex_dtype, is_integer_dtype,
Expand Down Expand Up @@ -784,28 +782,3 @@ def f(x, y):
nanle = make_nancomp(operator.le)
naneq = make_nancomp(operator.eq)
nanne = make_nancomp(operator.ne)


def unique1d(values):
    """
    Compute the unique values of ``values`` via a hash table.

    Dispatches on dtype to the matching hash table; the result is
    not sorted (unlike ``np.unique``).
    """
    dtype = values.dtype

    if np.issubdtype(dtype, np.floating):
        ht = _hash.Float64HashTable(len(values))
        return np.array(ht.unique(_ensure_float64(values)),
                        dtype=np.float64)

    if np.issubdtype(dtype, np.datetime64):
        ht = _hash.Int64HashTable(len(values))
        return ht.unique(_ensure_int64(values)).view('M8[ns]')

    if np.issubdtype(dtype, np.timedelta64):
        ht = _hash.Int64HashTable(len(values))
        return ht.unique(_ensure_int64(values)).view('m8[ns]')

    if np.issubdtype(dtype, np.integer):
        ht = _hash.Int64HashTable(len(values))
        return ht.unique(_ensure_int64(values))

    # fall back to an object hash table for anything else
    ht = _hash.PyObjectHashTable(len(values))
    return ht.unique(_ensure_object(values))
55 changes: 33 additions & 22 deletions pandas/tests/test_algos.py
Original file line number Diff line number Diff line change
Expand Up @@ -277,28 +277,6 @@ def test_factorize_nan(self):
self.assertTrue(
np.array_equal(pd.isnull(key), expected == na_sentinel))

def test_vector_resize(self):
    # Guard against memory errors after the hash table's internal
    # vector reallocates (pull request #7157).

    def run_resize_check(table, vector, dtype, nvals):
        data = np.array(np.random.randn(1000), dtype=dtype)
        # get_labels appends into the vector
        table.get_labels(data[:nvals], vector, 0, -1)
        # to_array forces the vector to resize
        vector.to_array()
        table.get_labels(data, vector, 0, -1)

    cases = [
        (hashtable.PyObjectHashTable, hashtable.ObjectVector, 'object'),
        (hashtable.Float64HashTable, hashtable.Float64Vector, 'float64'),
        (hashtable.Int64HashTable, hashtable.Int64Vector, 'int64'),
    ]

    for table_cls, vector_cls, dtype in cases:
        # resizing down to empty is a special case
        run_resize_check(table_cls(), vector_cls(), dtype, 0)
        run_resize_check(table_cls(), vector_cls(), dtype, 10)

def test_complex_sorting(self):
# gh 12666 - check no segfault
# Test not valid numpy versions older than 1.11
Expand Down Expand Up @@ -912,6 +890,39 @@ class TestGroupVarFloat32(tm.TestCase, GroupVarTestMixin):
rtol = 1e-2


class TestHashTable(tm.TestCase):

    def test_lookup_nan(self):
        # NaN keys must round-trip through map_locations/lookup
        values = np.array([2.718, 3.14, np.nan, -7, 5, 2, 3])
        table = hashtable.Float64HashTable()
        table.map_locations(values)
        expected = np.arange(len(values), dtype=np.int64)
        self.assert_numpy_array_equal(table.lookup(values), expected)

    def test_vector_resize(self):
        # Guard against memory errors after the hash table's internal
        # vector reallocates (pull request #7157).

        def run_resize_check(table, vector, dtype, nvals):
            data = np.array(np.random.randn(1000), dtype=dtype)
            # get_labels appends into the vector
            table.get_labels(data[:nvals], vector, 0, -1)
            # to_array forces the vector to resize
            vector.to_array()
            table.get_labels(data, vector, 0, -1)

        cases = [
            (hashtable.PyObjectHashTable, hashtable.ObjectVector, 'object'),
            (hashtable.StringHashTable, hashtable.ObjectVector, 'object'),
            (hashtable.Float64HashTable, hashtable.Float64Vector, 'float64'),
            (hashtable.Int64HashTable, hashtable.Int64Vector, 'int64'),
        ]

        for table_cls, vector_cls, dtype in cases:
            # resizing down to empty is a special case
            run_resize_check(table_cls(), vector_cls(), dtype, 0)
            run_resize_check(table_cls(), vector_cls(), dtype, 10)


def test_quantile():
s = Series(np.random.randn(100))

Expand Down
11 changes: 0 additions & 11 deletions pandas/tests/test_base.py
Original file line number Diff line number Diff line change
Expand Up @@ -1051,17 +1051,6 @@ def test_searchsorted(self):
self.assertTrue(0 <= index <= len(o))


class TestFloat64HashTable(tm.TestCase):

    def test_lookup_nan(self):
        # NaN keys must round-trip through map_locations/lookup
        from pandas.hashtable import Float64HashTable
        values = np.array([2.718, 3.14, np.nan, -7, 5, 2, 3])
        table = Float64HashTable()
        table.map_locations(values)
        expected = np.arange(len(values), dtype=np.int64)
        self.assert_numpy_array_equal(table.lookup(values), expected)


class TestTranspose(Ops):
errmsg = "the 'axes' parameter is not supported"

Expand Down
4 changes: 2 additions & 2 deletions pandas/tseries/util.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@
import numpy as np
from pandas.types.common import _ensure_platform_int
from pandas.core.frame import DataFrame
import pandas.core.nanops as nanops
import pandas.core.algorithms as algorithms


def pivot_annual(series, freq=None):
Expand Down Expand Up @@ -45,7 +45,7 @@ def pivot_annual(series, freq=None):

index = series.index
year = index.year
years = nanops.unique1d(year)
years = algorithms.unique1d(year)

if freq is not None:
freq = freq.upper()
Expand Down