Skip to content

FIX value_counts should skip NaT #7424

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 3 commits into from
Jun 17, 2014
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 2 additions & 2 deletions doc/source/v0.14.1.txt
Original file line number Diff line number Diff line change
Expand Up @@ -75,7 +75,7 @@ Enhancements




- Add ``dropna`` argument to ``value_counts`` and ``nunique`` (:issue:`5569`).



Expand Down Expand Up @@ -159,7 +159,7 @@ Bug Fixes




- Bug in ``value_counts`` where ``NaT`` did not qualify as missing (``NaN``) (:issue:`7423`)



Expand Down
20 changes: 14 additions & 6 deletions pandas/core/algorithms.py
Original file line number Diff line number Diff line change
Expand Up @@ -168,7 +168,7 @@ def factorize(values, sort=False, order=None, na_sentinel=-1):


def value_counts(values, sort=True, ascending=False, normalize=False,
bins=None):
bins=None, dropna=True):
"""
Compute a histogram of the counts of non-null values

Expand All @@ -184,6 +184,8 @@ def value_counts(values, sort=True, ascending=False, normalize=False,
bins : integer, optional
Rather than count values, group them into half-open bins,
convenience for pd.cut, only works with numeric data
dropna : boolean, default True
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I think this should be True?

Don't include counts of NaN

Returns
-------
Expand All @@ -202,25 +204,31 @@ def value_counts(values, sort=True, ascending=False, normalize=False,
raise TypeError("bins argument only works with numeric data.")
values = cat.labels

if com.is_integer_dtype(values.dtype):
dtype = values.dtype
if com.is_integer_dtype(dtype):
values = com._ensure_int64(values)
keys, counts = htable.value_count_int64(values)

elif issubclass(values.dtype.type, (np.datetime64, np.timedelta64)):
dtype = values.dtype
values = values.view(np.int64)
keys, counts = htable.value_count_int64(values)

if dropna:
from pandas.tslib import iNaT
msk = keys != iNaT
keys, counts = keys[msk], counts[msk]
# convert the keys back to the dtype we came in
keys = Series(keys, dtype=dtype)
keys = keys.astype(dtype)

else:
mask = com.isnull(values)
values = com._ensure_object(values)
mask = com.isnull(values)
keys, counts = htable.value_count_object(values, mask)
if not dropna:
keys = np.insert(keys, 0, np.NaN)
counts = np.insert(counts, 0, mask.sum())

result = Series(counts, index=com._values_from_object(keys))

if bins is not None:
# TODO: This next line should be more efficient
result = result.reindex(np.arange(len(cat.levels)), fill_value=0)
Expand Down
10 changes: 6 additions & 4 deletions pandas/core/base.py
Original file line number Diff line number Diff line change
Expand Up @@ -245,7 +245,7 @@ def min(self):
return pandas.core.nanops.nanmin(self.values)

def value_counts(self, normalize=False, sort=True, ascending=False,
bins=None):
bins=None, dropna=True):
"""
Returns object containing counts of unique values. The resulting object
will be in descending order so that the first element is the most
Expand All @@ -263,14 +263,16 @@ def value_counts(self, normalize=False, sort=True, ascending=False,
bins : integer, optional
Rather than count values, group them into half-open bins,
a convenience for pd.cut, only works with numeric data
dropna : boolean, default True
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

same here

Don't include counts of NaN

Returns
-------
counts : Series
"""
from pandas.core.algorithms import value_counts
return value_counts(self.values, sort=sort, ascending=ascending,
normalize=normalize, bins=bins)
normalize=normalize, bins=bins, dropna=dropna)

def unique(self):
"""
Expand All @@ -284,15 +286,15 @@ def unique(self):
from pandas.core.nanops import unique1d
return unique1d(self.values)

def nunique(self):
def nunique(self, dropna=True):
"""
Return count of unique elements in the object. Excludes NA values when ``dropna`` is True (the default).

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Should add explanation of the new parameter (just as in value_counts)

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

that looks right (should be True) as then keeps the API the same. you want to fix?

Returns
-------
nunique : int
"""
return len(self.value_counts())
return len(self.value_counts(dropna=dropna))

def factorize(self, sort=False, na_sentinel=-1):
"""
Expand Down
13 changes: 13 additions & 0 deletions pandas/tests/test_algos.py
Original file line number Diff line number Diff line change
Expand Up @@ -237,6 +237,19 @@ def test_value_counts_dtypes(self):

self.assertRaises(TypeError, lambda s: algos.value_counts(s, bins=1), ['1', 1])

def test_value_counts_nat(self):
td = Series([np.timedelta64(10000), pd.NaT], dtype='timedelta64[ns]')
dt = pd.to_datetime(['NaT', '2014-01-01'])

for s in [td, dt]:
vc = algos.value_counts(s)
vc_with_na = algos.value_counts(s, dropna=False)
self.assertEqual(len(vc), 1)
self.assertEqual(len(vc_with_na), 2)

exp_dt = pd.Series({pd.Timestamp('2014-01-01 00:00:00'): 1})
tm.assert_series_equal(algos.value_counts(dt), exp_dt)
# TODO: add the same expected-Series check for the timedelta case

def test_quantile():
s = Series(np.random.randn(100))
Expand Down
21 changes: 12 additions & 9 deletions pandas/tests/test_base.py
Original file line number Diff line number Diff line change
Expand Up @@ -292,12 +292,13 @@ def test_value_counts_unique_nunique(self):
o = klass(np.repeat(values, range(1, len(o) + 1)))

if isinstance(o, DatetimeIndex):
# DatetimeIndex: nan is casted to Nat and included
expected_s = Series(list(range(10, 2, -1)) + [3], index=values[9:0:-1])
expected_s_na = Series(list(range(10, 2, -1)) + [3], index=values[9:0:-1])
expected_s = Series(list(range(10, 2, -1)), index=values[9:1:-1])
else:
# nan is excluded
expected_s = Series(range(10, 2, -1), index=values[9:1:-1], dtype='int64')
expected_s_na = Series(list(range(10, 2, -1)) +[3], index=values[9:0:-1], dtype='int64')
expected_s = Series(list(range(10, 2, -1)), index=values[9:1:-1], dtype='int64')

tm.assert_series_equal(o.value_counts(dropna=False), expected_s_na)
tm.assert_series_equal(o.value_counts(), expected_s)

# numpy_array_equal cannot compare arrays includes nan
Expand All @@ -309,10 +310,8 @@ def test_value_counts_unique_nunique(self):
else:
self.assertTrue(pd.isnull(result[0]))

if isinstance(o, DatetimeIndex):
self.assertEqual(o.nunique(), 9)
else:
self.assertEqual(o.nunique(), 8)
self.assertEqual(o.nunique(), 8)
self.assertEqual(o.nunique(dropna=False), 9)

def test_value_counts_inferred(self):
klasses = [Index, Series]
Expand Down Expand Up @@ -406,6 +405,9 @@ def test_value_counts_inferred(self):

result = s.value_counts()
self.assertEqual(result.index.dtype, 'datetime64[ns]')
tm.assert_series_equal(result, expected_s)

result = s.value_counts(dropna=False)
expected_s[pd.NaT] = 1
tm.assert_series_equal(result, expected_s)

Expand All @@ -415,7 +417,8 @@ def test_value_counts_inferred(self):
self.assert_numpy_array_equal(unique[:3], expected)
self.assertTrue(unique[3] is pd.NaT or unique[3].astype('int64') == pd.tslib.iNaT)

self.assertEqual(s.nunique(), 4)
self.assertEqual(s.nunique(), 3)
self.assertEqual(s.nunique(dropna=False), 4)

# timedelta64[ns]
td = df.dt - df.dt + timedelta(1)
Expand Down
9 changes: 6 additions & 3 deletions pandas/tseries/tests/test_timeseries.py
Original file line number Diff line number Diff line change
Expand Up @@ -106,16 +106,19 @@ def test_index_unique(self):
self.assertEqual(result.name, 'foo')
self.assertTrue(result.equals(expected))

# NaT
# NaT, note this is excluded
arr = [ 1370745748 + t for t in range(20) ] + [iNaT]
idx = DatetimeIndex(arr * 3)
self.assertTrue(idx.unique().equals(DatetimeIndex(arr)))
self.assertEqual(idx.nunique(), 21)
self.assertEqual(idx.nunique(), 20)
self.assertEqual(idx.nunique(dropna=False), 21)

arr = [ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT]
idx = DatetimeIndex(arr * 3)
self.assertTrue(idx.unique().equals(DatetimeIndex(arr)))
self.assertEqual(idx.nunique(), 21)
self.assertEqual(idx.nunique(), 20)
self.assertEqual(idx.nunique(dropna=False), 21)


def test_index_dupes_contains(self):
d = datetime(2011, 12, 5, 20, 30)
Expand Down