Commit 07c9ba5

BUG: Fix initialization of DataFrame from dict with NaN as key

closes #18455

1 parent: 5052842

7 files changed: +77 -43 lines

doc/source/whatsnew/v0.23.0.txt

+2
@@ -734,6 +734,7 @@ Reshaping
 - Bug in :func:`DataFrame.unstack` which casts int to float if ``columns`` is a ``MultiIndex`` with unused levels (:issue:`17845`)
 - Bug in :func:`DataFrame.unstack` which raises an error if ``index`` is a ``MultiIndex`` with unused labels on the unstacked level (:issue:`18562`)
 - Fixed construction of a :class:`Series` from a ``dict`` containing ``NaN`` as key (:issue:`18480`)
+- Fixed construction of a :class:`DataFrame` from a ``dict`` containing ``NaN`` as key (:issue:`18455`)
 - Bug in :func:`Series.rank` where ``Series`` containing ``NaT`` modifies the ``Series`` inplace (:issue:`18521`)
 - Bug in :func:`cut` which fails when using readonly arrays (:issue:`18773`)
 - Bug in :func:`Dataframe.pivot_table` which fails when the ``aggfunc`` arg is of type string. The behavior is now consistent with other methods like ``agg`` and ``apply`` (:issue:`18713`)
@@ -762,3 +763,4 @@ Other
 ^^^^^

 - Improved error message when attempting to use a Python keyword as an identifier in a ``numexpr`` backed query (:issue:`18221`)
+- Suppressed error in the construction of a :class:`DataFrame` from a ``dict`` containing scalar values when the corresponding keys are not included in the passed index (:issue:`18600`)
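
For reference, a minimal sketch of the user-facing behaviour the two new entries describe; the data below is illustrative and not taken from the commit:

    import numpy as np
    import pandas as pd

    # GH 18455: NaN can now be used as a dict key when building a DataFrame;
    # the NaN-labelled column aligns like any other label.
    df = pd.DataFrame({'a': [1, 2], np.nan: [3, 4]})

    # GH 18600: a scalar value whose key is excluded by an explicit `columns`
    # no longer raises; the unused key is simply ignored.
    empty = pd.DataFrame({'a': 0.7}, columns=['b'])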

pandas/core/frame.py

+17-34
@@ -27,6 +27,7 @@
 from pandas.core.dtypes.cast import (
     maybe_upcast,
     cast_scalar_to_array,
+    construct_1d_arraylike_from_scalar,
     maybe_cast_to_datetime,
     maybe_infer_to_datetimelike,
     maybe_convert_platform,
@@ -418,44 +419,28 @@ def _init_dict(self, data, index, columns, dtype=None):
         Needs to handle a lot of exceptional cases.
         """
         if columns is not None:
-            columns = _ensure_index(columns)
+            arrays = Series(data, index=columns, dtype=object)
+            data_names = arrays.index

-            # GH10856
-            # raise ValueError if only scalars in dict
+            missing = arrays.isnull()
             if index is None:
-                extract_index(list(data.values()))
-
-            # prefilter if columns passed
-            data = {k: v for k, v in compat.iteritems(data) if k in columns}
-
-            if index is None:
-                index = extract_index(list(data.values()))
-
+                # GH10856
+                # raise ValueError if only scalars in dict
+                index = extract_index(arrays[~missing].tolist())
             else:
                 index = _ensure_index(index)

-            arrays = []
-            data_names = []
-            for k in columns:
-                if k not in data:
-                    # no obvious "empty" int column
-                    if dtype is not None and issubclass(dtype.type,
-                                                        np.integer):
-                        continue
-
-                    if dtype is None:
-                        # 1783
-                        v = np.empty(len(index), dtype=object)
-                    elif np.issubdtype(dtype, np.flexible):
-                        v = np.empty(len(index), dtype=object)
-                    else:
-                        v = np.empty(len(index), dtype=dtype)
-
-                    v.fill(np.nan)
+            # no obvious "empty" int column
+            if missing.any() and not is_integer_dtype(dtype):
+                if dtype is None or np.issubdtype(dtype, np.flexible):
+                    # 1783
+                    nan_dtype = object
                 else:
-                    v = data[k]
-                data_names.append(k)
-                arrays.append(v)
+                    nan_dtype = dtype
+                v = construct_1d_arraylike_from_scalar(np.nan, len(index),
+                                                       nan_dtype)
+                arrays.loc[missing] = [v] * missing.sum()
+            arrays = arrays.tolist()

         else:
             keys = list(data.keys())
@@ -6145,8 +6130,6 @@ def _arrays_to_mgr(arrays, arr_names, index, columns, dtype=None):
     # figure out the index, if necessary
     if index is None:
         index = extract_index(arrays)
-    else:
-        index = _ensure_index(index)

     # don't force copy because getting jammed in an ndarray anyway
     arrays = _homogenize(arrays, index, dtype)
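
The core of the rewritten `_init_dict` is to align the input dict through a `Series` keyed by the requested columns, instead of filtering with `k in columns` style lookups; NaN labels are awkward for key lookups because NaN does not compare equal even to itself. A standalone sketch of the alignment idea, outside pandas internals (the variable names mirror the diff, the data is illustrative):

    import numpy as np
    import pandas as pd

    data = {1: [0, 1], np.nan: [2, 3]}   # dict with a NaN key
    columns = [1, np.nan, 3]             # requested columns

    # Building a Series indexed by `columns` aligns the NaN label correctly;
    # the label 3, which has no data, simply shows up as missing.
    arrays = pd.Series(data, index=columns, dtype=object)
    missing = arrays.isnull()
    print(missing.tolist())              # [False, False, True]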

pandas/core/generic.py

-1
@@ -6468,7 +6468,6 @@ def _where(self, cond, other=np.nan, inplace=False, axis=None, level=None,
                 if not is_bool_dtype(dt):
                     raise ValueError(msg.format(dtype=dt))

-        cond = cond.astype(bool, copy=False)
         cond = -cond if inplace else cond

         # try to align with other

pandas/core/internals.py

+1-1
@@ -4767,7 +4767,7 @@ def form_blocks(arrays, names, axes):
     items_dict = defaultdict(list)
     extra_locs = []

-    names_idx = Index(names)
+    names_idx = _ensure_index(names)
     if names_idx.equals(axes[0]):
         names_indexer = np.arange(len(names_idx))
     else:
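
A note on this one-liner: `Index(...)` re-infers the type of its labels (a list of tuples, for example, is tupleized into a `MultiIndex`), while the internal `_ensure_index` returns an already-constructed `Index` unchanged, which is presumably relevant now that `_init_dict` hands over the index it has already built (`data_names = arrays.index`). A small illustrative sketch of the inference behaviour being avoided, not of the pandas internals themselves:

    import pandas as pd

    labels = [('a', 1), ('b', 2)]

    # A plain Index() call tupleizes a list of tuples into a MultiIndex ...
    print(type(pd.Index(labels)).__name__)                       # MultiIndex
    # ... while tupleize_cols=False keeps them as flat tuple labels.
    print(type(pd.Index(labels, tupleize_cols=False)).__name__)  # Index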

pandas/core/series.py

+10-2
@@ -22,6 +22,7 @@
     is_extension_type,
     is_datetime64tz_dtype,
     is_timedelta64_dtype,
+    is_object_dtype,
     is_list_like,
     is_hashable,
     is_iterator,
@@ -36,7 +37,8 @@
     maybe_upcast, infer_dtype_from_scalar,
     maybe_convert_platform,
     maybe_cast_to_datetime, maybe_castable,
-    construct_1d_arraylike_from_scalar)
+    construct_1d_arraylike_from_scalar,
+    construct_1d_object_array_from_listlike)
 from pandas.core.dtypes.missing import isna, notna, remove_na_arraylike

 from pandas.core.index import (Index, MultiIndex, InvalidIndexError,
@@ -268,6 +270,7 @@ def _init_dict(self, data, index=None, dtype=None):
         # raises KeyError), so we iterate the entire dict, and align
         if data:
             keys, values = zip(*compat.iteritems(data))
+            values = list(values)
         else:
             keys, values = [], []

@@ -3165,7 +3168,12 @@ def _try_cast(arr, take_fast_path):

         try:
             subarr = maybe_cast_to_datetime(arr, dtype)
-            if not is_extension_type(subarr):
+            # Take care in creating object arrays (but generators are not
+            # supported, hence the __len__ check):
+            if is_object_dtype(dtype) and (hasattr(subarr, '__len__') and
+                                           not isinstance(subarr, np.ndarray)):
+                subarr = construct_1d_object_array_from_listlike(subarr)
+            elif not is_extension_type(subarr):
                 subarr = np.array(subarr, dtype=dtype, copy=copy)
         except (ValueError, TypeError):
             if is_categorical_dtype(dtype):
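
The new branch in `_try_cast` exists because handing a list of equal-length tuples straight to `np.array(..., dtype=object)` produces a 2-D array rather than a 1-D array of tuple objects; `construct_1d_object_array_from_listlike` pre-allocates a 1-D object array to avoid that (the `values = list(values)` change above presumably feeds the same path, which needs a sized list-like rather than a lazy iterable). A standalone sketch of the underlying NumPy behaviour:

    import numpy as np

    values = [('a', 1), ('b', 2)]

    # Naive conversion broadcasts the tuples into a 2-D object array ...
    print(np.array(values, dtype=object).shape)   # (2, 2)

    # ... whereas pre-allocating a 1-D object array and slice-assigning keeps
    # each tuple intact, which is the trick the helper relies on.
    result = np.empty(len(values), dtype=object)
    result[:] = values
    print(result.shape)                           # (2,)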

pandas/tests/frame/test_constructors.py

+46-4
@@ -287,8 +287,50 @@ def test_constructor_dict(self):
         with tm.assert_raises_regex(ValueError, msg):
             DataFrame({'a': 0.7}, columns=['a'])

-        with tm.assert_raises_regex(ValueError, msg):
-            DataFrame({'a': 0.7}, columns=['b'])
+    @pytest.mark.parametrize("scalar", [2, np.nan, None, 'D'])
+    def test_constructor_invalid_items_unused(self, scalar):
+        # No error if invalid (scalar) value is in fact not used:
+        result = DataFrame({'a': scalar}, columns=['b'])
+        expected = DataFrame(columns=['b'])
+        tm.assert_frame_equal(result, expected)
+
+    @pytest.mark.parametrize("value", [2, np.nan, None, float('nan')])
+    def test_constructor_dict_nan_key(self, value):
+        # GH 18455
+        cols = [1, value, 3]
+        idx = ['a', value]
+        values = [[0, 3], [1, 4], [2, 5]]
+        data = {cols[c]: Series(values[c], index=idx) for c in range(3)}
+        result = DataFrame(data).sort_values(1).sort_values('a', axis=1)
+        expected = DataFrame(np.arange(6, dtype='int64').reshape(2, 3),
+                             index=idx, columns=cols)
+        tm.assert_frame_equal(result, expected)
+
+        result = DataFrame(data, index=idx).sort_values('a', axis=1)
+        tm.assert_frame_equal(result, expected)
+
+        result = DataFrame(data, index=idx, columns=cols)
+        tm.assert_frame_equal(result, expected)
+
+    @pytest.mark.parametrize("value", [np.nan, None, float('nan')])
+    def test_constructor_dict_nan_tuple_key(self, value):
+        # GH 18455
+        cols = Index([(11, 21), (value, 22), (13, value)])
+        idx = Index([('a', value), (value, 2)])
+        values = [[0, 3], [1, 4], [2, 5]]
+        data = {cols[c]: Series(values[c], index=idx) for c in range(3)}
+        result = (DataFrame(data)
+                  .sort_values((11, 21))
+                  .sort_values(('a', value), axis=1))
+        expected = DataFrame(np.arange(6, dtype='int64').reshape(2, 3),
+                             index=idx, columns=cols)
+        tm.assert_frame_equal(result, expected)
+
+        result = DataFrame(data, index=idx).sort_values(('a', value), axis=1)
+        tm.assert_frame_equal(result, expected)
+
+        result = DataFrame(data, index=idx, columns=cols)
+        tm.assert_frame_equal(result, expected)

     def test_constructor_multi_index(self):
         # GH 4078
@@ -735,15 +777,15 @@ def test_constructor_corner(self):

         # does not error but ends up float
         df = DataFrame(index=lrange(10), columns=['a', 'b'], dtype=int)
-        assert df.values.dtype == np.object_
+        assert df.values.dtype == np.dtype('float64')

         # #1783 empty dtype object
         df = DataFrame({}, columns=['foo', 'bar'])
         assert df.values.dtype == np.object_

         df = DataFrame({'b': 1}, index=lrange(10), columns=list('abc'),
                        dtype=int)
-        assert df.values.dtype == np.object_
+        assert df.values.dtype == np.dtype('float64')

     def test_constructor_scalar_inference(self):
         data = {'int': 1, 'bool': True,
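
The two relaxed assertions in `test_constructor_corner` follow from the new NaN-filling path in `_init_dict`: columns that receive no data are filled with NaN, so a requested integer dtype ends up as float64 rather than object. A quick sketch of the behaviour the updated assertions describe (the output comments mirror the assertions at the time of this commit; much newer pandas releases may differ):

    import pandas as pd

    # Requested int dtype, but no data for the columns: NaN-filled, so float64.
    df = pd.DataFrame(index=range(10), columns=['a', 'b'], dtype=int)
    print(df.values.dtype)    # float64

    # Without a requested dtype, empty columns stay object (GH 1783).
    df = pd.DataFrame({}, columns=['foo', 'bar'])
    print(df.values.dtype)    # object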

pandas/tests/io/test_excel.py

+1-1
@@ -511,7 +511,7 @@ def test_read_one_empty_col_with_header(self):
         )
         expected_header_none = DataFrame(pd.Series([0], dtype='int64'))
         tm.assert_frame_equal(actual_header_none, expected_header_none)
-        expected_header_zero = DataFrame(columns=[0], dtype='int64')
+        expected_header_zero = DataFrame(columns=[0])
         tm.assert_frame_equal(actual_header_zero, expected_header_zero)

     def test_set_column_names_in_parameter(self):
