API / CoW: DataFrame(<dict of Series>, copy=False) constructor now gives lazy copy #50777

Merged
Changes shown from 18 of 23 commits.

Commits:
6ce3306  API / CoW: DataFrame(<dict of Series>, copy=False) constructor now gi…  (jorisvandenbossche, Jan 16, 2023)
104cab9  expand test  (jorisvandenbossche, Jan 16, 2023)
e192a63  try fix mypy  (jorisvandenbossche, Jan 16, 2023)
0b4f337  Merge remote-tracking branch 'upstream/main' into cow-dataframe-const…  (jorisvandenbossche, Jan 17, 2023)
a300b08  clean-up usage of internals in reshape/concat.py  (jorisvandenbossche, Jan 17, 2023)
df21ca2  further fix mypy  (jorisvandenbossche, Jan 17, 2023)
bc5d77a  Merge remote-tracking branch 'upstream/main' into cow-dataframe-const…  (jorisvandenbossche, Jan 20, 2023)
e7d2f0f  expand tests  (jorisvandenbossche, Jan 20, 2023)
27e8549  fix reindex case  (jorisvandenbossche, Jan 20, 2023)
a05c066  Merge remote-tracking branch 'upstream/main' into cow-dataframe-const…  (jorisvandenbossche, Jan 23, 2023)
10238ba  clean-up  (jorisvandenbossche, Jan 23, 2023)
6d860bc  add whatsnew  (jorisvandenbossche, Jan 23, 2023)
5bb6ab0  Merge remote-tracking branch 'upstream/main' into cow-dataframe-const…  (jorisvandenbossche, Jan 30, 2023)
b24ea5f  add xfailed test for int64->Int64  (jorisvandenbossche, Jan 30, 2023)
941676b  Merge remote-tracking branch 'upstream/main' into cow-dataframe-const…  (jorisvandenbossche, Feb 10, 2023)
6e32a34  refactor  (jorisvandenbossche, Feb 10, 2023)
82bc4bb  cleanup  (jorisvandenbossche, Feb 10, 2023)
7fc8594  fix for AM  (jorisvandenbossche, Feb 10, 2023)
9d11606  Merge remote-tracking branch 'upstream/main' into cow-dataframe-const…  (jorisvandenbossche, Feb 10, 2023)
3d2d5e6  fix + address feedback  (jorisvandenbossche, Feb 10, 2023)
48a00a1  fixup typing  (jorisvandenbossche, Feb 10, 2023)
9a2f97c  Merge remote-tracking branch 'upstream/main' into cow-dataframe-const…  (jorisvandenbossche, Feb 10, 2023)
5476780  try fix typing  (jorisvandenbossche, Feb 10, 2023)
4 changes: 4 additions & 0 deletions doc/source/whatsnew/v2.0.0.rst
@@ -239,6 +239,10 @@ Copy-on-Write improvements
   a modification to the data happens) when constructing a Series from an existing
   Series with the default of ``copy=False`` (:issue:`50471`)
 
+- The :class:`DataFrame` constructor, when constructing a DataFrame from a dictionary
+  of Series objects and specifying ``copy=False``, will now use a lazy copy
+  of those Series objects for the columns of the DataFrame (:issue:`50777`)
+
 - Trying to set values using chained assignment (for example, ``df["a"][1:3] = 0``)
   will now always raise an exception when Copy-on-Write is enabled. In this mode,
   chained assignment can never work because we are always setting into a temporary
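
For illustration, a minimal sketch of the behavior this whatsnew entry describes (not part of the diff; assumes a pandas build with Copy-on-Write enabled via the ``mode.copy_on_write`` option):

import numpy as np
import pandas as pd

pd.set_option("mode.copy_on_write", True)

s = pd.Series([1, 2, 3], name="a")
df = pd.DataFrame({"a": s}, copy=False)

# The new column is a lazy copy: it still shares memory with the parent Series.
assert np.shares_memory(df["a"].to_numpy(), s.to_numpy())

# A later mutation of the DataFrame copies the data first, so the parent
# Series is left unchanged.
df.iloc[0, 0] = 100
assert s.iloc[0] == 1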
6 changes: 4 additions & 2 deletions pandas/core/internals/blocks.py
@@ -2167,15 +2167,17 @@ def get_block_type(dtype: DtypeObj):
     return cls
 
 
-def new_block_2d(values: ArrayLike, placement: BlockPlacement):
+def new_block_2d(
+    values: ArrayLike, placement: BlockPlacement, refs: BlockValuesRefs | None = None
+):
     # new_block specialized to case with
     #  ndim=2
     #  isinstance(placement, BlockPlacement)
     #  check_ndim/ensure_block_shape already checked
     klass = get_block_type(values.dtype)
 
     values = maybe_coerce_values(values)
-    return klass(values, ndim=2, placement=placement)
+    return klass(values, ndim=2, placement=placement, refs=refs)
 
 
 def new_block(
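
The new ``refs`` argument threads a ``BlockValuesRefs`` object from the parent Series' block into the newly created block. For intuition, a simplified, self-contained model of that reference tracking (an illustration of the mechanism, not the actual pandas implementation):

import weakref

class ValuesRefs:
    # Simplified stand-in for pandas' BlockValuesRefs: every block viewing
    # the same underlying array registers itself as a weak reference.
    def __init__(self, blk=None):
        self.referenced_blocks = []
        if blk is not None:
            self.referenced_blocks.append(weakref.ref(blk))

    def add_reference(self, blk) -> None:
        self.referenced_blocks.append(weakref.ref(blk))

    def has_reference(self) -> bool:
        # Prune blocks that were garbage-collected; more than one live
        # block means the values are shared, so a write must copy first.
        self.referenced_blocks = [
            ref for ref in self.referenced_blocks if ref() is not None
        ]
        return len(self.referenced_blocks) > 1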
25 changes: 19 additions & 6 deletions pandas/core/internals/construction.py
@@ -116,7 +116,7 @@ def arrays_to_mgr(
     index = ensure_index(index)
 
     # don't force copy because getting jammed in an ndarray anyway
-    arrays = _homogenize(arrays, index, dtype)
+    arrays, refs = _homogenize(arrays, index, dtype)
     # _homogenize ensures
     #  - all(len(x) == len(index) for x in arrays)
     #  - all(x.ndim == 1 for x in arrays)
@@ -126,8 +126,10 @@ def arrays_to_mgr(
     else:
         index = ensure_index(index)
         arrays = [extract_array(x, extract_numpy=True) for x in arrays]
+        # with _from_arrays, the passed arrays should never be Series objects
+        refs = [None] * len(arrays)
 
-        # Reached via DataFrame._from_arrays; we do validation here
+        # Reached via DataFrame._from_arrays; we do minimal validation here
         for arr in arrays:
             if (
                 not isinstance(arr, (np.ndarray, ExtensionArray))
@@ -148,7 +150,7 @@ def arrays_to_mgr(
 
     if typ == "block":
         return create_block_manager_from_column_arrays(
-            arrays, axes, consolidate=consolidate
+            arrays, axes, consolidate=consolidate, refs=refs
        )
    elif typ == "array":
        return ArrayManager(arrays, [index, columns])
@@ -547,19 +549,29 @@ def _ensure_2d(values: np.ndarray) -> np.ndarray:
     return values
 
 
-def _homogenize(data, index: Index, dtype: DtypeObj | None) -> list[ArrayLike]:
+def _homogenize(
+    data, index: Index, dtype: DtypeObj | None
+) -> tuple[list[ArrayLike], list[Any]]:
     oindex = None
     homogenized = []
+    # if the original array-like in `data` is a Series, keep track of this Series' refs
+    refs: list[Any] = []
 
     for val in data:
         if isinstance(val, ABCSeries):
+            orig = val
             if dtype is not None:
                 val = val.astype(dtype, copy=False)
             if val.index is not index:
                 # Forces alignment. No need to copy data since we
                 # are putting it into an ndarray later
                 val = val.reindex(index, copy=False)
 
+            if val is orig:
+                val = val.copy(deep=False)
+            if isinstance(val._mgr, BlockManager):
+                refs.append(val._mgr._block.refs)
+            else:
+                refs.append(None)
             val = val._values
         else:
             if isinstance(val, dict):
@@ -578,10 +590,11 @@ def _homogenize(data, index: Index, dtype: DtypeObj | None) -> list[ArrayLike]:
 
             val = sanitize_array(val, index, dtype=dtype, copy=False)
             com.require_length_match(val, index)
+            refs.append(None)
 
         homogenized.append(val)
 
-    return homogenized
+    return homogenized, refs
 
 
 def _extract_index(data) -> Index:
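
To see why ``_homogenize`` records refs only for Series that are taken over as-is, a small sketch (illustrative; assumes Copy-on-Write enabled): a reindex that actually rearranges the data materializes a new array, so there is no parent left to track and the result must own its memory.

import numpy as np
import pandas as pd

pd.set_option("mode.copy_on_write", True)

s = pd.Series([1, 2, 3], index=[0, 1, 2])

# Same index: the column is a lazy copy of s and shares its memory.
aligned = pd.DataFrame({"a": s}, copy=False)
assert np.shares_memory(aligned["a"].to_numpy(), s.to_numpy())

# Different index: the reindex materializes a new array, so no reference
# back to s is kept and the result owns its memory.
reindexed = pd.DataFrame({"a": s}, index=[1, 2, 3], copy=False)
assert not np.shares_memory(reindexed["a"].to_numpy(), s.to_numpy())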
19 changes: 13 additions & 6 deletions pandas/core/internals/managers.py
@@ -2076,6 +2076,7 @@ def create_block_manager_from_column_arrays(
     arrays: list[ArrayLike],
     axes: list[Index],
     consolidate: bool = True,
+    refs: list = None,
 ) -> BlockManager:
     # Assertions disabled for performance (caller is responsible for verifying)
     # assert isinstance(axes, list)
@@ -2089,7 +2090,7 @@ def create_block_manager_from_column_arrays(
     #  verify_integrity=False below.
 
     try:
-        blocks = _form_blocks(arrays, consolidate)
+        blocks = _form_blocks(arrays, consolidate, refs)
         mgr = BlockManager(blocks, axes, verify_integrity=False)
     except ValueError as e:
         raise_construction_error(len(arrays), arrays[0].shape, axes, e)
@@ -2143,13 +2144,17 @@ def _grouping_func(tup: tuple[int, ArrayLike]) -> tuple[int, bool, DtypeObj]:
     return sep, isinstance(dtype, np.dtype), dtype
 
 
-def _form_blocks(arrays: list[ArrayLike], consolidate: bool) -> list[Block]:
+def _form_blocks(arrays: list[ArrayLike], consolidate: bool, refs: list) -> list[Block]:
     tuples = list(enumerate(arrays))
 
     if not consolidate:
-        nbs = _tuples_to_blocks_no_consolidate(tuples)
+        nbs = _tuples_to_blocks_no_consolidate(tuples, refs)
         return nbs
 
+    # when consolidating, we can ignore refs (either stacking always copies,
+    # or the EA is already copied in the calling dict_to_mgr)
+    # TODO(CoW) check if this is also valid for rec_array_to_mgr
+
     # group by dtype
     grouper = itertools.groupby(tuples, _grouping_func)
 
@@ -2187,11 +2192,13 @@ def _form_blocks(arrays: list[ArrayLike], consolidate: bool) -> list[Block]:
     return nbs
 
 
-def _tuples_to_blocks_no_consolidate(tuples) -> list[Block]:
+def _tuples_to_blocks_no_consolidate(tuples, refs) -> list[Block]:
     # tuples produced within _form_blocks are of the form (placement, array)
     return [
-        new_block_2d(ensure_block_shape(x[1], ndim=2), placement=BlockPlacement(x[0]))
-        for x in tuples
+        new_block_2d(
+            ensure_block_shape(arr, ndim=2), placement=BlockPlacement(i), refs=ref
+        )
+        for ((i, arr), ref) in zip(tuples, refs)
     ]
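
The consolidation comment above is also why the lazy copy only applies when ``copy=False`` is passed explicitly: as the test comment later in this diff notes, the default for ``DataFrame(dict)`` still copies so that same-dtype columns can be consolidated into a single block. A quick sketch (behavior as of this PR, without Copy-on-Write enabled):

import numpy as np
import pandas as pd

s1 = pd.Series([1, 2, 3])
s2 = pd.Series([4, 5, 6])

# Default: the data is copied so the two int64 columns can be consolidated.
default = pd.DataFrame({"a": s1, "b": s2})
assert not np.shares_memory(default["a"].to_numpy(), s1.to_numpy())

# copy=False: one unconsolidated block per column, sharing the Series' memory.
lazy = pd.DataFrame({"a": s1, "b": s2}, copy=False)
assert np.shares_memory(lazy["a"].to_numpy(), s1.to_numpy())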
35 changes: 6 additions & 29 deletions pandas/core/reshape/concat.py
@@ -17,10 +17,7 @@
 
 import numpy as np
 
-from pandas._config import (
-    get_option,
-    using_copy_on_write,
-)
+from pandas._config import using_copy_on_write
 
 from pandas._typing import (
     Axis,
@@ -52,7 +49,6 @@
     get_unanimous_names,
 )
 from pandas.core.internals import concatenate_managers
-from pandas.core.internals.construction import dict_to_mgr
 
 if TYPE_CHECKING:
     from pandas import (
@@ -535,26 +531,18 @@ def __init__(
                     )
 
                 else:
-                    original_obj = obj
-                    name = new_name = getattr(obj, "name", None)
+                    name = getattr(obj, "name", None)
                     if ignore_index or name is None:
-                        new_name = current_column
+                        name = current_column
                         current_column += 1
 
                     # doing a row-wise concatenation so need everything
                     # to line up
                     if self._is_frame and axis == 1:
-                        new_name = 0
+                        name = 0
                     # mypy needs to know sample is not an NDFrame
                     sample = cast("DataFrame | Series", sample)
-                    obj = sample._constructor(obj, columns=[name], copy=False)
-                    if using_copy_on_write():
-                        # TODO(CoW): Remove when ref tracking in constructors works
-                        for i, block in enumerate(original_obj._mgr.blocks):  # type: ignore[union-attr] # noqa
-                            obj._mgr.blocks[i].refs = block.refs  # type: ignore[union-attr] # noqa
-                            obj._mgr.blocks[i].refs.add_reference(obj._mgr.blocks[i])  # type: ignore[arg-type, union-attr] # noqa
-
-                    obj.columns = [new_name]
+                    obj = sample._constructor({name: obj}, copy=False)
 
                     self.objs.append(obj)
 
@@ -604,18 +592,7 @@ def get_result(self):
                 cons = sample._constructor_expanddim
 
                 index, columns = self.new_axes
-                mgr = dict_to_mgr(
-                    data,
-                    index,
-                    None,
-                    copy=self.copy,
-                    typ=get_option("mode.data_manager"),
-                )
-                if using_copy_on_write() and not self.copy:
-                    for i, obj in enumerate(self.objs):
-                        mgr.blocks[i].refs = obj._mgr.blocks[0].refs  # type: ignore[union-attr] # noqa
-                        mgr.blocks[i].refs.add_reference(mgr.blocks[i])  # type: ignore[arg-type, union-attr] # noqa
-                df = cons(mgr, copy=False)
+                df = cons(data, index=index, copy=self.copy)
                 df.columns = columns
                 return df.__finalize__(self, method="concat")
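
This simplification relies on the constructor change elsewhere in this PR: building the result through the ``DataFrame(<dict of Series>)`` path now sets up Copy-on-Write references by itself, so the manual ref patching deleted above is no longer needed. A sketch of the user-visible behavior this preserves (assumes Copy-on-Write enabled):

import numpy as np
import pandas as pd

pd.set_option("mode.copy_on_write", True)

s1 = pd.Series([1, 2, 3], name="a")
s2 = pd.Series([4, 5, 6], name="b")

result = pd.concat([s1, s2], axis=1, copy=False)

# No eager copy was made: the result still shares memory with the inputs ...
assert np.shares_memory(result["a"].to_numpy(), s1.to_numpy())

# ... and a mutation of the result copies lazily, leaving s1 untouched.
result.iloc[0, 0] = 100
assert s1.iloc[0] == 1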
83 changes: 82 additions & 1 deletion pandas/tests/copy_view/test_constructors.py
@@ -1,7 +1,12 @@
 import numpy as np
 import pytest
 
-from pandas import Series
+from pandas import (
+    DataFrame,
+    Series,
+)
+import pandas._testing as tm
+from pandas.tests.copy_view.util import get_array
 
 # -----------------------------------------------------------------------------
 # Copy/view behaviour for Series / DataFrame constructors
@@ -75,3 +80,79 @@ def test_series_from_series_with_reindex(using_copy_on_write):
     assert not np.shares_memory(ser.values, result.values)
     if using_copy_on_write:
         assert not result._mgr.blocks[0].refs.has_reference()
+
+
+@pytest.mark.parametrize("dtype", [None, "int64", "Int64"])
+@pytest.mark.parametrize("index", [None, [0, 1, 2]])
+@pytest.mark.parametrize("columns", [None, ["a", "b"], ["a", "b", "c"]])
+def test_dataframe_from_dict_of_series(
+    request, using_copy_on_write, columns, index, dtype
+):
+    # Case: constructing a DataFrame from Series objects with copy=False
+    # has to do a lazy copy following CoW rules
+    # (the default for DataFrame(dict) is still to copy to ensure consolidation)
+    s1 = Series([1, 2, 3])
+    s2 = Series([4, 5, 6])
+    s1_orig = s1.copy()
+    expected = DataFrame(
+        {"a": [1, 2, 3], "b": [4, 5, 6]}, index=index, columns=columns, dtype=dtype
+    )
+
+    result = DataFrame(
+        {"a": s1, "b": s2}, index=index, columns=columns, dtype=dtype, copy=False
+    )
+
+    # the shallow copy still shares memory
+    assert np.shares_memory(get_array(result, "a"), s1.values)

Inline review comment (Member): Can you use get_array here?
Inline review comment (Member): Also below

+
+    # mutating the new dataframe doesn't mutate original
+    result.iloc[0, 0] = 10
+    if using_copy_on_write:
+        assert not np.shares_memory(get_array(result, "a"), s1.values)
+        tm.assert_series_equal(s1, s1_orig)
+    else:
+        assert s1.iloc[0] == 10
+
+    # the same when modifying the parent series
+    result = DataFrame(
+        {"a": s1, "b": s2}, index=index, columns=columns, dtype=dtype, copy=False
+    )
+    s1.iloc[0] = 10
+    if using_copy_on_write:
+        assert not np.shares_memory(get_array(result, "a"), s1.values)
+        tm.assert_frame_equal(result, expected)
+    else:
+        assert result.iloc[0, 0] == 10
+
+
+@pytest.mark.parametrize("dtype", [None, "int64"])
+def test_dataframe_from_dict_of_series_with_reindex(dtype):
+    # Case: constructing a DataFrame from Series objects with copy=False
+    # and passing an index that requires an actual (no-view) reindex -> need
+    # to ensure the result doesn't have refs set up to unnecessarily trigger
+    # a copy on write
+    s1 = Series([1, 2, 3])
+    s2 = Series([4, 5, 6])
+    df = DataFrame({"a": s1, "b": s2}, index=[1, 2, 3], dtype=dtype, copy=False)
+
+    # df should own its memory, so mutating shouldn't trigger a copy
+    arr_before = get_array(df, "a")
+    df.iloc[0, 0] = 100
+    arr_after = get_array(df, "a")
+    assert np.shares_memory(arr_before, arr_after)
+
+
+@pytest.mark.parametrize("index", [None, [0, 1, 2]])
+def test_dataframe_from_dict_of_series_with_dtype(index):
+    # Variant of above, but now passing a dtype that causes a copy
+    # -> need to ensure the result doesn't have refs set up to unnecessarily
+    # trigger a copy on write
+    s1 = Series([1.0, 2.0, 3.0])
+    s2 = Series([4, 5, 6])
+    df = DataFrame({"a": s1, "b": s2}, index=index, dtype="int64", copy=False)
+
+    # df should own its memory, so mutating shouldn't trigger a copy
+    arr_before = get_array(df, "a")
+    df.iloc[0, 0] = 100
+    arr_after = get_array(df, "a")
+    assert np.shares_memory(arr_before, arr_after)
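
For context on the inline review comments above, a sketch of what using the ``get_array`` helper on both sides would look like (illustrative only): ``get_array`` unwraps masked extension arrays to their underlying numpy data, unlike ``.values``, so the memory check would also cover the ``"Int64"`` parametrization.

import numpy as np
from pandas import DataFrame, Series
from pandas.tests.copy_view.util import get_array

s1 = Series([1, 2, 3], dtype="Int64")
result = DataFrame({"a": s1}, copy=False)

# get_array returns the underlying numpy array for both numpy-backed and
# masked extension-array-backed data, so np.shares_memory can compare them.
assert np.shares_memory(get_array(result, "a"), get_array(s1))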