ENH: Add lazy copy for take and between_time #50476
@@ -481,6 +481,39 @@ def test_assign_drop_duplicates(using_copy_on_write, method):
    tm.assert_frame_equal(df, df_orig)


@pytest.mark.parametrize("obj", [Series([1, 2]), DataFrame({"a": [1, 2]})])
def test_take(using_copy_on_write, obj):
    obj_orig = obj.copy()
Reviewer: Small nitpick: can you add a comment here noting that this tests the corner case of taking all rows? (Because in general, take by definition always returns a copy.)

Author: Added
    obj2 = obj.take([0, 1])

    if using_copy_on_write:
        assert np.shares_memory(obj2.values, obj.values)
    else:
        assert not np.shares_memory(obj2.values, obj.values)

    obj2.iloc[0] = 0
    if using_copy_on_write:
        assert not np.shares_memory(obj2.values, obj.values)
    tm.assert_equal(obj, obj_orig)

@pytest.mark.parametrize("obj", [Series([1, 2]), DataFrame({"a": [1, 2]})])
def test_between_time(using_copy_on_write, obj):
    obj.index = date_range("2018-04-09", periods=2, freq="1D20min")
    obj_orig = obj.copy()
    obj2 = obj.between_time("0:00", "1:00")

    if using_copy_on_write:
        assert np.shares_memory(obj2.values, obj.values)
    else:
        assert not np.shares_memory(obj2.values, obj.values)

    obj2.iloc[0] = 0
    if using_copy_on_write:
        assert not np.shares_memory(obj2.values, obj.values)
    tm.assert_equal(obj, obj_orig)

def test_reindex_like(using_copy_on_write):
    df = DataFrame({"a": [1, 2], "b": "a"})
    other = DataFrame({"b": "a", "a": [1, 2]})
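For quick reference, here is a standalone sketch of the behavior the new tests pin down. It assumes a pandas build that includes this change, with the experimental mode.copy_on_write option enabled; the non-identity take at the end is inferred from the review discussion rather than shown in this diff.

import numpy as np
import pandas as pd

pd.set_option("mode.copy_on_write", True)

df = pd.DataFrame({"a": [1, 2]})

# Corner case from the review comment above: taking all rows in their
# original order can share memory with the parent instead of copying.
view = df.take([0, 1])
assert np.shares_memory(view["a"].values, df["a"].values)

# Writing into the result triggers the deferred copy; the parent is untouched.
view.iloc[0, 0] = 0
assert not np.shares_memory(view["a"].values, df["a"].values)
assert df["a"].tolist() == [1, 2]

# Any other indexer materializes new data by construction, so nothing is shared.
shuffled = df.take([1, 0])
assert not np.shares_memory(shuffled["a"].values, df["a"].values)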
Reviewer: Further idea to potentially speed this up: right now we only use array_equal_fast to check an array against a standard 0...n indexer, I think? For that specific case, we don't actually need to create this second array; we could just use the iteration variable inside array_equal_fast (it only needs the length). (Now, I don't know if we would want to start using array_equal_fast for other cases.)

Reviewer: (That also avoids creating this additional array even for the fast cases, where the length doesn't even match.)

Author: I'll take a look at this in a follow-up, if that's ok, to check how big the performance improvement would be.

Reviewer: Sure
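A rough sketch of what this suggestion could look like, written in pure Python for illustration (the helper name is_range_indexer is hypothetical here; the real array_equal_fast is compiled code in pandas._libs): instead of materializing np.arange(n) as a second array, compare the indexer against the loop variable, and bail out on a length mismatch before comparing anything.

import numpy as np

def is_range_indexer(indexer: np.ndarray, n: int) -> bool:
    # Hypothetical helper: True if `indexer` is exactly [0, 1, ..., n-1].
    # No second array is ever allocated; only the target length is needed.
    if len(indexer) != n:
        # A length mismatch rules out a 0..n-1 indexer with no element
        # comparisons at all, covering the "fast cases" mentioned above.
        return False
    for i in range(n):
        # Compare against the iteration variable instead of arange(n)[i].
        if indexer[i] != i:
            return False
    return True

assert is_range_indexer(np.array([0, 1, 2]), 3)
assert not is_range_indexer(np.array([0, 2, 1]), 3)
assert not is_range_indexer(np.array([0, 1]), 3)

Since the identity-indexer check is the only current caller pattern, a helper along these lines would keep the hot path of take from allocating a throwaway comparison array.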