6 | 6 | //! The specialization in this module applies to iterators in the shape of
7 | 7 | //! `source.adapter().adapter().adapter().collect::<Vec<U>>()`
8 | 8 | //! where `source` is an owning iterator obtained from [`Vec<T>`], [`Box<[T]>`][box] (by conversion to `Vec`)
9 |  | -//! or [`BinaryHeap<T>`], the adapters each consume one or more items per step
10 |  | -//! (represented by [`InPlaceIterable`]), provide transitive access to `source` (via [`SourceIter`])
11 |  | -//! and thus the underlying allocation. And finally the layouts of `T` and `U` must
12 |  | -//! have the same size and alignment, this is currently ensured via const eval instead of trait bounds
13 |  | -//! in the specialized [`SpecFromIter`] implementation.
| 9 | +//! or [`BinaryHeap<T>`], the adapters guarantee to consume enough items per step to make room |
| 10 | +//! for the results (represented by [`InPlaceIterable`]), provide transitive access to `source` |
| 11 | +//! (via [`SourceIter`]) and thus the underlying allocation. |
| 12 | +//! And finally there are alignment and size constraints to consider; these are currently ensured via
| 13 | +//! const eval instead of trait bounds in the specialized [`SpecFromIter`] implementation. |
14 | 14 | //!
15 | 15 | //! [`BinaryHeap<T>`]: crate::collections::BinaryHeap
16 | 16 | //! [box]: crate::boxed::Box
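For orientation, here is a minimal sketch of the pipeline shape the doc comment above describes. Whether the allocation is actually recycled is an internal optimization, so the pointer comparison below is illustrative only and not something to rely on:

```rust
fn main() {
    let src: Vec<u32> = (0..1024).collect();
    let addr_before = src.as_ptr() as usize;

    // `source.adapter().collect::<Vec<U>>()`: owning iterator + in-place-capable adapter.
    let dst: Vec<u32> = src.into_iter().map(|x| x.wrapping_mul(2)).collect();

    // With the specialization the original buffer is typically reused here (same size,
    // same alignment, no splitting or merging). This is an optimization, not a guarantee.
    println!("allocation reused: {}", addr_before == dst.as_ptr() as usize);
}
```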
35 | 35 | //! the step of reading a value and getting a reference to write to. Instead raw pointers must be
36 | 36 | //! used on the reader and writer side.
37 | 37 | //!
38 |  | -//! That writes never clobber a yet-to-be-read item is ensured by the [`InPlaceIterable`] requirements.
 | 38 | +//! That writes never clobber yet-to-be-read items is ensured by the [`InPlaceIterable`] requirements.
39 | 39 | //!
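A simplified sketch of that read-then-write pattern for the same-size case (the helper name is assumed for illustration, not the actual implementation): each element is moved out through a raw pointer before its slot is overwritten, which is why a write can never clobber an unread item.

```rust
use std::{mem, ptr};

// Sketch only: with equally sized and aligned `T` and `U`, each step reads one `T`
// through a raw pointer and writes the mapped `U` back into the same slot.
unsafe fn map_in_place_same_size<T, U, F: FnMut(T) -> U>(base: *mut T, len: usize, mut f: F) {
    assert_eq!(mem::size_of::<T>(), mem::size_of::<U>());
    assert_eq!(mem::align_of::<T>(), mem::align_of::<U>());
    let dst = base.cast::<U>();
    for i in 0..len {
        // `read` moves the value out; the slot is then free to be overwritten with the output.
        let item = ptr::read(base.add(i));
        ptr::write(dst.add(i), f(item));
    }
}

fn main() {
    let mut v = vec![1i32, 2, 3];
    let len = v.len();
    // i32 -> u32 keeps size and alignment, so writing back into the same buffer is fine here.
    unsafe { map_in_place_same_size(v.as_mut_ptr(), len, |x: i32| (x as u32) * 2) };
    // The buffer still belongs to `v`; the real specialization would rebuild the Vec over `U`.
    assert_eq!(v, [2, 4, 6]);
}
```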
40 | 40 | //! # Layout constraints
41 | 41 | //!
42 |  | -//! [`Allocator`] requires that `allocate()` and `deallocate()` have matching alignment and size.
| 42 | +//! When recycling an allocation between different types we must uphold the [`Allocator`] contract |
| 43 | +//! which means that the input and output Layouts have to "fit". |
| 44 | +//! |
| 45 | +//! To complicate things further `InPlaceIterable` supports splitting or merging items into smaller/ |
| 46 | +//! larger ones to enable (de)aggregation of arrays. |
| 47 | +//! |
| 48 | +//! Ultimately each step of the iterator must free up enough *bytes* in the source to make room |
| 49 | +//! for the next output item. |
| 50 | +//! If `T` and `U` have the same size no fixup is needed. |
| 51 | +//! If `T`'s size is a multiple of `U`'s we can compensate by multiplying the capacity accordingly. |
| 52 | +//! Otherwise the input capacity (and thus layout) in bytes may not be representable by the output |
| 53 | +//! `Vec<U>`. In that case `alloc.shrink()` is used to update the allocation's layout. |
| 54 | +//! |
| 55 | +//! Currently alignments of `T` and `U` must be the same. In principle smaller output alignments |
| 56 | +//! could be supported but that would require always calling `alloc.shrink` for those transformations. |
| 57 | +//! |
| 58 | +//! See `in_place_collectible()` for the current conditions. |
| 59 | +//! |
43 | 60 | //! Additionally this specialization doesn't make sense for ZSTs as there is no reallocation to
44 | 61 | //! avoid and it would make pointer arithmetic more difficult.
45 | 62 | //!
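A concrete, hedged example of the "size is a multiple" case from the doc comment above, using the `1 x [u8; 4] -> 4 x u8` split via `flatten` that the code below also cites; whether the buffer is actually reused stays an internal detail:

```rust
fn main() {
    // SRC = [u8; 4] (4 bytes, align 1), DEST = u8 (1 byte, align 1): alignments match and
    // SRC's size is a multiple of DEST's, so the capacity can simply be scaled by 4 when
    // the buffer is recycled. No `alloc.shrink()` fixup is needed for this case.
    let chunks: Vec<[u8; 4]> = vec![[1, 2, 3, 4], [5, 6, 7, 8]];
    let flat: Vec<u8> = chunks.into_iter().flatten().collect();
    assert_eq!(flat, [1, 2, 3, 4, 5, 6, 7, 8]);
}
```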
@@ -163,7 +180,7 @@ const fn in_place_collectible<DEST, SRC>(
163 | 180 | // - 4 x u8 -> 1x [u8; 4], via array_chunks
164 | 181 | mem::size_of::<SRC>() * step_merge.get() == mem::size_of::<DEST>() * step_expand.get()
165 | 182 | }
166 |  | - // Fall back to other from_iter impls if an overflow occured in the step merge/expansion
 | 183 | + // Fall back to other from_iter impls if an overflow occurred in the step merge/expansion
167 | 184 | // tracking.
168 | 185 | _ => false,
169 | 186 | }
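To restate the byte-balance check from the hunk above in isolation (the function name here is assumed for illustration, not the crate's private API): merging `step_merge` source items must free exactly as many bytes as `step_expand` destination items occupy.

```rust
use std::mem;
use std::num::NonZeroUsize;

// Standalone restatement of the equality checked inside `in_place_collectible` above;
// `steps_balance` is an assumed name used only for this sketch.
const fn steps_balance<SRC, DEST>(step_merge: NonZeroUsize, step_expand: NonZeroUsize) -> bool {
    mem::size_of::<SRC>() * step_merge.get() == mem::size_of::<DEST>() * step_expand.get()
}

fn main() {
    let one = NonZeroUsize::new(1).unwrap();
    let four = NonZeroUsize::new(4).unwrap();

    // 4 x u8 -> 1 x [u8; 4] (via array_chunks): 1 * 4 == 4 * 1.
    assert!(steps_balance::<u8, [u8; 4]>(four, one));
    // 1 x [u8; 4] -> 4 x u8 (via flatten): 4 * 1 == 1 * 4.
    assert!(steps_balance::<[u8; 4], u8>(one, four));
    // 3 x u8 -> 1 x u16 does not balance: 3 bytes freed vs. 2 bytes needed.
    assert!(!steps_balance::<u8, u16>(NonZeroUsize::new(3).unwrap(), one));
}
```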