
Commit 6c29b45

Auto merge of rust-lang#116224 - nnethercote:rustc_arena-overhaul, r=cjgillot
`rustc_arena` overhaul

I find the `rustc_arena` a bit hard to read. These commits make it better.

r? `@cjgillot`
2 parents: 871407a + bb5344a

1 file changed (+51 −73 lines):

compiler/rustc_arena/src/lib.rs

@@ -172,8 +172,8 @@ impl<T, const N: usize> IterExt<T> for std::array::IntoIter<T, N> {
             return &mut [];
         }
         // Move the content to the arena by copying and then forgetting it.
+        let start_ptr = arena.alloc_raw_slice(len);
         unsafe {
-            let start_ptr = arena.alloc_raw_slice(len);
             self.as_slice().as_ptr().copy_to_nonoverlapping(start_ptr, len);
             mem::forget(self);
             slice::from_raw_parts_mut(start_ptr, len)
@@ -189,8 +189,8 @@ impl<T> IterExt<T> for Vec<T> {
             return &mut [];
         }
         // Move the content to the arena by copying and then forgetting it.
+        let start_ptr = arena.alloc_raw_slice(len);
         unsafe {
-            let start_ptr = arena.alloc_raw_slice(len);
             self.as_ptr().copy_to_nonoverlapping(start_ptr, len);
             self.set_len(0);
             slice::from_raw_parts_mut(start_ptr, len)
@@ -206,8 +206,8 @@ impl<A: smallvec::Array> IterExt<A::Item> for SmallVec<A> {
             return &mut [];
         }
         // Move the content to the arena by copying and then forgetting it.
+        let start_ptr = arena.alloc_raw_slice(len);
         unsafe {
-            let start_ptr = arena.alloc_raw_slice(len);
             self.as_ptr().copy_to_nonoverlapping(start_ptr, len);
             self.set_len(0);
             slice::from_raw_parts_mut(start_ptr, len)
@@ -250,25 +250,20 @@ impl<T> TypedArena<T> {
         available_bytes >= additional_bytes
     }
 
-    /// Ensures there's enough space in the current chunk to fit `len` objects.
     #[inline]
-    fn ensure_capacity(&self, additional: usize) {
-        if !self.can_allocate(additional) {
-            self.grow(additional);
-            debug_assert!(self.can_allocate(additional));
-        }
-    }
-
-    #[inline]
-    unsafe fn alloc_raw_slice(&self, len: usize) -> *mut T {
+    fn alloc_raw_slice(&self, len: usize) -> *mut T {
         assert!(mem::size_of::<T>() != 0);
         assert!(len != 0);
 
-        self.ensure_capacity(len);
+        // Ensure the current chunk can fit `len` objects.
+        if !self.can_allocate(len) {
+            self.grow(len);
+            debug_assert!(self.can_allocate(len));
+        }
 
         let start_ptr = self.ptr.get();
-        // SAFETY: `self.ensure_capacity` makes sure that there is enough space
-        // for `len` elements.
+        // SAFETY: `can_allocate`/`grow` ensures that there is enough space for
+        // `len` elements.
         unsafe { self.ptr.set(start_ptr.add(len)) };
         start_ptr
     }
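Taken together with the three `IterExt` hunks above, the effect is that `alloc_raw_slice` is now a safe method that does its own capacity check and grow, so callers only need `unsafe` for the raw copy itself. Below is a minimal, self-contained sketch of that shape — the names (`TinyTypedArena`, `alloc_from_vec`) and the fixed chunk size are illustrative, not the rustc_arena code, and unlike the real `TypedArena` this sketch never runs element destructors:

```rust
use std::cell::{Cell, RefCell};
use std::{mem, ptr, slice};

const MIN_CHUNK_CAP: usize = 256;

struct TinyTypedArena<T> {
    ptr: Cell<*mut T>,            // next free slot in the current chunk
    end: Cell<*mut T>,            // one past the last slot of the current chunk
    chunks: RefCell<Vec<Vec<T>>>, // keeps chunk buffers alive
}

impl<T> TinyTypedArena<T> {
    fn new() -> Self {
        // Null `ptr`/`end` means "no chunk yet", so the first allocation grows.
        TinyTypedArena {
            ptr: Cell::new(ptr::null_mut()),
            end: Cell::new(ptr::null_mut()),
            chunks: RefCell::new(Vec::new()),
        }
    }

    fn can_allocate(&self, len: usize) -> bool {
        let available_bytes = self.end.get() as usize - self.ptr.get() as usize;
        available_bytes / mem::size_of::<T>() >= len
    }

    #[inline(never)]
    #[cold]
    fn grow(&self, len: usize) {
        let mut chunk: Vec<T> = Vec::with_capacity(MIN_CHUNK_CAP.max(len));
        let start = chunk.as_mut_ptr();
        self.ptr.set(start);
        // SAFETY: one-past-the-end of the chunk's allocation is a valid offset.
        self.end.set(unsafe { start.add(chunk.capacity()) });
        self.chunks.borrow_mut().push(chunk);
    }

    // Safe, like the post-commit `alloc_raw_slice`: the capacity check and the
    // grow happen here, so reserving space needs no `unsafe` at the call site.
    fn alloc_raw_slice(&self, len: usize) -> *mut T {
        assert!(mem::size_of::<T>() != 0);
        assert!(len != 0);
        if !self.can_allocate(len) {
            self.grow(len);
            debug_assert!(self.can_allocate(len));
        }
        let start_ptr = self.ptr.get();
        // SAFETY: the check above guarantees room for `len` elements.
        unsafe { self.ptr.set(start_ptr.add(len)) };
        start_ptr
    }

    // Mirrors the `IterExt for Vec<T>` impl: reservation outside `unsafe`,
    // only the raw copy and `set_len(0)` inside it.
    fn alloc_from_vec(&self, mut v: Vec<T>) -> &mut [T] {
        let len = v.len();
        if len == 0 {
            return &mut [];
        }
        let start_ptr = self.alloc_raw_slice(len);
        unsafe {
            v.as_ptr().copy_to_nonoverlapping(start_ptr, len);
            v.set_len(0); // the moved-out values now live in the arena
            slice::from_raw_parts_mut(start_ptr, len)
        }
    }
}

fn main() {
    let arena = TinyTypedArena::new();
    let nums = arena.alloc_from_vec(vec![1u32, 2, 3]);
    nums[0] = 10;
    println!("{nums:?}");
}
```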
@@ -407,6 +402,8 @@ impl Default for DroplessArena {
     #[inline]
     fn default() -> DroplessArena {
         DroplessArena {
+            // We set both `start` and `end` to 0 so that the first call to
+            // alloc() will trigger a grow().
             start: Cell::new(ptr::null_mut()),
             end: Cell::new(ptr::null_mut()),
             chunks: Default::default(),
@@ -415,9 +412,11 @@ impl Default for DroplessArena {
 }
 
 impl DroplessArena {
+    #[inline(never)]
+    #[cold]
     fn grow(&self, layout: Layout) {
         // Add some padding so we can align `self.end` while
-        // stilling fitting in a `layout` allocation.
+        // still fitting in a `layout` allocation.
         let additional = layout.size() + cmp::max(DROPLESS_ALIGNMENT, layout.align()) - 1;
 
         unsafe {
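A side note on the attributes this hunk moves onto `grow`: `#[cold]` plus `#[inline(never)]` is the usual way to keep a hot path small and inlinable while pushing the rare path out of line. A toy illustration of that convention, unrelated to the arena itself:

```rust
use std::cell::Cell;

struct Counter {
    budget: Cell<u32>,
}

impl Counter {
    // Hot path: cheap check, stays inlinable.
    #[inline]
    fn take(&self) -> u32 {
        if self.budget.get() == 0 {
            self.refill(); // rare case, kept out of line
        }
        let b = self.budget.get();
        self.budget.set(b - 1);
        b
    }

    // Cold path: never inlined into callers, so it doesn't bloat them.
    #[inline(never)]
    #[cold]
    fn refill(&self) {
        self.budget.set(100);
    }
}

fn main() {
    let c = Counter { budget: Cell::new(0) };
    assert_eq!(c.take(), 100);
    assert_eq!(c.take(), 99);
}
```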
@@ -441,7 +440,7 @@ impl DroplessArena {
             let mut chunk = ArenaChunk::new(align_up(new_cap, PAGE));
             self.start.set(chunk.start());
 
-            // Align the end to DROPLESS_ALIGNMENT
+            // Align the end to DROPLESS_ALIGNMENT.
             let end = align_down(chunk.end().addr(), DROPLESS_ALIGNMENT);
 
             // Make sure we don't go past `start`. This should not happen since the allocation
@@ -454,69 +453,48 @@ impl DroplessArena {
         }
     }
 
-    #[inline(never)]
-    #[cold]
-    fn grow_and_alloc_raw(&self, layout: Layout) -> *mut u8 {
-        self.grow(layout);
-        self.alloc_raw_without_grow(layout).unwrap()
-    }
-
-    #[inline(never)]
-    #[cold]
-    fn grow_and_alloc<T>(&self) -> *mut u8 {
-        self.grow_and_alloc_raw(Layout::new::<T>())
-    }
-
-    /// Allocates a byte slice with specified layout from the current memory
-    /// chunk. Returns `None` if there is no free space left to satisfy the
-    /// request.
-    #[inline]
-    fn alloc_raw_without_grow(&self, layout: Layout) -> Option<*mut u8> {
-        let start = self.start.get().addr();
-        let old_end = self.end.get();
-        let end = old_end.addr();
-
-        // Align allocated bytes so that `self.end` stays aligned to DROPLESS_ALIGNMENT
-        let bytes = align_up(layout.size(), DROPLESS_ALIGNMENT);
-
-        // Tell LLVM that `end` is aligned to DROPLESS_ALIGNMENT
-        unsafe { intrinsics::assume(end == align_down(end, DROPLESS_ALIGNMENT)) };
-
-        let new_end = align_down(end.checked_sub(bytes)?, layout.align());
-        if start <= new_end {
-            let new_end = old_end.with_addr(new_end);
-            // `new_end` is aligned to DROPLESS_ALIGNMENT as `align_down` preserves alignment
-            // as both `end` and `bytes` are already aligned to DROPLESS_ALIGNMENT.
-            self.end.set(new_end);
-            Some(new_end)
-        } else {
-            None
-        }
-    }
-
     #[inline]
     pub fn alloc_raw(&self, layout: Layout) -> *mut u8 {
         assert!(layout.size() != 0);
-        if let Some(a) = self.alloc_raw_without_grow(layout) {
-            return a;
+
+        // This loop executes once or twice: if allocation fails the first
+        // time, the `grow` ensures it will succeed the second time.
+        loop {
+            let start = self.start.get().addr();
+            let old_end = self.end.get();
+            let end = old_end.addr();
+
+            // Align allocated bytes so that `self.end` stays aligned to
+            // DROPLESS_ALIGNMENT.
+            let bytes = align_up(layout.size(), DROPLESS_ALIGNMENT);
+
+            // Tell LLVM that `end` is aligned to DROPLESS_ALIGNMENT.
+            unsafe { intrinsics::assume(end == align_down(end, DROPLESS_ALIGNMENT)) };
+
+            if let Some(sub) = end.checked_sub(bytes) {
+                let new_end = align_down(sub, layout.align());
+                if start <= new_end {
+                    let new_end = old_end.with_addr(new_end);
+                    // `new_end` is aligned to DROPLESS_ALIGNMENT as `align_down`
+                    // preserves alignment as both `end` and `bytes` are already
+                    // aligned to DROPLESS_ALIGNMENT.
+                    self.end.set(new_end);
+                    return new_end;
+                }
+            }
+
+            // No free space left. Allocate a new chunk to satisfy the request.
+            // On failure the grow will panic or abort.
+            self.grow(layout);
         }
-        // No free space left. Allocate a new chunk to satisfy the request.
-        // On failure the grow will panic or abort.
-        self.grow_and_alloc_raw(layout)
     }
 
     #[inline]
     pub fn alloc<T>(&self, object: T) -> &mut T {
         assert!(!mem::needs_drop::<T>());
         assert!(mem::size_of::<T>() != 0);
 
-        let mem = if let Some(a) = self.alloc_raw_without_grow(Layout::for_value::<T>(&object)) {
-            a
-        } else {
-            // No free space left. Allocate a new chunk to satisfy the request.
-            // On failure the grow will panic or abort.
-            self.grow_and_alloc::<T>()
-        } as *mut T;
+        let mem = self.alloc_raw(Layout::new::<T>()) as *mut T;
 
         unsafe {
             // Write into uninitialized memory.
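Stripped of the rustc specifics, the new `alloc_raw` is a downward bump allocator whose loop body either hands out memory or grows a fresh chunk so the retry succeeds. Here is a standalone sketch of that try/grow/retry shape under simplifying assumptions (fixed chunk size, a single `ALIGNMENT` instead of per-layout alignment, made-up names — not the real `DroplessArena`):

```rust
use std::cell::{Cell, RefCell};

const CHUNK_SIZE: usize = 4096;
const ALIGNMENT: usize = 16; // stand-in for DROPLESS_ALIGNMENT

fn align_down(n: usize, align: usize) -> usize {
    n & !(align - 1)
}

fn align_up(n: usize, align: usize) -> usize {
    (n + align - 1) & !(align - 1)
}

struct BumpDownArena {
    start: Cell<usize>,            // lowest usable address of the current chunk
    end: Cell<usize>,              // bump pointer; allocation moves it downward
    chunks: RefCell<Vec<Vec<u8>>>, // keeps chunk buffers alive
}

impl BumpDownArena {
    fn new() -> Self {
        // start == end == 0 means the first allocation cannot fit and will grow.
        BumpDownArena { start: Cell::new(0), end: Cell::new(0), chunks: RefCell::new(Vec::new()) }
    }

    #[inline(never)]
    #[cold]
    fn grow(&self) {
        let mut chunk = vec![0u8; CHUNK_SIZE];
        let start = chunk.as_mut_ptr() as usize;
        self.start.set(start);
        // Keep `end` aligned so every bump preserves the invariant.
        self.end.set(align_down(start + CHUNK_SIZE, ALIGNMENT));
        self.chunks.borrow_mut().push(chunk);
    }

    // Executes once or twice: if the bump fails, `grow` installs a chunk big
    // enough (requests are capped below) and the retry succeeds.
    fn alloc_raw(&self, size: usize) -> *mut u8 {
        assert!(size != 0 && size <= CHUNK_SIZE - ALIGNMENT);
        loop {
            let start = self.start.get();
            let end = self.end.get();
            // Round the request up so `end` stays aligned after the bump.
            let bytes = align_up(size, ALIGNMENT);
            if let Some(new_end) = end.checked_sub(bytes) {
                if start <= new_end {
                    self.end.set(new_end);
                    return new_end as *mut u8;
                }
            }
            // Out of space (or no chunk yet): allocate a chunk and retry.
            self.grow();
        }
    }
}

fn main() {
    let arena = BumpDownArena::new();
    let a = arena.alloc_raw(24);
    let b = arena.alloc_raw(40);
    assert_ne!(a, b);
    println!("{a:p} {b:p}");
}
```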
@@ -713,10 +691,10 @@ pub macro declare_arena([$($a:tt $name:ident: $ty:ty,)*]) {
         }
 
         #[allow(clippy::mut_from_ref)]
-        pub fn alloc_from_iter<'a, T: ArenaAllocatable<'tcx, C>, C>(
-            &'a self,
+        pub fn alloc_from_iter<T: ArenaAllocatable<'tcx, C>, C>(
+            &self,
             iter: impl ::std::iter::IntoIterator<Item = T>,
-        ) -> &'a mut [T] {
+        ) -> &mut [T] {
             T::allocate_from_iter(self, iter)
         }
     }
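The final hunk only drops a lifetime that elision supplies anyway: with plain `&self` in the signature, the returned `&mut [T]` is still tied to `self`, which is why the `clippy::mut_from_ref` allow stays. A tiny, unrelated example of the elision rule, using `&mut self` to keep it trivially sound:

```rust
struct Bucket(Vec<u8>);

impl Bucket {
    // Explicit lifetime: the returned reference borrows from `self` for 'a.
    fn first_explicit<'a>(&'a mut self) -> &'a mut u8 {
        &mut self.0[0]
    }

    // Elided form: identical meaning; the output lifetime is taken from `self`.
    fn first_elided(&mut self) -> &mut u8 {
        &mut self.0[0]
    }
}

fn main() {
    let mut b = Bucket(vec![7, 8, 9]);
    *b.first_explicit() += 1;
    *b.first_elided() += 1;
    assert_eq!(b.0[0], 9);
}
```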
