@@ -366,16 +366,18 @@ unsafe impl<T: Send> Send for TypedArena<T> {}
 
 #[inline(always)]
 fn align_down(val: usize, align: usize) -> usize {
-    assert!(align.is_power_of_two());
+    debug_assert!(align.is_power_of_two());
     val & !(align - 1)
 }
 
 #[inline(always)]
-fn align(val: usize, align: usize) -> usize {
-    assert!(align.is_power_of_two());
+fn align_up(val: usize, align: usize) -> usize {
+    debug_assert!(align.is_power_of_two());
     (val + align - 1) & !(align - 1)
 }
 
+// Pointer alignment is common in compiler types, so keep `DroplessArena` aligned to them
+// to optimize away alignment code.
 const DROPLESS_ALIGNMENT: usize = mem::align_of::<usize>();
 
 /// An arena that can hold objects of multiple different types that impl `Copy`
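Aside (not part of the patch): both helpers use the standard power-of-two masking trick, where `!(align - 1)` clears the low bits. The `assert!` to `debug_assert!` change presumably drops the check from release builds because every caller passes an alignment that is already known to be a power of two. A minimal standalone sketch of the arithmetic, with illustrative values:

```rust
fn align_down(val: usize, align: usize) -> usize {
    debug_assert!(align.is_power_of_two());
    val & !(align - 1)
}

fn align_up(val: usize, align: usize) -> usize {
    debug_assert!(align.is_power_of_two());
    (val + align - 1) & !(align - 1)
}

fn main() {
    // With align = 8, !(8 - 1) = !0b111 clears the low three bits.
    assert_eq!(align_down(13, 8), 8); // 0b1101 -> 0b1000
    assert_eq!(align_up(13, 8), 16); // 13 + 7 = 20 = 0b10100 -> 0b10000
    // Already-aligned values pass through both helpers unchanged.
    assert_eq!(align_down(16, 8), 16);
    assert_eq!(align_up(16, 8), 16);
}
```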
@@ -390,6 +392,8 @@ pub struct DroplessArena {
     /// start. (This is slightly simpler and faster than allocating upwards,
     /// see <https://fitzgeraldnick.com/2019/11/01/always-bump-downwards.html>.)
     /// When this pointer crosses the start pointer, a new chunk is allocated.
+    ///
+    /// This is kept aligned to DROPLESS_ALIGNMENT.
     end: Cell<*mut u8>,
 
     /// A vector of arena chunks.
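For intuition about the bump-downwards scheme the doc comment links to: bumping down needs only a subtract and a mask, whereas bumping up needs an align-up plus a separate overflow check. A toy sketch over raw addresses (hypothetical names, ignoring chunk growth and the `DROPLESS_ALIGNMENT` invariant):

```rust
/// Toy downward bump allocator; names and layout are illustrative only.
struct BumpDown {
    start: usize, // lowest usable address of the current chunk
    end: usize,   // allocation cursor, moves down toward `start`
}

impl BumpDown {
    /// Carve `size` bytes aligned to `align` (a power of two) off the top.
    fn alloc(&mut self, size: usize, align: usize) -> Option<usize> {
        // Subtract first, then round down: one mask handles the alignment.
        let new_end = self.end.checked_sub(size)? & !(align - 1);
        if self.start <= new_end {
            self.end = new_end;
            Some(new_end)
        } else {
            None // out of space; the real arena would grow a new chunk here
        }
    }
}

fn main() {
    let mut arena = BumpDown { start: 0x1000, end: 0x2000 };
    let a = arena.alloc(24, 8).unwrap();
    assert_eq!(a, 0x2000 - 24);
    assert_eq!(a % 8, 0);
}
```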
@@ -433,13 +437,16 @@ impl DroplessArena {
         // Also ensure that this chunk can fit `additional`.
         new_cap = cmp::max(additional, new_cap);
 
-        let mut chunk = ArenaChunk::new(align(new_cap, PAGE));
+        let mut chunk = ArenaChunk::new(align_up(new_cap, PAGE));
         self.start.set(chunk.start());
 
         // Align the end to DROPLESS_ALIGNMENT
         let end = align_down(chunk.end().addr(), DROPLESS_ALIGNMENT);
-        // Make sure we don't go past `start`
-        let end = cmp::max(chunk.start().addr(), end);
+
+        // Make sure we don't go past `start`. This should not happen since the allocation
+        // should be at least DROPLESS_ALIGNMENT - 1 bytes.
+        debug_assert!(chunk.start().addr() <= end);
+
         self.end.set(chunk.end().with_addr(end));
 
         chunks.push(chunk);
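Not part of the patch, but a quick numeric check of why the `cmp::max` clamp can become a `debug_assert!`: chunk capacities are rounded up to `PAGE` (assumed to be 4096 here), so rounding the chunk's end address down to `DROPLESS_ALIGNMENT` discards at most `DROPLESS_ALIGNMENT - 1` bytes and can never cross `start`:

```rust
fn align_up(val: usize, align: usize) -> usize {
    (val + align - 1) & !(align - 1)
}

fn align_down(val: usize, align: usize) -> usize {
    val & !(align - 1)
}

fn main() {
    const PAGE: usize = 4096; // assumed value for illustration
    let dropless_alignment = std::mem::align_of::<usize>();

    // A chunk sized for a 1234-byte request is rounded up to a full page.
    let cap = align_up(1234, PAGE);
    let start = 0x10_0000_usize; // hypothetical chunk base address
    let end = align_down(start + cap, dropless_alignment);

    // The chunk holds at least PAGE bytes, far more than the at most
    // `dropless_alignment - 1` bytes the round-down can discard.
    assert!(start <= end);
    assert_eq!(end % dropless_alignment, 0);
}
```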
@@ -469,14 +476,16 @@ impl DroplessArena {
         let end = old_end.addr();
 
         // Align allocated bytes so that `self.end` stays aligned to DROPLESS_ALIGNMENT
-        let bytes = align(layout.size(), DROPLESS_ALIGNMENT);
+        let bytes = align_up(layout.size(), DROPLESS_ALIGNMENT);
 
         // Tell LLVM that `end` is aligned to DROPLESS_ALIGNMENT
         unsafe { intrinsics::assume(end == align_down(end, DROPLESS_ALIGNMENT)) };
 
         let new_end = align_down(end.checked_sub(bytes)?, layout.align());
         if start <= new_end {
             let new_end = old_end.with_addr(new_end);
+            // `new_end` is aligned to DROPLESS_ALIGNMENT as `align_down` preserves alignment
+            // as both `end` and `bytes` are already aligned to DROPLESS_ALIGNMENT.
             self.end.set(new_end);
             Some(new_end)
         } else {
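Again as an aside: the request size is rounded up to a multiple of `DROPLESS_ALIGNMENT` and `end` starts out aligned, so the subtraction stays aligned, and the further round-down to `layout.align()` cannot break that. A small check with hypothetical values:

```rust
fn align_up(val: usize, align: usize) -> usize {
    (val + align - 1) & !(align - 1)
}

fn align_down(val: usize, align: usize) -> usize {
    val & !(align - 1)
}

fn main() {
    let dropless_alignment = std::mem::align_of::<usize>(); // 8 on 64-bit
    let end = 0x2000_usize; // hypothetical cursor, already aligned

    // A 13-byte request with 4-byte alignment.
    let bytes = align_up(13, dropless_alignment);
    let new_end = align_down(end - bytes, 4);

    // `end` and `bytes` are both multiples of `dropless_alignment`, so their
    // difference is too; rounding down to a smaller power of two is a no-op
    // here, and rounding down to a larger one only strengthens the alignment.
    assert_eq!(new_end % dropless_alignment, 0);
}
```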