
Commit f7d8f5b

Rollup merge of rust-lang#96162 - RalfJung:mark-uninit, r=oli-obk
interpret: Fix writing uninit to an allocation

When calling `mark_init`, we need to also be mindful of what happens with the relocations! Specifically, when we de-init memory, we need to clear relocations in that range as well or else strange things will happen (and printing will not show the de-init, since relocations take precedence there). Fixes rust-lang/miri#2068.

Here's the Miri testcase that this fixes (requires `-Zmiri-disable-validation`):

```rust
use std::mem::MaybeUninit;

fn main() {
    unsafe {
        let mut x = MaybeUninit::<i64>::uninit();
        // Put in a ptr.
        x.as_mut_ptr().cast::<&i32>().write_unaligned(&0);
        // Overwrite parts of that pointer with 'uninit' through a Scalar.
        let ptr = x.as_mut_ptr().cast::<i32>();
        *ptr = MaybeUninit::uninit().assume_init();
        // Reading this back should hence work fine.
        let _c = *ptr;
    }
}
```

Previously this failed with

```
error: unsupported operation: unable to turn pointer into raw bytes
  --> ../miri/uninit.rs:11:14
   |
11 |     let _c = *ptr;
   |              ^^^^ unable to turn pointer into raw bytes
   |
   = help: this is likely not a bug in the program; it indicates that the program performed an operation that the interpreter does not support
   = note: inside `main` at ../miri/uninit.rs:11:14
```
2 parents 113f079 + 05489e7 commit f7d8f5b
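
To see why clearing relocations matters here, the following is a minimal self-contained sketch of the idea, not the `rustc_middle` types: the `ToyAlloc` struct, its fields, and the fixed 8-byte pointer size are invented stand-ins. An allocation tracks an init mask plus a relocation map; a de-init that only flips the init mask leaves a stale relocation behind, which then blocks a later raw read, much like the Miri error above. A `write_uninit` that also drops overlapping relocations avoids that.

```rust
use std::collections::BTreeMap;
use std::ops::Range;

/// Toy stand-in for an interpreter allocation: a byte buffer plus an
/// initialization mask and a map of relocations (pointer provenance).
struct ToyAlloc {
    bytes: Vec<u8>,
    init: Vec<bool>,                   // one flag per byte
    relocations: BTreeMap<usize, u64>, // offset -> provenance tag of a stored pointer
}

const PTR_SIZE: usize = 8; // assume 8-byte pointers for this sketch

impl ToyAlloc {
    /// The buggy behaviour: de-init only flips the init mask.
    fn mark_uninit_only(&mut self, range: Range<usize>) {
        for i in range {
            self.init[i] = false;
        }
    }

    /// The fixed behaviour: de-init *and* drop any relocation overlapping the range.
    fn write_uninit(&mut self, range: Range<usize>) {
        for i in range.clone() {
            self.init[i] = false;
        }
        self.relocations.retain(|&offset, _| {
            let ptr_bytes = offset..offset + PTR_SIZE;
            ptr_bytes.end <= range.start || ptr_bytes.start >= range.end
        });
    }

    /// Reading bytes fails if a relocation overlaps the range (mirroring
    /// "unable to turn pointer into raw bytes"); uninit bytes read back as `None`.
    fn read_raw(&self, range: Range<usize>) -> Result<Vec<Option<u8>>, &'static str> {
        for (&offset, _tag) in &self.relocations {
            let ptr_bytes = offset..offset + PTR_SIZE;
            if ptr_bytes.start < range.end && range.start < ptr_bytes.end {
                return Err("unable to turn pointer into raw bytes");
            }
        }
        Ok(range.map(|i| self.init[i].then_some(self.bytes[i])).collect())
    }
}

fn main() {
    let mut alloc = ToyAlloc {
        bytes: vec![0; 8],
        init: vec![true; 8],
        relocations: BTreeMap::from([(0, 42)]), // a pointer stored at offset 0
    };

    // Buggy path: the stale relocation survives the de-init and blocks the read.
    alloc.mark_uninit_only(0..4);
    assert!(alloc.read_raw(0..4).is_err());

    // Fixed path: de-init also clears the overlapping relocation, so the read
    // succeeds and simply reports the bytes as uninitialized (`None`).
    alloc.write_uninit(0..4);
    assert_eq!(alloc.read_raw(0..4), Ok(vec![None; 4]));
}
```

With the fix, the final read just returns uninitialized bytes, which is what the testcase above expects under `-Zmiri-disable-validation`.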

4 files changed (+38 -13 lines)

compiler/rustc_const_eval/src/interpret/memory.rs (+9 -4)

@@ -892,8 +892,11 @@ impl<'tcx, 'a, Tag: Provenance, Extra> AllocRefMut<'a, 'tcx, Tag, Extra> {
     }
 
     /// Mark the entire referenced range as uninitalized
-    pub fn write_uninit(&mut self) {
-        self.alloc.mark_init(self.range, false);
+    pub fn write_uninit(&mut self) -> InterpResult<'tcx> {
+        Ok(self
+            .alloc
+            .write_uninit(&self.tcx, self.range)
+            .map_err(|e| e.to_interp_error(self.alloc_id))?)
     }
 }
 
@@ -1053,8 +1056,10 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             // This also avoids writing to the target bytes so that the backing allocation is never
             // touched if the bytes stay uninitialized for the whole interpreter execution. On contemporary
             // operating system this can avoid physically allocating the page.
-            dest_alloc.mark_init(dest_range, false); // `Size` multiplication
-            dest_alloc.mark_relocation_range(relocations);
+            dest_alloc
+                .write_uninit(&tcx, dest_range)
+                .map_err(|e| e.to_interp_error(dest_alloc_id))?;
+            // We can forget about the relocations, this is all not initialized anyway.
             return Ok(());
         }
 

compiler/rustc_const_eval/src/interpret/place.rs (+1 -1)

@@ -823,7 +823,7 @@ where
             // Zero-sized access
            return Ok(());
        };
-        alloc.write_uninit();
+        alloc.write_uninit()?;
        Ok(())
    }
 

compiler/rustc_middle/src/mir/interpret/allocation.rs (+27 -8)

@@ -269,7 +269,7 @@ impl<Tag: Provenance, Extra> Allocation<Tag, Extra> {
     /// `get_bytes_with_uninit_and_ptr` instead,
     ///
     /// This function also guarantees that the resulting pointer will remain stable
-    /// even when new allocations are pushed to the `HashMap`. `copy_repeatedly` relies
+    /// even when new allocations are pushed to the `HashMap`. `mem_copy_repeatedly` relies
     /// on that.
     ///
     /// It is the caller's responsibility to check bounds and alignment beforehand.
@@ -429,8 +429,7 @@ impl<Tag: Provenance, Extra> Allocation<Tag, Extra> {
         let val = match val {
             ScalarMaybeUninit::Scalar(scalar) => scalar,
             ScalarMaybeUninit::Uninit => {
-                self.mark_init(range, false);
-                return Ok(());
+                return self.write_uninit(cx, range);
             }
         };
 
@@ -455,6 +454,13 @@ impl<Tag: Provenance, Extra> Allocation<Tag, Extra> {
 
         Ok(())
     }
+
+    /// Write "uninit" to the given memory range.
+    pub fn write_uninit(&mut self, cx: &impl HasDataLayout, range: AllocRange) -> AllocResult {
+        self.mark_init(range, false);
+        self.clear_relocations(cx, range)?;
+        return Ok(());
+    }
 }
 
 /// Relocations.
@@ -561,8 +567,10 @@ impl<Tag> Deref for Relocations<Tag> {
 }
 
 /// A partial, owned list of relocations to transfer into another allocation.
+///
+/// Offsets are already adjusted to the destination allocation.
 pub struct AllocationRelocations<Tag> {
-    relative_relocations: Vec<(Size, Tag)>,
+    dest_relocations: Vec<(Size, Tag)>,
 }
 
 impl<Tag: Copy, Extra> Allocation<Tag, Extra> {
@@ -575,12 +583,17 @@ impl<Tag: Copy, Extra> Allocation<Tag, Extra> {
     ) -> AllocationRelocations<Tag> {
         let relocations = self.get_relocations(cx, src);
         if relocations.is_empty() {
-            return AllocationRelocations { relative_relocations: Vec::new() };
+            return AllocationRelocations { dest_relocations: Vec::new() };
         }
 
         let size = src.size;
         let mut new_relocations = Vec::with_capacity(relocations.len() * (count as usize));
 
+        // If `count` is large, this is rather wasteful -- we are allocating a big array here, which
+        // is mostly filled with redundant information since it's just N copies of the same `Tag`s
+        // at slightly adjusted offsets. The reason we do this is so that in `mark_relocation_range`
+        // we can use `insert_presorted`. That wouldn't work with an `Iterator` that just produces
+        // the right sequence of relocations for all N copies.
         for i in 0..count {
             new_relocations.extend(relocations.iter().map(|&(offset, reloc)| {
                 // compute offset for current repetition
@@ -593,14 +606,17 @@ impl<Tag: Copy, Extra> Allocation<Tag, Extra> {
             }));
         }
 
-        AllocationRelocations { relative_relocations: new_relocations }
+        AllocationRelocations { dest_relocations: new_relocations }
     }
 
     /// Applies a relocation copy.
     /// The affected range, as defined in the parameters to `prepare_relocation_copy` is expected
     /// to be clear of relocations.
+    ///
+    /// This is dangerous to use as it can violate internal `Allocation` invariants!
+    /// It only exists to support an efficient implementation of `mem_copy_repeatedly`.
     pub fn mark_relocation_range(&mut self, relocations: AllocationRelocations<Tag>) {
-        self.relocations.0.insert_presorted(relocations.relative_relocations);
+        self.relocations.0.insert_presorted(relocations.dest_relocations);
     }
 }
 
@@ -1056,7 +1072,7 @@ impl<Tag: Copy, Extra> Allocation<Tag, Extra> {
         })
     }
 
-    pub fn mark_init(&mut self, range: AllocRange, is_init: bool) {
+    fn mark_init(&mut self, range: AllocRange, is_init: bool) {
         if range.size.bytes() == 0 {
             return;
         }
@@ -1118,6 +1134,9 @@ impl<Tag, Extra> Allocation<Tag, Extra> {
     }
 
     /// Applies multiple instances of the run-length encoding to the initialization mask.
+    ///
+    /// This is dangerous to use as it can violate internal `Allocation` invariants!
+    /// It only exists to support an efficient implementation of `mem_copy_repeatedly`.
     pub fn mark_compressed_init_range(
         &mut self,
         defined: &InitMaskCompressed,
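
The new comment on `prepare_relocation_copy` explains why the relocation list for all N repetitions is materialized up front: the result is already sorted by destination offset, so `mark_relocation_range` can bulk-insert it with `insert_presorted`. Below is a rough standalone sketch of that offset arithmetic; the function name, parameter names, and plain integer types are hypothetical, while the real code works with `Size` offsets and provenance `Tag`s.

```rust
// Hypothetical standalone sketch (names and integer types invented) of the
// offset arithmetic in `prepare_relocation_copy`: every source relocation is
// re-based once per repetition, so the resulting list is already sorted by
// destination offset and can be bulk-inserted in one go -- the role that
// `insert_presorted` plays in the real `mark_relocation_range`.
fn prepare_dest_relocations(
    src_relocations: &[(u64, u32)], // (offset within the source allocation, tag)
    src_start: u64,                 // start of the copied source range
    src_size: u64,                  // size of the copied source range
    dest_start: u64,                // start of the destination range
    count: u64,                     // number of repetitions
) -> Vec<(u64, u32)> {
    let mut out = Vec::with_capacity(src_relocations.len() * count as usize);
    for i in 0..count {
        out.extend(src_relocations.iter().map(|&(offset, tag)| {
            // Shift the relocation from its source position into the i-th copy.
            (offset - src_start + dest_start + i * src_size, tag)
        }));
    }
    out
}

fn main() {
    // One relocation at source offset 16; a 32-byte range starting at offset 8
    // is copied three times to a destination starting at offset 0.
    let relocs = prepare_dest_relocations(&[(16, 7)], 8, 32, 0, 3);
    assert_eq!(relocs, vec![(8, 7), (40, 7), (72, 7)]);
}
```

Each repetition shifts the same source relocations by one more multiple of the copied range's size, which is why the combined list stays sorted without any extra sorting step.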

compiler/rustc_middle/src/mir/pretty.rs (+1 -0)

@@ -851,6 +851,7 @@ fn write_allocation_bytes<'tcx, Tag: Provenance, Extra>(
         }
         if let Some(&tag) = alloc.relocations().get(&i) {
             // Memory with a relocation must be defined
+            assert!(alloc.init_mask().is_range_initialized(i, i + ptr_size).is_ok());
            let j = i.bytes_usize();
            let offset = alloc
                .inspect_with_uninit_and_ptr_outside_interpreter(j..j + ptr_size.bytes_usize());
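
The new assert in `write_allocation_bytes` makes the pretty-printer's assumption explicit: any byte range covered by a relocation must also be fully initialized, since relocations take precedence when rendering. Here is a small sketch of that invariant check using invented stand-in types, a per-byte `bool` mask and a `BTreeMap`, rather than the real `InitMask` and relocation map.

```rust
use std::collections::BTreeMap;

/// Sketch of the invariant the new assert enforces: every byte covered by a
/// relocation (a stored pointer) must also be marked initialized. The mask
/// and map types here are stand-ins, not the rustc_middle ones.
fn check_relocations_initialized(
    init: &[bool],                      // per-byte initialization mask
    relocations: &BTreeMap<usize, u64>, // offset -> provenance tag
    ptr_size: usize,
) {
    for (&offset, _tag) in relocations {
        assert!(
            init[offset..offset + ptr_size].iter().all(|&b| b),
            "memory with a relocation must be fully initialized"
        );
    }
}

fn main() {
    let mut init = vec![true; 8];
    let relocations = BTreeMap::from([(0usize, 1u64)]);

    // A fully initialized pointer passes the check.
    check_relocations_initialized(&init, &relocations, 8);

    // De-initializing part of the pointer *without* dropping its relocation is
    // exactly the state this commit makes unreachable; the check would panic:
    init[0] = false;
    // check_relocations_initialized(&init, &relocations, 8);
}
```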
