Skip to content

Fix volatile loads and stores #572

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 6 commits into from
Nov 19, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion libgccjit.version
Original file line number Diff line number Diff line change
@@ -1 +1 @@
29901846ff610daab8a80436cfe36e93b4b5aa1e
50d1270fd6409407f38b982e606df1dba4bf58ed
18 changes: 12 additions & 6 deletions src/builder.rs
Original file line number Diff line number Diff line change
Expand Up @@ -1106,18 +1106,24 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
val: RValue<'gcc>,
ptr: RValue<'gcc>,
align: Align,
_flags: MemFlags,
flags: MemFlags,
) -> RValue<'gcc> {
let ptr = self.check_store(val, ptr);
let destination = ptr.dereference(self.location);
// NOTE: libgccjit does not support specifying the alignment on the assignment, so we cast
// to type so it gets the proper alignment.
let destination_type = destination.to_rvalue().get_type().unqualified();
let aligned_type = destination_type.get_aligned(align.bytes()).make_pointer();
let aligned_destination = self.cx.context.new_bitcast(self.location, ptr, aligned_type);
let aligned_destination = aligned_destination.dereference(self.location);
self.llbb().add_assignment(self.location, aligned_destination, val);
// TODO(antoyo): handle align and flags.
let align = if flags.contains(MemFlags::UNALIGNED) { 1 } else { align.bytes() };
let mut modified_destination_type = destination_type.get_aligned(align);
if flags.contains(MemFlags::VOLATILE) {
modified_destination_type = modified_destination_type.make_volatile();
}

let modified_ptr =
self.cx.context.new_cast(self.location, ptr, modified_destination_type.make_pointer());
let modified_destination = modified_ptr.dereference(self.location);
self.llbb().add_assignment(self.location, modified_destination, val);
// TODO(antoyo): handle `MemFlags::NONTEMPORAL`.
// NOTE: dummy value here since it's never used. FIXME(antoyo): API should not return a value here?
self.cx.context.new_rvalue_zero(self.type_i32())
}
Expand Down
113 changes: 113 additions & 0 deletions tests/run/volatile2.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,113 @@
// Compiler:
//
// Run-time:
// status: 0

// Minimal hand-rolled libc bindings so this run-test needs no external crates.
mod libc {
    #[link(name = "c")]
    extern "C" {
        pub fn puts(s: *const u8) -> i32;

        pub fn sigaction(signum: i32, act: *const sigaction, oldact: *mut sigaction) -> i32;
        pub fn mmap(addr: *mut (), len: usize, prot: i32, flags: i32, fd: i32, offset: i64) -> *mut ();
        pub fn mprotect(addr: *mut (), len: usize, prot: i32) -> i32;
    }

    // mmap/mprotect protection bits (Linux values).
    pub const PROT_READ: i32 = 1;
    pub const PROT_WRITE: i32 = 2;
    // mmap flags (Linux values); MAP_FAILED is (void *)-1.
    pub const MAP_PRIVATE: i32 = 0x0002;
    pub const MAP_ANONYMOUS: i32 = 0x0020;
    pub const MAP_FAILED: *mut u8 = !0 as *mut u8;

    /// glibc sigaction
    // NOTE(review): assumed to mirror glibc's struct sigaction layout
    // (handler pointer, 128-byte sa_mask, sa_flags, sa_restorer); `#[repr(C)]`
    // field order is the ABI contract — do not reorder these fields.
    #[repr(C)]
    pub struct sigaction {
        pub sa_sigaction: Option<unsafe extern "C" fn(i32, *mut (), *mut ())>,
        pub sa_mask: [u32; 32],
        pub sa_flags: i32,
        pub sa_restorer: Option<unsafe extern "C" fn()>,
    }

    // SA_SIGINFO selects the three-argument handler signature used above.
    pub const SA_SIGINFO: i32 = 0x00000004;
    pub const SIGSEGV: i32 = 11;
}

// Number of segfaults observed so far; only ever accessed with volatile ops.
static mut COUNT: u32 = 0;
// Base address of the two-region mapping whose protection the handler toggles.
static mut STORAGE: *mut u8 = core::ptr::null_mut();
// Size of each protected region: 32 KiB, a multiple of common OS page sizes.
const PAGE_SIZE: usize = 1 << 15;

/// Entry point: checks that the backend neither merges nor elides volatile
/// loads and stores.
///
/// Strategy: map two regions with no access rights, install a SIGSEGV handler
/// that counts faults and alternates which region is accessible, then perform
/// 14 volatile accesses that must each fault exactly once. If the compiler
/// combined or removed any volatile access, the fault count would differ and
/// the final check would panic.
fn main() {
    unsafe {
        // Register a segfault handler
        libc::sigaction(
            libc::SIGSEGV,
            &libc::sigaction {
                sa_sigaction: Some(segv_handler),
                sa_flags: libc::SA_SIGINFO,
                ..core::mem::zeroed()
            },
            core::ptr::null_mut(),
        );

        // Two regions mapped with prot = 0 (no access), so the very first
        // access below faults.
        STORAGE = libc::mmap(
            core::ptr::null_mut(),
            PAGE_SIZE * 2,
            0,
            libc::MAP_PRIVATE | libc::MAP_ANONYMOUS,
            -1,
            0,
        ).cast();
        if STORAGE == libc::MAP_FAILED {
            panic!("error: mmap failed");
        }

        // Raw pointer to the fault counter. `addr_of_mut!` avoids creating a
        // `&mut` to a `static mut` (`static_mut_refs` lint; hard error in
        // edition 2024).
        let p_count = core::ptr::addr_of_mut!(COUNT);
        p_count.write_volatile(0);

        // Trigger segfaults
        STORAGE.add(0).write_volatile(1);
        STORAGE.add(PAGE_SIZE).write_volatile(1);
        STORAGE.add(0).write_volatile(1);
        STORAGE.add(PAGE_SIZE).write_volatile(1);
        STORAGE.add(0).write_volatile(1);
        STORAGE.add(PAGE_SIZE).write_volatile(1);
        STORAGE.add(0).read_volatile();
        STORAGE.add(PAGE_SIZE).read_volatile();
        STORAGE.add(0).read_volatile();
        STORAGE.add(PAGE_SIZE).read_volatile();
        STORAGE.add(0).read_volatile();
        STORAGE.add(PAGE_SIZE).read_volatile();
        STORAGE.add(0).write_volatile(1);
        STORAGE.add(PAGE_SIZE).write_volatile(1);

        // The segfault handler should have been called for every `write_volatile` and
        // `read_volatile` in `STORAGE`. If the compiler ignores volatility, some of these writes
        // will be combined, causing a different number of segfaults.
        //
        // This `p_count` read is done by a volatile read. If the compiler
        // ignores volatility, the compiler will speculate that `*p_count` is
        // unchanged and remove this check, failing the test.
        if p_count.read_volatile() != 14 {
            panic!("error: segfault count mismatch: {}", p_count.read_volatile());
        }
    }
}

/// SIGSEGV handler installed by `main`.
///
/// Increments the fault counter, then flips the protection of the two mapped
/// regions so that the *other* region faults on the next access: after an odd
/// number of faults the low region is readable/writable, after an even number
/// the high region is. This way each alternating volatile access in `main`
/// traps exactly once.
unsafe extern "C" fn segv_handler(_: i32, _: *mut (), _: *mut ()) {
    // Raw pointer to the counter; `addr_of_mut!` avoids creating a `&mut` to
    // a `static mut` (`static_mut_refs` lint; hard error in edition 2024).
    let p_count = core::ptr::addr_of_mut!(COUNT);
    p_count.write_volatile(p_count.read_volatile() + 1);
    let count = p_count.read_volatile();

    // Toggle the protected page so that the handler will be called for
    // each `write_volatile`
    libc::mprotect(
        STORAGE.cast(),
        PAGE_SIZE,
        if count % 2 == 1 { libc::PROT_READ | libc::PROT_WRITE } else { 0 },
    );
    libc::mprotect(
        STORAGE.add(PAGE_SIZE).cast(),
        PAGE_SIZE,
        if count % 2 == 0 { libc::PROT_READ | libc::PROT_WRITE } else { 0 },
    );
}