Commit dbcc979

Implement LLVM's elementwise unordered atomic memory intrinsics
Allows code that uses intrinsics of the form llvm.(memcpy|memmove|memset).element.unordered.atomic.* to be linked.
1 parent 36da64f commit dbcc979

1 file changed: +113 -0

src/mem.rs
@@ -5,6 +5,10 @@ type c_int = i16;
 #[cfg(not(target_pointer_width = "16"))]
 type c_int = i32;
 
+use core::intrinsics::{atomic_load_unordered, atomic_store_unordered, unchecked_div};
+use core::mem;
+use core::ops::{BitOr, Shl};
+
 #[cfg_attr(all(feature = "mem", not(feature = "mangled-names")), no_mangle)]
 pub unsafe extern "C" fn memcpy(dest: *mut u8, src: *const u8, n: usize) -> *mut u8 {
     let mut i = 0;
@@ -58,3 +62,112 @@ pub unsafe extern "C" fn memcmp(s1: *const u8, s2: *const u8, n: usize) -> i32 {
     }
     0
 }
+
+unsafe fn memcpy_element_unordered_atomic<T: Copy>(dest: *mut T, src: *const T, bytes: usize) {
+    let n = unchecked_div(bytes, mem::size_of::<T>());
+    let mut i = 0;
+    while i < n {
+        atomic_store_unordered(dest.add(i), atomic_load_unordered(src.add(i)));
+        i += 1;
+    }
+}
+
+unsafe fn memmove_element_unordered_atomic<T: Copy>(dest: *mut T, src: *const T, bytes: usize) {
+    let n = unchecked_div(bytes, mem::size_of::<T>());
+    if src < dest as *const T {
+        // copy from end
+        let mut i = n;
+        while i != 0 {
+            i -= 1;
+            atomic_store_unordered(dest.add(i), atomic_load_unordered(src.add(i)));
+        }
+    } else {
+        // copy from beginning
+        let mut i = 0;
+        while i < n {
+            atomic_store_unordered(dest.add(i), atomic_load_unordered(src.add(i)));
+            i += 1;
+        }
+    }
+}
+
+unsafe fn memset_element_unordered_atomic<T>(s: *mut T, c: u8, bytes: usize)
+where
+    T: Copy + From<u8> + Shl<u32, Output = T> + BitOr<T, Output = T>,
+{
+    let n = unchecked_div(bytes, mem::size_of::<T>());
+    let mut x = T::from(c);
+    let mut i = 1;
+    while i < mem::size_of::<T>() {
+        x = x << 8 | T::from(c);
+        i += 1;
+    }
+    let mut i = 0;
+    while i < n {
+        atomic_store_unordered(s.add(i), x);
+        i += 1;
+    }
+}
+
+macro_rules! trampoline {
+    ($(fn $fun:ident($($iid:ident : $ity:ty),+) {
+        $($body:tt)*
+    })+) => {
+        $(
+            #[cfg_attr(not(feature = "mangled-names"), no_mangle)]
+            pub unsafe extern "C" fn $fun($($iid: $ity),+) {
+                $($body)*
+            }
+        )+
+    }
+}
+
+trampoline! {
+    fn __llvm_memcpy_element_unordered_atomic_1(dest: *mut u8, src: *const u8, bytes: usize) {
+        memcpy_element_unordered_atomic(dest, src, bytes);
+    }
+    fn __llvm_memcpy_element_unordered_atomic_2(dest: *mut u16, src: *const u16, bytes: usize) {
+        memcpy_element_unordered_atomic(dest, src, bytes);
+    }
+    fn __llvm_memcpy_element_unordered_atomic_4(dest: *mut u32, src: *const u32, bytes: usize) {
+        memcpy_element_unordered_atomic(dest, src, bytes);
+    }
+    fn __llvm_memcpy_element_unordered_atomic_8(dest: *mut u64, src: *const u64, bytes: usize) {
+        memcpy_element_unordered_atomic(dest, src, bytes);
+    }
+    fn __llvm_memcpy_element_unordered_atomic_16(dest: *mut u128, src: *const u128, bytes: usize) {
+        memcpy_element_unordered_atomic(dest, src, bytes);
+    }
+
+    fn __llvm_memmove_element_unordered_atomic_1(dest: *mut u8, src: *const u8, bytes: usize) {
+        memmove_element_unordered_atomic(dest, src, bytes);
+    }
+    fn __llvm_memmove_element_unordered_atomic_2(dest: *mut u16, src: *const u16, bytes: usize) {
+        memmove_element_unordered_atomic(dest, src, bytes);
+    }
+    fn __llvm_memmove_element_unordered_atomic_4(dest: *mut u32, src: *const u32, bytes: usize) {
+        memmove_element_unordered_atomic(dest, src, bytes);
+    }
+    fn __llvm_memmove_element_unordered_atomic_8(dest: *mut u64, src: *const u64, bytes: usize) {
+        memmove_element_unordered_atomic(dest, src, bytes);
+    }
+    fn __llvm_memmove_element_unordered_atomic_16(dest: *mut u128, src: *const u128, bytes: usize) {
+        memmove_element_unordered_atomic(dest, src, bytes);
+    }
+
+    fn __llvm_memset_element_unordered_atomic_1(s: *mut u8, c: u8, bytes: usize) {
+        memset_element_unordered_atomic(s, c, bytes);
+    }
+    fn __llvm_memset_element_unordered_atomic_2(s: *mut u16, c: u8, bytes: usize) {
+        memset_element_unordered_atomic(s, c, bytes);
+    }
+    fn __llvm_memset_element_unordered_atomic_4(s: *mut u32, c: u8, bytes: usize) {
+        memset_element_unordered_atomic(s, c, bytes);
+    }
+    fn __llvm_memset_element_unordered_atomic_8(s: *mut u64, c: u8, bytes: usize) {
+        memset_element_unordered_atomic(s, c, bytes);
+    }
+    fn __llvm_memset_element_unordered_atomic_16(s: *mut u128, c: u8, bytes: usize) {
+        memset_element_unordered_atomic(s, c, bytes);
+    }
+}
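Not part of this commit, but as a rough usage sketch: when LLVM lowers, say, llvm.memcpy.element.unordered.atomic with an element size of 4 bytes, it emits a call to __llvm_memcpy_element_unordered_atomic_4, which the trampoline! block above now exports. Assuming this crate is linked with the mem feature enabled and unmangled names, that symbol could be declared and exercised directly; the buffers and the main wrapper below are illustrative only.

extern "C" {
    // Mirrors the signature the trampoline! macro emits for the 4-byte
    // element size; `bytes` is a total byte count and is expected to be a
    // multiple of the element size, per the LLVM intrinsic's contract.
    fn __llvm_memcpy_element_unordered_atomic_4(dest: *mut u32, src: *const u32, bytes: usize);
}

fn main() {
    let src = [1u32, 2, 3, 4];
    let mut dest = [0u32; 4];
    unsafe {
        __llvm_memcpy_element_unordered_atomic_4(
            dest.as_mut_ptr(),
            src.as_ptr(),
            core::mem::size_of_val(&src),
        );
    }
    assert_eq!(dest, src);
}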
