@@ -5,6 +5,10 @@ type c_int = i16;
 #[cfg(not(target_pointer_width = "16"))]
 type c_int = i32;
 
+use core::intrinsics::{atomic_load_unordered, atomic_store_unordered};
+use core::mem;
+use core::ops::{BitOr, Shl};
+
 #[cfg_attr(all(feature = "mem", not(feature = "mangled-names")), no_mangle)]
 pub unsafe extern "C" fn memcpy(dest: *mut u8, src: *const u8, n: usize) -> *mut u8 {
     let mut i = 0;
@@ -58,3 +62,110 @@ pub unsafe extern "C" fn memcmp(s1: *const u8, s2: *const u8, n: usize) -> i32 {
     }
     0
 }
+
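+/// Copies `bytes` bytes from `src` to `dest` one `T`-sized element at a
+/// time, using an unordered atomic load and store for each element.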
+unsafe fn memcpy_element_unordered_atomic<T: Copy>(dest: *mut T, src: *const T, bytes: usize) {
+    let n = bytes / mem::size_of::<T>();
+    let mut i = 0;
+    while i < n {
+        atomic_store_unordered(dest.add(i), atomic_load_unordered(src.add(i)));
+        i += 1;
+    }
+}
+
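+/// Like `memcpy_element_unordered_atomic`, but tolerates overlapping
+/// buffers: the copy direction is chosen from the relative order of `src`
+/// and `dest` so that no element is overwritten before it is read.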
+unsafe fn memmove_element_unordered_atomic<T: Copy>(dest: *mut T, src: *const T, bytes: usize) {
+    let n = bytes / mem::size_of::<T>();
+    if src < dest as *const T {
+        // copy from end
+        let mut i = n;
+        while i != 0 {
+            i -= 1;
+            atomic_store_unordered(dest.add(i), atomic_load_unordered(src.add(i)));
+        }
+    } else {
+        // copy from beginning
+        let mut i = 0;
+        while i < n {
+            atomic_store_unordered(dest.add(i), atomic_load_unordered(src.add(i)));
+            i += 1;
+        }
+    }
+}
+
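+/// Fills `bytes` bytes at `s` with the byte `c`, one `T`-sized element at a
+/// time: `c` is first splatted across every byte of a `T` value, which is
+/// then written out with unordered atomic stores.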
+unsafe fn memset_element_unordered_atomic<T>(s: *mut T, c: u8, bytes: usize)
+where
+    T: Copy + From<u8> + Shl<u32, Output = T> + BitOr<T, Output = T>,
+{
+    let n = bytes / mem::size_of::<T>();
+    let mut x = T::from(c);
+    for _ in 1..mem::size_of::<T>() {
+        x = x << 8 | T::from(c);
+    }
+    let mut i = 0;
+    while i < n {
+        atomic_store_unordered(s.add(i), x);
+        i += 1;
+    }
+}
+
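+// Expands each `fn` in its body into a `pub unsafe extern "C"` wrapper that
+// is `#[no_mangle]` unless the `mangled-names` feature is enabled.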
+macro_rules! trampoline {
+    ($(fn $fun:ident($($iid:ident : $ity:ty),+) {
+        $($body:tt)*
+    })+) => {
+        $(
+            #[cfg_attr(not(feature = "mangled-names"), no_mangle)]
+            pub unsafe extern "C" fn $fun($($iid: $ity),+) -> () {
+                $($body)*
+            }
+        )+
+    }
+}
+
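+// Symbols that LLVM lowers its `*.element.unordered.atomic` intrinsics to;
+// the numeric suffix is the element size in bytes.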
+trampoline! {
+    fn __llvm_memcpy_element_unordered_atomic_1(dest: *mut u8, src: *const u8, bytes: usize) {
+        memcpy_element_unordered_atomic(dest, src, bytes);
+    }
+    fn __llvm_memcpy_element_unordered_atomic_2(dest: *mut u16, src: *const u16, bytes: usize) {
+        memcpy_element_unordered_atomic(dest, src, bytes);
+    }
+    fn __llvm_memcpy_element_unordered_atomic_4(dest: *mut u32, src: *const u32, bytes: usize) {
+        memcpy_element_unordered_atomic(dest, src, bytes);
+    }
+    fn __llvm_memcpy_element_unordered_atomic_8(dest: *mut u64, src: *const u64, bytes: usize) {
+        memcpy_element_unordered_atomic(dest, src, bytes);
+    }
+    fn __llvm_memcpy_element_unordered_atomic_16(dest: *mut u128, src: *const u128, bytes: usize) {
+        memcpy_element_unordered_atomic(dest, src, bytes);
+    }
+
+    fn __llvm_memmove_element_unordered_atomic_1(dest: *mut u8, src: *const u8, bytes: usize) {
+        memmove_element_unordered_atomic(dest, src, bytes);
+    }
+    fn __llvm_memmove_element_unordered_atomic_2(dest: *mut u16, src: *const u16, bytes: usize) {
+        memmove_element_unordered_atomic(dest, src, bytes);
+    }
+    fn __llvm_memmove_element_unordered_atomic_4(dest: *mut u32, src: *const u32, bytes: usize) {
+        memmove_element_unordered_atomic(dest, src, bytes);
+    }
+    fn __llvm_memmove_element_unordered_atomic_8(dest: *mut u64, src: *const u64, bytes: usize) {
+        memmove_element_unordered_atomic(dest, src, bytes);
+    }
+    fn __llvm_memmove_element_unordered_atomic_16(dest: *mut u128, src: *const u128, bytes: usize) {
+        memmove_element_unordered_atomic(dest, src, bytes);
+    }
+
+    fn __llvm_memset_element_unordered_atomic_1(s: *mut u8, c: u8, bytes: usize) {
+        memset_element_unordered_atomic(s, c, bytes);
+    }
+    fn __llvm_memset_element_unordered_atomic_2(s: *mut u16, c: u8, bytes: usize) {
+        memset_element_unordered_atomic(s, c, bytes);
+    }
+    fn __llvm_memset_element_unordered_atomic_4(s: *mut u32, c: u8, bytes: usize) {
+        memset_element_unordered_atomic(s, c, bytes);
+    }
+    fn __llvm_memset_element_unordered_atomic_8(s: *mut u64, c: u8, bytes: usize) {
+        memset_element_unordered_atomic(s, c, bytes);
+    }
+    fn __llvm_memset_element_unordered_atomic_16(s: *mut u128, c: u8, bytes: usize) {
+        memset_element_unordered_atomic(s, c, bytes);
+    }
+}