@@ -9,45 +9,9 @@ use core::intrinsics::{atomic_load_unordered, atomic_store_unordered, exact_div};
 use core::mem;
 use core::ops::{BitOr, Shl};
 
-#[cfg_attr(all(feature = "mem", not(feature = "mangled-names")), no_mangle)]
-pub unsafe extern "C" fn memcpy(dest: *mut u8, src: *const u8, n: usize) -> *mut u8 {
-    let mut i = 0;
-    while i < n {
-        *dest.offset(i as isize) = *src.offset(i as isize);
-        i += 1;
-    }
-    dest
-}
-
-#[cfg_attr(all(feature = "mem", not(feature = "mangled-names")), no_mangle)]
-pub unsafe extern "C" fn memmove(dest: *mut u8, src: *const u8, n: usize) -> *mut u8 {
-    if src < dest as *const u8 {
-        // copy from end
-        let mut i = n;
-        while i != 0 {
-            i -= 1;
-            *dest.offset(i as isize) = *src.offset(i as isize);
-        }
-    } else {
-        // copy from beginning
-        let mut i = 0;
-        while i < n {
-            *dest.offset(i as isize) = *src.offset(i as isize);
-            i += 1;
-        }
-    }
-    dest
-}
-
-#[cfg_attr(all(feature = "mem", not(feature = "mangled-names")), no_mangle)]
-pub unsafe extern "C" fn memset(s: *mut u8, c: c_int, n: usize) -> *mut u8 {
-    let mut i = 0;
-    while i < n {
-        *s.offset(i as isize) = c as u8;
-        i += 1;
-    }
-    s
-}
+// memcpy/memmove/memset have optimized implementations on some architectures
+mod memcpy;
+pub use self::memcpy::*;
 
 #[cfg_attr(all(feature = "mem", not(feature = "mangled-names")), no_mangle)]
 pub unsafe extern "C" fn memcmp(s1: *const u8, s2: *const u8, n: usize) -> i32 {
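For context: the removed generic loops copy one byte per iteration, and moving memcpy/memmove/memset into their own module leaves room for word-sized or architecture-specific variants behind cfg gates. The sketch below is illustrative only, not code from this PR; the function name memcpy_word_at_a_time and the align-then-copy-words strategy are assumptions about what such an optimized path can look like, with the byte-wise loops remaining the portable fallback.

use core::mem;

// Illustrative sketch only -- not taken from this PR. Copies n bytes from
// src to dest by first aligning dest, then moving usize-sized chunks, then
// the tail. Safety contract matches C memcpy: the regions must not overlap
// and both pointers must be valid for n bytes.
pub unsafe extern "C" fn memcpy_word_at_a_time(dest: *mut u8, src: *const u8, n: usize) -> *mut u8 {
    const WORD: usize = mem::size_of::<usize>();
    let mut d = dest;
    let mut s = src;
    let mut left = n;

    // Copy single bytes until dest is word-aligned (or nothing is left).
    while left > 0 && (d as usize) % WORD != 0 {
        *d = *s;
        d = d.add(1);
        s = s.add(1);
        left -= 1;
    }

    // Copy whole words; read_unaligned tolerates a misaligned src.
    while left >= WORD {
        (d as *mut usize).write((s as *const usize).read_unaligned());
        d = d.add(WORD);
        s = s.add(WORD);
        left -= WORD;
    }

    // Copy the remaining tail bytes.
    while left > 0 {
        *d = *s;
        d = d.add(1);
        s = s.add(1);
        left -= 1;
    }
    dest
}

In the actual crate layout this kind of variant would typically live inside the new memcpy module and be selected with #[cfg(target_arch = "...")], so the re-export via pub use self::memcpy::* keeps the public symbols unchanged.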