8
8
// Spurious failure is possible, if you are really unlucky with
// the RNG and always read the latest value from the store buffer.
10
10
11
- use std:: sync:: atomic:: AtomicUsize ;
12
11
use std:: sync:: atomic:: Ordering :: * ;
12
+ use std:: sync:: atomic:: { fence, AtomicUsize } ;
13
13
use std:: thread:: spawn;
14
14
15
15
#[ derive( Copy , Clone ) ]
@@ -70,7 +70,7 @@ fn seq_cst() -> bool {
70
70
r3 == 1
71
71
}
72
72
73
- fn initialization_write ( ) -> bool {
73
+ fn initialization_write ( add_fence : bool ) -> bool {
74
74
let x = static_atomic ( 11 ) ;
75
75
assert_eq ! ( x. load( Relaxed ) , 11 ) ; // work around https://github.com/rust-lang/miri/issues/2164
76
76
@@ -85,6 +85,9 @@ fn initialization_write() -> bool {
85
85
86
86
let j2 = spawn ( move || {
87
87
reads_value ( wait, 1 ) ;
88
+ if add_fence {
89
+ fence ( AcqRel ) ;
90
+ }
88
91
x. load ( Relaxed )
89
92
} ) ;
90
93
@@ -94,6 +97,46 @@ fn initialization_write() -> bool {
94
97
r2 == 11
95
98
}
96
99
100
+ fn faa_replaced_by_load ( ) -> bool {
101
+ // Example from https://github.com/llvm/llvm-project/issues/56450#issuecomment-1183695905
102
+ #[ no_mangle]
103
+ pub fn rdmw ( storing : & AtomicUsize , sync : & AtomicUsize , loading : & AtomicUsize ) -> usize {
104
+ storing. store ( 1 , Relaxed ) ;
105
+ fence ( Release ) ;
106
+ // sync.fetch_add(0, Relaxed);
107
+ sync. load ( Relaxed ) ;
108
+ fence ( Acquire ) ;
109
+ loading. load ( Relaxed )
110
+ }
111
+
112
+ let x = static_atomic ( 0 ) ;
113
+ assert_eq ! ( x. load( Relaxed ) , 0 ) ; // work around https://github.com/rust-lang/miri/issues/2164
114
+ let y = static_atomic ( 0 ) ;
115
+ assert_eq ! ( y. load( Relaxed ) , 0 ) ; // work around https://github.com/rust-lang/miri/issues/2164
116
+ let z = static_atomic ( 0 ) ;
117
+ assert_eq ! ( z. load( Relaxed ) , 0 ) ; // work around https://github.com/rust-lang/miri/issues/2164
118
+
119
+ // Since each thread is so short, we need to make sure that they truely run at the same time
120
+ // Otherwise t1 will finish before t2 even starts
121
+ let go = static_atomic ( 0 ) ;
122
+
123
+ let t1 = spawn ( move || {
124
+ while go. load ( Relaxed ) == 0 { }
125
+ rdmw ( y, x, z)
126
+ } ) ;
127
+
128
+ let t2 = spawn ( move || {
129
+ while go. load ( Relaxed ) == 0 { }
130
+ rdmw ( z, x, y)
131
+ } ) ;
132
+
133
+ go. store ( 1 , Relaxed ) ;
134
+
135
+ let a = t1. join ( ) . unwrap ( ) ;
136
+ let b = t2. join ( ) . unwrap ( ) ;
137
+ ( a, b) == ( 0 , 0 )
138
+ }
139
+
97
140
/// Asserts that the function returns true at least once in 100 runs
98
141
fn assert_once ( f : fn ( ) -> bool ) {
99
142
assert ! ( std:: iter:: repeat_with( || f( ) ) . take( 100 ) . any( |x| x) ) ;
@@ -102,5 +145,7 @@ fn assert_once(f: fn() -> bool) {
102
145
pub fn main ( ) {
103
146
assert_once ( relaxed) ;
104
147
assert_once ( seq_cst) ;
105
- assert_once ( initialization_write) ;
148
+ assert_once ( || initialization_write ( false ) ) ;
149
+ assert_once ( || initialization_write ( true ) ) ;
150
+ assert_once ( faa_replaced_by_load) ;
106
151
}
0 commit comments