@@ -104,14 +104,20 @@ macro call_intrinsic_match {
 }
 
 macro atomic_binop_return_old($fx:expr, $op:ident<$T:ident>($ptr:ident, $src:ident) -> $ret:ident) {
+    crate::atomic_shim::lock_global_lock($fx);
+
     let clif_ty = $fx.clif_type($T).unwrap();
     let old = $fx.bcx.ins().load(clif_ty, MemFlags::new(), $ptr, 0);
     let new = $fx.bcx.ins().$op(old, $src);
     $fx.bcx.ins().store(MemFlags::new(), new, $ptr, 0);
     $ret.write_cvalue($fx, CValue::by_val(old, $fx.layout_of($T)));
+
+    crate::atomic_shim::unlock_global_lock($fx);
 }
 
 macro atomic_minmax($fx:expr, $cc:expr, <$T:ident> ($ptr:ident, $src:ident) -> $ret:ident) {
+    crate::atomic_shim::lock_global_lock($fx);
+
     // Read old
     let clif_ty = $fx.clif_type($T).unwrap();
     let old = $fx.bcx.ins().load(clif_ty, MemFlags::new(), $ptr, 0);
@@ -125,6 +131,8 @@ macro atomic_minmax($fx:expr, $cc:expr, <$T:ident> ($ptr:ident, $src:ident) -> $
 
     let ret_val = CValue::by_val(old, $ret.layout());
     $ret.write_cvalue($fx, ret_val);
+
+    crate::atomic_shim::unlock_global_lock($fx);
 }
 
 fn lane_type_and_count<'tcx>(
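Each macro now brackets its plain load/modify/store sequence with the atomic_shim lock calls, so the whole read-modify-write runs under one process-wide lock instead of relying on native atomic instructions. A minimal sketch of the semantics this gives the generated code, assuming the shim serializes everything through a single global mutex (the names below are illustrative, not the crate's actual symbols):

    use std::sync::Mutex;

    // Hypothetical stand-in for the global lock that lock_global_lock /
    // unlock_global_lock make the generated code acquire and release.
    static GLOBAL_ATOMIC_LOCK: Mutex<()> = Mutex::new(());

    // What an atomic_binop_return_old-style op behaves like after this
    // lowering: ordinary load + store, serialized by the one global lock.
    unsafe fn shimmed_xadd(ptr: *mut u64, amount: u64) -> u64 {
        let guard = GLOBAL_ATOMIC_LOCK.lock().unwrap(); // lock_global_lock
        let old = unsafe { ptr.read() };                // plain (non-atomic) load
        unsafe { ptr.write(old.wrapping_add(amount)) }; // plain (non-atomic) store
        drop(guard);                                    // unlock_global_lock
        old                                             // intrinsics return the old value
    }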
@@ -845,19 +853,35 @@ pub fn codegen_intrinsic_call<'tcx>(
             ret.write_cvalue(fx, caller_location);
         };
 
-        _ if intrinsic.starts_with("atomic_fence"), () {};
-        _ if intrinsic.starts_with("atomic_singlethreadfence"), () {};
+        _ if intrinsic.starts_with("atomic_fence"), () {
+            crate::atomic_shim::lock_global_lock(fx);
+            crate::atomic_shim::unlock_global_lock(fx);
+        };
+        _ if intrinsic.starts_with("atomic_singlethreadfence"), () {
+            crate::atomic_shim::lock_global_lock(fx);
+            crate::atomic_shim::unlock_global_lock(fx);
+        };
         _ if intrinsic.starts_with("atomic_load"), (c ptr) {
+            crate::atomic_shim::lock_global_lock(fx);
+
             let inner_layout =
                 fx.layout_of(ptr.layout().ty.builtin_deref(true).unwrap().ty);
             let val = CValue::by_ref(Pointer::new(ptr.load_scalar(fx)), inner_layout);
             ret.write_cvalue(fx, val);
+
+            crate::atomic_shim::unlock_global_lock(fx);
         };
         _ if intrinsic.starts_with("atomic_store"), (v ptr, c val) {
+            crate::atomic_shim::lock_global_lock(fx);
+
             let dest = CPlace::for_ptr(Pointer::new(ptr), val.layout());
             dest.write_cvalue(fx, val);
+
+            crate::atomic_shim::unlock_global_lock(fx);
         };
         _ if intrinsic.starts_with("atomic_xchg"), <T> (v ptr, c src) {
+            crate::atomic_shim::lock_global_lock(fx);
+
             // Read old
             let clif_ty = fx.clif_type(T).unwrap();
             let old = fx.bcx.ins().load(clif_ty, MemFlags::new(), ptr, 0);
@@ -866,8 +890,12 @@ pub fn codegen_intrinsic_call<'tcx>(
             // Write new
             let dest = CPlace::for_ptr(Pointer::new(ptr), src.layout());
             dest.write_cvalue(fx, src);
+
+            crate::atomic_shim::unlock_global_lock(fx);
         };
         _ if intrinsic.starts_with("atomic_cxchg"), <T> (v ptr, v test_old, v new) { // both atomic_cxchg_* and atomic_cxchgweak_*
+            crate::atomic_shim::lock_global_lock(fx);
+
             // Read old
             let clif_ty = fx.clif_type(T).unwrap();
             let old = fx.bcx.ins().load(clif_ty, MemFlags::new(), ptr, 0);
@@ -881,6 +909,8 @@ pub fn codegen_intrinsic_call<'tcx>(
 
             let ret_val = CValue::by_val_pair(old, fx.bcx.ins().bint(types::I8, is_eq), ret.layout());
             ret.write_cvalue(fx, ret_val);
+
+            crate::atomic_shim::unlock_global_lock(fx);
         };
 
         _ if intrinsic.starts_with("atomic_xadd"), <T> (v ptr, v amount) {
_ if intrinsic. starts_with( "atomic_xadd" ) , <T > ( v ptr, v amount) {
@@ -893,12 +923,16 @@ pub fn codegen_intrinsic_call<'tcx>(
             atomic_binop_return_old!(fx, band<T>(ptr, src) -> ret);
         };
         _ if intrinsic.starts_with("atomic_nand"), <T> (v ptr, v src) {
+            crate::atomic_shim::lock_global_lock(fx);
+
             let clif_ty = fx.clif_type(T).unwrap();
             let old = fx.bcx.ins().load(clif_ty, MemFlags::new(), ptr, 0);
             let and = fx.bcx.ins().band(old, src);
             let new = fx.bcx.ins().bnot(and);
             fx.bcx.ins().store(MemFlags::new(), new, ptr, 0);
             ret.write_cvalue(fx, CValue::by_val(old, fx.layout_of(T)));
+
+            crate::atomic_shim::unlock_global_lock(fx);
         };
         _ if intrinsic.starts_with("atomic_or"), <T> (v ptr, v src) {
             atomic_binop_return_old!(fx, bor<T>(ptr, src) -> ret);
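The fence arms above reduce to an empty critical section: under this scheme, acquiring and then releasing the global lock is itself a full synchronization point, so no data needs to be touched. In terms of the hypothetical lock from the earlier sketch:

    // A fence becomes lock immediately followed by unlock; the mutex's own
    // acquire/release ordering does all the work.
    fn shimmed_fence() {
        let guard = GLOBAL_ATOMIC_LOCK.lock().unwrap(); // lock_global_lock(fx)
        drop(guard);                                    // unlock_global_lock(fx)
    }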