@@ -131,6 +131,17 @@ use crate::intrinsics;
131
131
132
132
use crate::hint::spin_loop;
133
133
134
// Some architectures don't have byte-sized atomics, which results in LLVM
// emulating them using a LL/SC loop. However for AtomicBool we can take
// advantage of the fact that it only ever contains 0 or 1 and use atomic OR/AND
// instead, which LLVM can emulate using a larger atomic OR/AND operation.
//
// This list should only contain architectures which have word-sized atomic-or/
// atomic-and instructions but don't natively support byte-sized atomics.
#[cfg(target_has_atomic = "8")]
const EMULATE_ATOMIC_BOOL: bool = cfg!(any(
    target_arch = "riscv32",
    target_arch = "riscv64",
    target_arch = "loongarch64",
));
134
145
/// A boolean type which can be safely shared between threads.
///
/// This type has the same in-memory representation as a [`bool`].
@@ -553,8 +564,12 @@ impl AtomicBool {
553
564
#[ cfg( target_has_atomic = "8" ) ]
554
565
#[ cfg_attr( miri, track_caller) ] // even without panics, this helps for Miri backtraces
555
566
pub fn swap ( & self , val : bool , order : Ordering ) -> bool {
556
- // SAFETY: data races are prevented by atomic intrinsics.
557
- unsafe { atomic_swap ( self . v . get ( ) , val as u8 , order) != 0 }
567
+ if EMULATE_ATOMIC_BOOL {
568
+ if val { self . fetch_or ( true , order) } else { self . fetch_and ( false , order) }
569
+ } else {
570
+ // SAFETY: data races are prevented by atomic intrinsics.
571
+ unsafe { atomic_swap ( self . v . get ( ) , val as u8 , order) != 0 }
572
+ }
558
573
}
559
574
560
575
/// Stores a value into the [`bool`] if the current value is the same as the `current` value.
@@ -664,12 +679,39 @@ impl AtomicBool {
664
679
success : Ordering ,
665
680
failure : Ordering ,
666
681
) -> Result < bool , bool > {
667
- // SAFETY: data races are prevented by atomic intrinsics.
668
- match unsafe {
669
- atomic_compare_exchange ( self . v . get ( ) , current as u8 , new as u8 , success, failure)
670
- } {
671
- Ok ( x) => Ok ( x != 0 ) ,
672
- Err ( x) => Err ( x != 0 ) ,
682
+ if EMULATE_ATOMIC_BOOL {
683
+ // Pick the strongest ordering from success and failure.
684
+ let order = match ( success, failure) {
685
+ ( SeqCst , _) => SeqCst ,
686
+ ( _, SeqCst ) => SeqCst ,
687
+ ( AcqRel , _) => AcqRel ,
688
+ ( _, AcqRel ) => {
689
+ panic ! ( "there is no such thing as an acquire-release failure ordering" )
690
+ }
691
+ ( Release , Acquire ) => AcqRel ,
692
+ ( Acquire , _) => Acquire ,
693
+ ( _, Acquire ) => Acquire ,
694
+ ( Release , Relaxed ) => Release ,
695
+ ( _, Release ) => panic ! ( "there is no such thing as a release failure ordering" ) ,
696
+ ( Relaxed , Relaxed ) => Relaxed ,
697
+ } ;
698
+ let old = if current == new {
699
+ // This is a no-op, but we still need to perform the operation
700
+ // for memory ordering reasons.
701
+ self . fetch_or ( false , order)
702
+ } else {
703
+ // This sets the value to the new one and returns the old one.
704
+ self . swap ( new, order)
705
+ } ;
706
+ if old == current { Ok ( old) } else { Err ( old) }
707
+ } else {
708
+ // SAFETY: data races are prevented by atomic intrinsics.
709
+ match unsafe {
710
+ atomic_compare_exchange ( self . v . get ( ) , current as u8 , new as u8 , success, failure)
711
+ } {
712
+ Ok ( x) => Ok ( x != 0 ) ,
713
+ Err ( x) => Err ( x != 0 ) ,
714
+ }
673
715
}
674
716
}
675
717
@@ -719,6 +761,10 @@ impl AtomicBool {
719
761
success : Ordering ,
720
762
failure : Ordering ,
721
763
) -> Result < bool , bool > {
764
+ if EMULATE_ATOMIC_BOOL {
765
+ return self . compare_exchange ( current, new, success, failure) ;
766
+ }
767
+
722
768
// SAFETY: data races are prevented by atomic intrinsics.
723
769
match unsafe {
724
770
atomic_compare_exchange_weak ( self . v . get ( ) , current as u8 , new as u8 , success, failure)
0 commit comments