// `Once.state_and_queue` and an unknown number of `Waiter.signaled`.
// * `state_and_queue` is used (1) as a state flag, (2) for synchronizing the
//   result of the `Once`, and (3) for synchronizing `Waiter` nodes.
-//     - At the end of the `call_inner` function we have to make sure the result
+//     - At the end of the `try_call_inner` function we have to make sure the result
//       of the `Once` is acquired. So every load which can be the only one to
//       load COMPLETED must have at least Acquire ordering, which means all
//       three of them.
[...]
//       `state_and_queue` with Acquire ordering.
//     - There is just one store where `state_and_queue` is used only as a
//       state flag, without having to synchronize data: switching the state
-//       from INCOMPLETE to RUNNING in `call_inner`. This store can be Relaxed,
+//       from INCOMPLETE to RUNNING in `try_call_inner`. This store can be Relaxed,
//       but the read has to be Acquire because of the requirements mentioned
//       above.
// * `Waiter.signaled` is both used as a flag, and to protect a field with
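The Release/Acquire pairing described above distills to a short, self-contained sketch. Everything in it is illustrative (the statics, function names, and values are made up for the example, not taken from this file): a Release store publishing the flag makes every earlier write visible to any thread whose Acquire load observes that flag.

    // Illustrative sketch only, not the `Once` implementation itself.
    use std::sync::atomic::{AtomicUsize, Ordering};

    const INCOMPLETE: usize = 0x0;
    const COMPLETE: usize = 0x3;

    static DATA: AtomicUsize = AtomicUsize::new(0);
    static STATE: AtomicUsize = AtomicUsize::new(INCOMPLETE);

    // The initializing thread publishes its result, then flips the state with
    // Release ordering so the write to DATA happens-before any Acquire load
    // that observes COMPLETE.
    fn finish_init() {
        DATA.store(42, Ordering::Relaxed);
        STATE.store(COMPLETE, Ordering::Release);
    }

    // A thread that loads COMPLETE with Acquire ordering is guaranteed to also
    // see the DATA written before the matching Release store.
    fn try_read() -> Option<usize> {
        if STATE.load(Ordering::Acquire) == COMPLETE {
            Some(DATA.load(Ordering::Relaxed))
        } else {
            None
        }
    }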
@@ -93,7 +93,7 @@ use crate::fmt;
use crate::marker;
use crate::ptr;
use crate::sync::atomic::{spin_loop_hint, AtomicBool, AtomicUsize, Ordering};
-use crate::thread::{self, Thread};
+use crate::thread::{self, Thread, ThreadId};

/// A synchronization primitive which can be used to run a one-time global
/// initialization. Useful for one-time initialization for FFI or related
@@ -190,6 +190,7 @@ struct Waiter {
#[repr(align(4))] // Ensure the two lower bits are free to use as state bits.
struct WaiterQueue {
    head: Cell<*const Waiter>,
+    id: ThreadId,
}

// A guard that will wake up the waiters when it gets dropped, i.e. also on panic.
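The `align(4)` attribute is what makes the pointer tagging in `state_and_queue` sound: with 4-byte alignment the two low bits of a `WaiterQueue` address are always zero, so they can carry the state while the remaining bits carry the pointer. A rough sketch of the idea, with invented helper names (`pack`/`unpack` are not functions in this file):

    // Sketch only: STATE_MASK selects the two low bits, the rest is the pointer.
    const STATE_MASK: usize = 0b11;

    fn pack(queue_addr: usize, state: usize) -> usize {
        debug_assert!(queue_addr & STATE_MASK == 0); // guaranteed by align(4)
        queue_addr | (state & STATE_MASK)
    }

    fn unpack(state_and_queue: usize) -> (usize, usize) {
        (state_and_queue & !STATE_MASK, state_and_queue & STATE_MASK)
    }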
@@ -202,6 +203,13 @@ struct WaiterQueueGuard<'a> {
    set_state_on_drop_to: usize,
}

+// Potential outcomes of calling try_call_inner
+enum CallResult {
+    Complete,
+    Poisoned,
+    Reentrance,
+}
+
impl Once {
    /// Creates a new `Once` value.
    #[stable(feature = "once_new", since = "1.2.0")]
@@ -403,13 +411,27 @@ impl Once {
    // without some allocation overhead.
    #[cold]
    fn call_inner(&self, ignore_poisoning: bool, init: &mut dyn FnMut(&OnceState)) {
+        match self.try_call_inner(ignore_poisoning, init) {
+            CallResult::Complete => (),
+            // Panic to propagate the poison.
+            CallResult::Poisoned => panic!("Once instance has previously been poisoned"),
+            CallResult::Reentrance => panic!("Once instance cannot be recursively initialized"),
+        }
+    }
+
+    fn try_call_inner(
+        &self,
+        ignore_poisoning: bool,
+        init: &mut dyn FnMut(&OnceState),
+    ) -> CallResult {
        let mut state_and_queue = self.state_and_queue.load(Ordering::Acquire);
        loop {
            match state_and_queue {
-                COMPLETE => break,
+                COMPLETE => {
+                    return CallResult::Complete;
+                }
                POISONED if !ignore_poisoning => {
-                    // Panic to propagate the poison.
-                    panic!("Once instance has previously been poisoned");
+                    return CallResult::Poisoned;
                }
                POISONED | INCOMPLETE => {
                    // Try to register this thread as the one RUNNING.
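"Registering as the one RUNNING" amounts to winning a compare-and-exchange from INCOMPLETE to RUNNING; exactly one thread succeeds and goes on to run the initialization closure. A minimal sketch of that step, with illustrative constants rather than the exact code in this file (the real path also has to handle POISONED):

    // Sketch: one thread wins the INCOMPLETE -> RUNNING transition.
    use std::sync::atomic::{AtomicUsize, Ordering};

    const INCOMPLETE: usize = 0x0;
    const RUNNING: usize = 0x2;

    // Returns true only for the single thread that must run the closure;
    // every other thread keeps observing the current state and waits.
    fn try_register_as_runner(state_and_queue: &AtomicUsize) -> bool {
        state_and_queue
            .compare_exchange(INCOMPLETE, RUNNING, Ordering::Acquire, Ordering::Acquire)
            .is_ok()
    }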
@@ -426,7 +448,8 @@ impl Once {

                    // `waiter_queue` will manage other waiting threads, and `queue_guard`
                    // will wake them up on drop.
-                    let waiter_queue = WaiterQueue { head: Cell::new(ptr::null()) };
+                    let waiter_queue =
+                        WaiterQueue { head: Cell::new(ptr::null()), id: thread::current().id() };
                    let mut queue_guard = WaiterQueueGuard {
                        state_and_queue: &self.state_and_queue,
                        queue: &waiter_queue,
@@ -445,27 +468,32 @@ impl Once {
                    };
                    init(&init_state);
                    queue_guard.set_state_on_drop_to = init_state.set_state_on_drop_to.get();
-                    break;
+                    return CallResult::Complete;
                }
                _ => {
                    // All other values must be RUNNING with possibly a
                    // pointer to the waiter queue in the more significant bits.
                    assert!(state_and_queue & STATE_MASK == RUNNING);
-                    wait(&self.state_and_queue, state_and_queue);
+                    if wait(&self.state_and_queue, state_and_queue) {
+                        return CallResult::Reentrance;
+                    }
                    state_and_queue = self.state_and_queue.load(Ordering::Acquire);
                }
            }
        }
    }
}

-fn wait(state_and_queue: &AtomicUsize, mut current_state: usize) {
+// Returns whether reentrance has been detected.
+fn wait(state_and_queue: &AtomicUsize, mut current_state: usize) -> bool {
    // Note: the following code was carefully written to avoid creating a
    // mutable reference to `node` that gets aliased.

    // Create a node upfront to reduce time spent inside spin lock.
+    let thread = thread::current();
+    let id = thread.id();
    let node = Waiter {
-        thread: Cell::new(Some(thread::current())),
+        thread: Cell::new(Some(thread)),
        signaled: AtomicBool::new(false),
        next: Cell::new(ptr::null()),
    };
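The two added lines capture the waiting thread's `ThreadId` up front; the reentrance check further down relies on thread ids being unique per thread and comparable. A small standalone illustration using only the public `std::thread` API:

    use std::thread;

    fn main() {
        let main_id = thread::current().id();
        let child_id = thread::spawn(|| thread::current().id()).join().unwrap();

        assert_eq!(main_id, thread::current().id()); // same thread, same id
        assert_ne!(main_id, child_id); // distinct threads never share an id
    }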
@@ -475,7 +503,7 @@ fn wait(state_and_queue: &AtomicUsize, mut current_state: usize) {
        // Don't queue this thread if the status is no longer running,
        // otherwise we will not be woken up.
        if current_state & STATE_MASK != RUNNING {
-            return;
+            return false;
        }

        // Currently locked, spin.
@@ -496,18 +524,28 @@ fn wait(state_and_queue: &AtomicUsize, mut current_state: usize) {
        }

        // Insert our node into the linked list.
-        {
+        let reentry = {
            // SAFETY: This is okay because we have just "lock"ed it. Even the thread
            // that creates this WaiterQueue would need to lock it before drop it, so
            // the reference is definitely not dangling.
            let queue = unsafe { &*((current_state & !STATE_MASK) as *const WaiterQueue) };
-            node.next.set(queue.head.get());
-            queue.head.set(&node as *const Waiter);
-        }
+            if queue.id != id {
+                node.next.set(queue.head.get());
+                queue.head.set(&node as *const Waiter);
+                false
+            } else {
+                // If the thread id matches then this is a reentrance to try_call_inner
+                true
+            }
+        };

        // Unlock the WaiterQueue.
        state_and_queue.store(current_state, Ordering::Release);

+        if reentry {
+            return true;
+        }
+
        // We have enqueued ourselves, now lets wait.
        // It is important not to return before being signaled, otherwise we
        // would drop our `Waiter` node and leave a hole in the linked list
@@ -520,6 +558,8 @@ fn wait(state_and_queue: &AtomicUsize, mut current_state: usize) {
        // an `unpark` just before on an unparked thread it does not park.
        thread::park();
    }
+
+    false
}

#[stable(feature = "std_debug", since = "1.16.0")]