@@ -1,6 +1,7 @@
 use std::cell::RefCell;
 use std::collections::VecDeque;
 use std::collections::hash_map::Entry;
+use std::default::Default;
 use std::ops::Not;
 use std::rc::Rc;
 use std::time::Duration;
@@ -46,8 +47,6 @@ macro_rules! declare_id {
 }
 pub(super) use declare_id;
 
-declare_id!(MutexId);
-
 /// The mutex state.
 #[derive(Default, Debug)]
 struct Mutex {
@@ -61,6 +60,21 @@ struct Mutex {
     clock: VClock,
 }
 
+#[derive(Default, Clone, Debug)]
+pub struct MutexRef(Rc<RefCell<Mutex>>);
+
+impl MutexRef {
+    fn new() -> Self {
+        MutexRef(Rc::new(RefCell::new(Mutex::default())))
+    }
+}
+
+impl VisitProvenance for MutexRef {
+    fn visit_provenance(&self, _visit: &mut VisitWith<'_>) {
+        // Mutex contains no provenance.
+    }
+}
+
 declare_id!(RwLockId);
 
 /// The read-write lock state.
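
The new `MutexRef` is a thin handle around `Rc<RefCell<Mutex>>`: cloning it only copies a pointer, and every clone refers to the same mutex state, which is what lets the state move out of the central `IndexVec<MutexId, Mutex>` removed in the next hunk. A minimal, self-contained sketch of that pattern, with hypothetical `MutexState`/`MutexHandle` types standing in for Miri's:

```rust
use std::cell::RefCell;
use std::rc::Rc;

// Hypothetical stand-in for the interpreter's mutex state.
#[derive(Default, Debug)]
struct MutexState {
    owner: Option<u32>,
    lock_count: usize,
}

// Handle type analogous to `MutexRef`: cheap to clone, shares one state.
#[derive(Default, Clone, Debug)]
struct MutexHandle(Rc<RefCell<MutexState>>);

fn main() {
    let a = MutexHandle::default();
    let b = a.clone(); // both handles point at the same RefCell

    a.0.borrow_mut().owner = Some(7);
    a.0.borrow_mut().lock_count = 1;

    // The change is visible through the other handle.
    assert_eq!(b.0.borrow().owner, Some(7));
    assert_eq!(b.0.borrow().lock_count, 1);
    println!("{:?}", b.0.borrow());
}
```

`Rc`/`RefCell` (rather than `Arc`/`Mutex`) is only sound because this code runs on a single host thread; both types are deliberately not thread-safe.
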
@@ -144,7 +158,6 @@ struct FutexWaiter {
 /// The state of all synchronization objects.
 #[derive(Default, Debug)]
 pub struct SynchronizationObjects {
-    mutexes: IndexVec<MutexId, Mutex>,
     rwlocks: IndexVec<RwLockId, RwLock>,
     condvars: IndexVec<CondvarId, Condvar>,
     pub(super) init_onces: IndexVec<InitOnceId, InitOnce>,
@@ -155,17 +168,17 @@ impl<'tcx> EvalContextExtPriv<'tcx> for crate::MiriInterpCx<'tcx> {}
 pub(super) trait EvalContextExtPriv<'tcx>: crate::MiriInterpCxExt<'tcx> {
     fn condvar_reacquire_mutex(
         &mut self,
-        mutex: MutexId,
+        mutex_ref: &MutexRef,
         retval: Scalar,
         dest: MPlaceTy<'tcx>,
     ) -> InterpResult<'tcx> {
         let this = self.eval_context_mut();
-        if this.mutex_is_locked(mutex) {
-            assert_ne!(this.mutex_get_owner(mutex), this.active_thread());
-            this.mutex_enqueue_and_block(mutex, Some((retval, dest)));
+        if this.mutex_is_locked(mutex_ref) {
+            assert_ne!(this.mutex_get_owner(mutex_ref), this.active_thread());
+            this.mutex_enqueue_and_block(mutex_ref, Some((retval, dest)));
         } else {
             // We can have it right now!
-            this.mutex_lock(mutex);
+            this.mutex_lock(mutex_ref);
             // Don't forget to write the return value.
             this.write_scalar(retval, &dest)?;
         }
@@ -174,10 +187,9 @@ pub(super) trait EvalContextExtPriv<'tcx>: crate::MiriInterpCxExt<'tcx> {
 }
 
 impl SynchronizationObjects {
-    pub fn mutex_create(&mut self) -> MutexId {
-        self.mutexes.push(Default::default())
+    pub fn mutex_create(&mut self) -> MutexRef {
+        MutexRef::new()
     }
-
     pub fn rwlock_create(&mut self) -> RwLockId {
         self.rwlocks.push(Default::default())
     }
@@ -209,12 +221,16 @@ impl<'tcx> EvalContextExt<'tcx> for crate::MiriInterpCx<'tcx> {}
 pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
     /// Helper for lazily initialized `alloc_extra.sync` data:
     /// this forces an immediate init.
-    fn lazy_sync_init<T: 'static + Copy>(
-        &mut self,
+    /// Return a reference to the data in the machine state.
+    fn lazy_sync_init<'a, T: 'static>(
+        &'a mut self,
         primitive: &MPlaceTy<'tcx>,
         init_offset: Size,
         data: T,
-    ) -> InterpResult<'tcx> {
+    ) -> InterpResult<'tcx, &'a T>
+    where
+        'tcx: 'a,
+    {
         let this = self.eval_context_mut();
 
         let (alloc, offset, _) = this.ptr_get_alloc_id(primitive.ptr(), 0)?;
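
The new signature threads two lifetimes: the returned `&'a T` borrows from `&'a mut self`, and `where 'tcx: 'a` records that the interpreter lifetime outlives that borrow. A small sketch of the same shape, using invented names (`Machine`, `init_and_get`) rather than Miri's types:

```rust
struct Machine<'tcx> {
    note: &'tcx str,
    data: Vec<String>,
}

impl<'tcx> Machine<'tcx> {
    // Mutate `self`, then hand back a reference whose lifetime 'a is tied to
    // this borrow of `self`; 'tcx must outlive the returned reference.
    fn init_and_get<'a>(&'a mut self, value: String) -> Result<&'a String, &'tcx str>
    where
        'tcx: 'a,
    {
        self.data.push(value);
        Ok(self.data.last().unwrap())
    }
}

fn main() {
    let mut m = Machine { note: "interp-lifetime data", data: Vec::new() };
    let r = m.init_and_get("lazily initialized".to_string()).unwrap();
    println!("{r}");
    println!("{}", m.note);
}
```

The practical effect in the diff is that callers now get a reference into the machine state instead of a copy of the data, which is why the `T: Copy` bound could be dropped.
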
@@ -227,21 +243,26 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
             &init_field,
             AtomicWriteOrd::Relaxed,
         )?;
-        interp_ok(())
+        interp_ok(this.get_alloc_extra(alloc)?.get_sync::<T>(offset).unwrap())
     }
 
     /// Helper for lazily initialized `alloc_extra.sync` data:
     /// Checks if the primitive is initialized:
     /// - If yes, fetches the data from `alloc_extra.sync`, or calls `missing_data` if that fails
     ///   and stores that in `alloc_extra.sync`.
     /// - Otherwise, calls `new_data` to initialize the primitive.
-    fn lazy_sync_get_data<T: 'static + Copy>(
-        &mut self,
+    ///
+    /// Return a reference to the data in the machine state.
+    fn lazy_sync_get_data<'a, T: 'static>(
+        &'a mut self,
         primitive: &MPlaceTy<'tcx>,
         init_offset: Size,
         missing_data: impl FnOnce() -> InterpResult<'tcx, T>,
         new_data: impl FnOnce(&mut MiriInterpCx<'tcx>) -> InterpResult<'tcx, T>,
-    ) -> InterpResult<'tcx, T> {
+    ) -> InterpResult<'tcx, &'a T>
+    where
+        'tcx: 'a,
+    {
         let this = self.eval_context_mut();
 
         // Check if this is already initialized. Needs to be atomic because we can race with another
@@ -265,17 +286,15 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
             // or else it has been moved illegally.
             let (alloc, offset, _) = this.ptr_get_alloc_id(primitive.ptr(), 0)?;
             let (alloc_extra, _machine) = this.get_alloc_extra_mut(alloc)?;
-            if let Some(data) = alloc_extra.get_sync::<T>(offset) {
-                interp_ok(*data)
-            } else {
+            // Due to borrow checker reasons, we have to do the lookup twice.
+            if alloc_extra.get_sync::<T>(offset).is_none() {
                 let data = missing_data()?;
                 alloc_extra.sync.insert(offset, Box::new(data));
-                interp_ok(data)
             }
+            interp_ok(alloc_extra.get_sync::<T>(offset).unwrap())
         } else {
             let data = new_data(this)?;
-            this.lazy_sync_init(primitive, init_offset, data)?;
-            interp_ok(data)
+            this.lazy_sync_init(primitive, init_offset, data)
         }
     }
 
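
The "lookup twice" comment above refers to a known borrow-checker limitation: returning the reference obtained from the first `get_sync` while also inserting in the `else` branch is rejected by today's (non-Polonius) borrow checker. A stripped-down sketch of the same shape, using a plain `HashMap` as a hypothetical stand-in for `alloc_extra.sync`:

```rust
use std::collections::HashMap;

// Get existing data for `key`, or insert the value produced by `missing_data`,
// and hand back a shared reference into the map either way.
fn get_or_insert_with<'a, T>(
    map: &'a mut HashMap<u64, T>,
    key: u64,
    missing_data: impl FnOnce() -> T,
) -> &'a T {
    // Writing `if let Some(data) = map.get(&key) { return data; }` and then
    // inserting afterwards trips the borrow checker, so look up twice instead.
    if map.get(&key).is_none() {
        map.insert(key, missing_data());
    }
    map.get(&key).unwrap()
}

fn main() {
    let mut sync_data: HashMap<u64, String> = HashMap::new();
    let first = get_or_insert_with(&mut sync_data, 8, || "lazily created".to_string());
    assert_eq!(first, "lazily created");
}
```

With `std::collections::HashMap` the `entry` API would avoid the second lookup entirely; the two-step pattern here is a pragmatic fit for the `get_sync` interface rather than a general rule.
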
@@ -311,23 +330,21 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
 
     #[inline]
     /// Get the id of the thread that currently owns this lock.
-    fn mutex_get_owner(&mut self, id: MutexId) -> ThreadId {
-        let this = self.eval_context_ref();
-        this.machine.sync.mutexes[id].owner.unwrap()
+    fn mutex_get_owner(&self, mutex_ref: &MutexRef) -> ThreadId {
+        mutex_ref.0.borrow().owner.unwrap()
     }
 
     #[inline]
     /// Check if locked.
-    fn mutex_is_locked(&self, id: MutexId) -> bool {
-        let this = self.eval_context_ref();
-        this.machine.sync.mutexes[id].owner.is_some()
+    fn mutex_is_locked(&self, mutex_ref: &MutexRef) -> bool {
+        mutex_ref.0.borrow().owner.is_some()
     }
 
     /// Lock by setting the mutex owner and increasing the lock count.
-    fn mutex_lock(&mut self, id: MutexId) {
+    fn mutex_lock(&mut self, mutex_ref: &MutexRef) {
         let this = self.eval_context_mut();
         let thread = this.active_thread();
-        let mutex = &mut this.machine.sync.mutexes[id];
+        let mut mutex = mutex_ref.0.borrow_mut();
         if let Some(current_owner) = mutex.owner {
             assert_eq!(thread, current_owner, "mutex already locked by another thread");
             assert!(
@@ -347,9 +364,9 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
     /// count. If the lock count reaches 0, release the lock and potentially
     /// give to a new owner. If the lock was not locked by the current thread,
     /// return `None`.
-    fn mutex_unlock(&mut self, id: MutexId) -> InterpResult<'tcx, Option<usize>> {
+    fn mutex_unlock(&mut self, mutex_ref: &MutexRef) -> InterpResult<'tcx, Option<usize>> {
         let this = self.eval_context_mut();
-        let mutex = &mut this.machine.sync.mutexes[id];
+        let mut mutex = mutex_ref.0.borrow_mut();
         interp_ok(if let Some(current_owner) = mutex.owner {
             // Mutex is locked.
             if current_owner != this.machine.threads.active_thread() {
@@ -367,8 +384,12 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
                         mutex.clock.clone_from(clock)
                     });
                 }
-                if let Some(thread) = this.machine.sync.mutexes[id].queue.pop_front() {
-                    this.unblock_thread(thread, BlockReason::Mutex(id))?;
+                let thread_id = mutex.queue.pop_front();
+                // We need to drop our mutex borrow before unblock_thread
+                // because it will be borrowed again in the unblock callback.
+                drop(mutex);
+                if thread_id.is_some() {
+                    this.unblock_thread(thread_id.unwrap(), BlockReason::Mutex)?;
                 }
             }
             Some(old_lock_count)
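
Holding the `RefCell` borrow across `unblock_thread` would abort at runtime: the unblock callback calls `mutex_is_locked`/`mutex_lock`, which borrow the same cell again, and `RefCell` panics on overlapping borrows rather than blocking. A self-contained sketch of the hazard and the drop-first fix; the `State` type and `wake` function are invented for the example:

```rust
use std::cell::RefCell;
use std::collections::VecDeque;
use std::rc::Rc;

#[derive(Default)]
struct State {
    queue: VecDeque<u32>,
}

// Stand-in for the unblock callback: it borrows the same RefCell again.
fn wake(state: &Rc<RefCell<State>>, thread: u32) {
    let still_queued = state.borrow().queue.len();
    println!("waking thread {thread}; {still_queued} still queued");
}

fn main() {
    let state = Rc::new(RefCell::new(State::default()));
    state.borrow_mut().queue.push_back(1);

    let mut guard = state.borrow_mut();
    let next = guard.queue.pop_front();
    // If `guard` were still alive here, the `state.borrow()` inside `wake`
    // would panic (the cell is already mutably borrowed), so drop it first.
    drop(guard);
    if let Some(thread) = next {
        wake(&state, thread);
    }
}
```
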
@@ -385,24 +406,25 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
     #[inline]
     fn mutex_enqueue_and_block(
         &mut self,
-        id: MutexId,
+        mutex_ref: &MutexRef,
         retval_dest: Option<(Scalar, MPlaceTy<'tcx>)>,
     ) {
         let this = self.eval_context_mut();
-        assert!(this.mutex_is_locked(id), "queing on unlocked mutex");
+        assert!(this.mutex_is_locked(mutex_ref), "queuing on unlocked mutex");
         let thread = this.active_thread();
-        this.machine.sync.mutexes[id].queue.push_back(thread);
+        mutex_ref.0.borrow_mut().queue.push_back(thread);
+        let mutex_ref = mutex_ref.clone();
         this.block_thread(
-            BlockReason::Mutex(id),
+            BlockReason::Mutex,
             None,
             callback!(
                 @capture<'tcx> {
-                    id: MutexId,
+                    mutex_ref: MutexRef,
                     retval_dest: Option<(Scalar, MPlaceTy<'tcx>)>,
                 }
                 @unblock = |this| {
-                    assert!(!this.mutex_is_locked(id));
-                    this.mutex_lock(id);
+                    assert!(!this.mutex_is_locked(&mutex_ref));
+                    this.mutex_lock(&mutex_ref);
 
                     if let Some((retval, dest)) = retval_dest {
                         this.write_scalar(retval, &dest)?;
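
The added `let mutex_ref = mutex_ref.clone();` is needed because the `@capture` block takes the handle by value: the callback runs later, after this function (and its borrowed `&MutexRef` argument) is gone, so it must own its own handle, and cloning an `Rc`-based handle only bumps a reference count. A small sketch of the same pattern with hypothetical `MutexHandle`/`block_thread` stand-ins:

```rust
use std::cell::RefCell;
use std::rc::Rc;

#[derive(Default, Clone)]
struct MutexHandle(Rc<RefCell<Vec<u32>>>);

// Stand-in for `block_thread`: the callback is stored and run later,
// so it must own everything it captures.
fn block_thread(pending: &mut Vec<Box<dyn FnOnce()>>, callback: impl FnOnce() + 'static) {
    pending.push(Box::new(callback));
}

fn enqueue_and_block(pending: &mut Vec<Box<dyn FnOnce()>>, handle: &MutexHandle, thread: u32) {
    handle.0.borrow_mut().push(thread);
    // Clone the cheap handle so the callback owns its own reference;
    // capturing `&MutexHandle` would tie it to this function's borrow.
    let handle = handle.clone();
    block_thread(pending, move || {
        println!("unblocked; queue length is {}", handle.0.borrow().len());
    });
}

fn main() {
    let mut pending: Vec<Box<dyn FnOnce()>> = Vec::new();
    let handle = MutexHandle::default();
    enqueue_and_block(&mut pending, &handle, 3);
    for callback in pending.drain(..) {
        callback(); // runs long after `enqueue_and_block` returned
    }
}
```
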
@@ -623,14 +645,14 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
     fn condvar_wait(
         &mut self,
         condvar: CondvarId,
-        mutex: MutexId,
+        mutex_ref: MutexRef,
         timeout: Option<(TimeoutClock, TimeoutAnchor, Duration)>,
         retval_succ: Scalar,
         retval_timeout: Scalar,
         dest: MPlaceTy<'tcx>,
     ) -> InterpResult<'tcx> {
         let this = self.eval_context_mut();
-        if let Some(old_locked_count) = this.mutex_unlock(mutex)? {
+        if let Some(old_locked_count) = this.mutex_unlock(&mutex_ref)? {
             if old_locked_count != 1 {
                 throw_unsup_format!(
                     "awaiting a condvar on a mutex acquired multiple times is not supported"
@@ -650,7 +672,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
             callback!(
                 @capture<'tcx> {
                     condvar: CondvarId,
-                    mutex: MutexId,
+                    mutex_ref: MutexRef,
                     retval_succ: Scalar,
                     retval_timeout: Scalar,
                     dest: MPlaceTy<'tcx>,
@@ -665,15 +687,15 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
                     }
                     // Try to acquire the mutex.
                     // The timeout only applies to the first wait (until the signal), not for mutex acquisition.
-                    this.condvar_reacquire_mutex(mutex, retval_succ, dest)
+                    this.condvar_reacquire_mutex(&mutex_ref, retval_succ, dest)
                 }
                 @timeout = |this| {
                     // We have to remove the waiter from the queue again.
                     let thread = this.active_thread();
                     let waiters = &mut this.machine.sync.condvars[condvar].waiters;
                     waiters.retain(|waiter| *waiter != thread);
                     // Now get back the lock.
-                    this.condvar_reacquire_mutex(mutex, retval_timeout, dest)
+                    this.condvar_reacquire_mutex(&mutex_ref, retval_timeout, dest)
                 }
             ),
         );
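
One consequence of the switch that the diff does not state but that follows from the types: a `MutexRef` is reference-counted, so the mutex state is freed once the last handle (including clones captured by callbacks like the ones above) is dropped, whereas an entry in an `IndexVec<MutexId, Mutex>` stays alive for as long as the table does. A tiny sketch of that behaviour, again with a hypothetical `MutexHandle`:

```rust
use std::cell::RefCell;
use std::rc::Rc;

#[derive(Default, Debug)]
struct MutexState {
    lock_count: usize,
}

#[derive(Default, Clone, Debug)]
struct MutexHandle(Rc<RefCell<MutexState>>);

fn main() {
    let created = MutexHandle::default();  // e.g. what mutex_create would return
    let captured = created.clone();        // e.g. held by a pending callback
    assert_eq!(Rc::strong_count(&created.0), 2);

    drop(captured);
    assert_eq!(Rc::strong_count(&created.0), 1);
    // Dropping the last handle frees the state; no central table entry remains.
    drop(created);
}
```
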