//! and we do not detect copying of the lock, but macOS doesn't guarantee anything
//! in that case either.

+ use rustc_target::abi::Size;
+
use crate::*;

- struct MacOsUnfairLock {
-     id: MutexId,
+ #[derive(Copy, Clone)]
+ enum MacOsUnfairLock {
+     Poisoned,
+     Active { id: MutexId },
}

impl<'tcx> EvalContextExtPriv<'tcx> for crate::MiriInterpCx<'tcx> {}
trait EvalContextExtPriv<'tcx>: crate::MiriInterpCxExt<'tcx> {
-     fn os_unfair_lock_getid(&mut self, lock_ptr: &OpTy<'tcx>) -> InterpResult<'tcx, MutexId> {
+     fn os_unfair_lock_get_data(
+         &mut self,
+         lock_ptr: &OpTy<'tcx>,
+     ) -> InterpResult<'tcx, MacOsUnfairLock> {
        let this = self.eval_context_mut();
        let lock = this.deref_pointer(lock_ptr)?;
-         // We store the mutex ID in the `sync` metadata. This means that when the lock is moved,
-         // that's just implicitly creating a new lock at the new location.
-         let data = this.get_sync_or_init(lock.ptr(), |machine| {
-             let id = machine.sync.mutex_create();
-             interp_ok(MacOsUnfairLock { id })
-         })?;
-         interp_ok(data.id)
+         this.lazy_sync_get_data(
+             &lock,
+             Size::ZERO, // offset for init tracking
+             || {
+                 // If we get here, due to how we reset things to zero in `os_unfair_lock_unlock`,
+                 // this means the lock was moved while locked. This can happen with a `std` lock,
+                 // but then any future attempt to unlock will just deadlock. In practice, terrible
+                 // things can probably happen if you swap two locked locks, since they'd wake up
+                 // from the wrong queue... we just won't catch all UB of this library API then (we
+                 // would need to store some unique identifier in-memory for this, instead of a static
+                 // LAZY_INIT_COOKIE). This can't be hit via `std::sync::Mutex`.
+                 interp_ok(MacOsUnfairLock::Poisoned)
+             },
+             |ecx| {
+                 let id = ecx.machine.sync.mutex_create();
+                 interp_ok(MacOsUnfairLock::Active { id })
+             },
+         )
    }
}

@@ -36,7 +54,21 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
    fn os_unfair_lock_lock(&mut self, lock_op: &OpTy<'tcx>) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();

-         let id = this.os_unfair_lock_getid(lock_op)?;
+         let MacOsUnfairLock::Active { id } = this.os_unfair_lock_get_data(lock_op)? else {
+             // Trying to get a poisoned lock. Just block forever...
+             this.block_thread(
+                 BlockReason::Sleep,
+                 None,
+                 callback!(
+                     @capture<'tcx> {}
+                     @unblock = |_this| {
+                         panic!("we shouldn't wake up ever")
+                     }
+                 ),
+             );
+             return interp_ok(());
+         };
+
        if this.mutex_is_locked(id) {
            if this.mutex_get_owner(id) == this.active_thread() {
                // Matching the current macOS implementation: abort on reentrant locking.
@@ -60,7 +92,12 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();

-         let id = this.os_unfair_lock_getid(lock_op)?;
+         let MacOsUnfairLock::Active { id } = this.os_unfair_lock_get_data(lock_op)? else {
+             // Trying to get a poisoned lock. That never works.
+             this.write_scalar(Scalar::from_bool(false), dest)?;
+             return interp_ok(());
+         };
+
        if this.mutex_is_locked(id) {
            // Contrary to the blocking lock function, this does not check for
            // reentrancy.
@@ -76,40 +113,71 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
    fn os_unfair_lock_unlock(&mut self, lock_op: &OpTy<'tcx>) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();

-         let id = this.os_unfair_lock_getid(lock_op)?;
+         let MacOsUnfairLock::Active { id } = this.os_unfair_lock_get_data(lock_op)? else {
+             // The lock is poisoned, who knows who owns it... we'll pretend: someone else.
+             throw_machine_stop!(TerminationInfo::Abort(
+                 "attempted to unlock an os_unfair_lock not owned by the current thread".to_owned()
+             ));
+         };
+
+         // Now, unlock.
        if this.mutex_unlock(id)?.is_none() {
            // Matching the current macOS implementation: abort.
            throw_machine_stop!(TerminationInfo::Abort(
                "attempted to unlock an os_unfair_lock not owned by the current thread".to_owned()
            ));
        }

+         // If the lock is not locked by anyone now, it went quiet.
+         // Reset to zero so that it can be moved and initialized again for the next phase.
+         if !this.mutex_is_locked(id) {
+             let lock_place = this.deref_pointer_as(lock_op, this.machine.layouts.u32)?;
+             this.write_scalar_atomic(Scalar::from_u32(0), &lock_place, AtomicWriteOrd::Relaxed)?;
+         }
+
        interp_ok(())
    }

    fn os_unfair_lock_assert_owner(&mut self, lock_op: &OpTy<'tcx>) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();

-         let id = this.os_unfair_lock_getid(lock_op)?;
+         let MacOsUnfairLock::Active { id } = this.os_unfair_lock_get_data(lock_op)? else {
+             // The lock is poisoned, who knows who owns it... we'll pretend: someone else.
+             throw_machine_stop!(TerminationInfo::Abort(
+                 "called os_unfair_lock_assert_owner on an os_unfair_lock not owned by the current thread".to_owned()
+             ));
+         };
        if !this.mutex_is_locked(id) || this.mutex_get_owner(id) != this.active_thread() {
            throw_machine_stop!(TerminationInfo::Abort(
                "called os_unfair_lock_assert_owner on an os_unfair_lock not owned by the current thread".to_owned()
            ));
        }

+         // The lock is definitely not quiet since we are the owner.
+
        interp_ok(())
    }

    fn os_unfair_lock_assert_not_owner(&mut self, lock_op: &OpTy<'tcx>) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();

-         let id = this.os_unfair_lock_getid(lock_op)?;
+         let MacOsUnfairLock::Active { id } = this.os_unfair_lock_get_data(lock_op)? else {
+             // The lock is poisoned, who knows who owns it... we'll pretend: someone else.
+             return interp_ok(());
+         };
        if this.mutex_is_locked(id) && this.mutex_get_owner(id) == this.active_thread() {
            throw_machine_stop!(TerminationInfo::Abort(
                "called os_unfair_lock_assert_not_owner on an os_unfair_lock owned by the current thread".to_owned()
            ));
        }

+         // If the lock is not locked by anyone now, it went quiet.
+         // Reset to zero so that it can be moved and initialized again for the next phase.
+         if !this.mutex_is_locked(id) {
+             let lock_place = this.deref_pointer_as(lock_op, this.machine.layouts.u32)?;
+             this.write_scalar_atomic(Scalar::from_u32(0), &lock_place, AtomicWriteOrd::Relaxed)?;
+         }
+
        interp_ok(())
    }
}
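
For context on how the new poisoning path gets triggered, here is a minimal standalone sketch of the lazy-init scheme the change relies on: a sentinel cookie in the lock word marks it as initialized, the word is reset to zero whenever the lock goes quiet, and a cookie found without matching side-table metadata means the bytes were copied while the lock was held. This is only an illustration under those assumptions, not Miri's actual lazy_sync_get_data implementation; the cookie value and the Machine, LockData, and get_data names are made up for the sketch.

use std::collections::HashMap;

/// Hypothetical sentinel marking the lock word as "initialized by the interpreter".
const LAZY_INIT_COOKIE: u32 = 0xcafe_f00d;

#[derive(Copy, Clone, Debug, PartialEq, Eq)]
enum LockData {
    /// Cookie found in memory but no metadata for this address: the initialized
    /// bytes were copied here, i.e. the lock was moved while it was held.
    Poisoned,
    /// Healthy lock, identified by the id of its side-table entry.
    Active { id: u32 },
}

/// Toy model of the interpreter's per-address side table for sync primitives.
#[derive(Default)]
struct Machine {
    metadata: HashMap<usize, LockData>,
    next_id: u32,
}

impl Machine {
    /// Decide, from the in-memory lock word, whether to reuse existing metadata,
    /// lazily create fresh metadata, or treat the lock as poisoned.
    fn get_data(&mut self, addr: usize, word: &mut u32) -> LockData {
        if *word == LAZY_INIT_COOKIE {
            // Cookie present: reuse metadata if we have it; otherwise the bytes were
            // copied from another (still locked) lock, so poison this one.
            *self.metadata.entry(addr).or_insert(LockData::Poisoned)
        } else {
            // No cookie (freshly zeroed, or reset to zero after going quiet):
            // create fresh metadata and stamp the cookie.
            let id = self.next_id;
            self.next_id += 1;
            let data = LockData::Active { id };
            self.metadata.insert(addr, data);
            *word = LAZY_INIT_COOKIE;
            data
        }
    }
}

fn main() {
    let mut machine = Machine::default();

    // A zero-initialized lock word is lazily initialized on first use and then reused.
    let mut word_a = 0u32;
    assert_eq!(machine.get_data(0x1000, &mut word_a), LockData::Active { id: 0 });
    assert_eq!(machine.get_data(0x1000, &mut word_a), LockData::Active { id: 0 });

    // Copying the bytes of a lock that still carries the cookie (it was never reset
    // to zero, i.e. it was moved while locked) yields a poisoned lock at the new address.
    let mut word_b = word_a;
    assert_eq!(machine.get_data(0x2000, &mut word_b), LockData::Poisoned);
}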