
Commit 9a4cd35

ensure that a macOS os_unfair_lock that is moved while being held is not implicitly unlocked

1 parent 5947a48

File tree

8 files changed: +178 -59 lines changed

src/concurrency/sync.rs

+13 -8

@@ -223,13 +223,15 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
     }
 
     /// Helper for lazily initialized `alloc_extra.sync` data:
-    /// Checks if the primitive is initialized, and return its associated data if so.
-    /// Otherwise, calls `new_data` to initialize the primitive.
+    /// Checks if the primitive is initialized:
+    /// - If yes, fetches the data from `alloc_extra.sync`, or calls `missing_data` if that fails
+    ///   and stores that in `alloc_extra.sync`.
+    /// - Otherwise, calls `new_data` to initialize the primitive.
     fn lazy_sync_get_data<T: 'static + Copy>(
         &mut self,
         primitive: &MPlaceTy<'tcx>,
         init_offset: Size,
-        name: &str,
+        missing_data: impl FnOnce() -> InterpResult<'tcx, T>,
         new_data: impl FnOnce(&mut MiriInterpCx<'tcx>) -> InterpResult<'tcx, T>,
     ) -> InterpResult<'tcx, T> {
         let this = self.eval_context_mut();
@@ -254,11 +256,14 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
             // If it is initialized, it must be found in the "sync primitive" table,
             // or else it has been moved illegally.
             let (alloc, offset, _) = this.ptr_get_alloc_id(primitive.ptr(), 0)?;
-            let alloc_extra = this.get_alloc_extra(alloc)?;
-            let data = alloc_extra
-                .get_sync::<T>(offset)
-                .ok_or_else(|| err_ub_format!("`{name}` can't be moved after first use"))?;
-            interp_ok(*data)
+            let (alloc_extra, _machine) = this.get_alloc_extra_mut(alloc)?;
+            if let Some(data) = alloc_extra.get_sync::<T>(offset) {
+                interp_ok(*data)
+            } else {
+                let data = missing_data()?;
+                alloc_extra.sync.insert(offset, Box::new(data));
+                interp_ok(data)
+            }
         } else {
             let data = new_data(this)?;
             this.lazy_sync_init(primitive, init_offset, data)?;
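
For orientation, the new control flow of `lazy_sync_get_data` can be modeled outside the interpreter with a plain map. This is an illustrative sketch only; `SyncTable` and its fields are hypothetical stand-ins for Miri's `alloc_extra.sync` and the LAZY_INIT_COOKIE check, not actual Miri types:

use std::collections::HashMap;

/// Hypothetical stand-in for per-allocation sync metadata in Miri.
struct SyncTable<T> {
    /// Models the LAZY_INIT_COOKIE stored in the primitive's memory.
    initialized: bool,
    /// Models `alloc_extra.sync`, keyed by offset in the allocation.
    data: HashMap<u64, T>,
}

impl<T: Copy> SyncTable<T> {
    /// Mirrors the new signature: `missing_data` runs when the cookie says
    /// "initialized" but no entry exists (the primitive was moved), and its
    /// result is stored back; `new_data` runs on first use.
    fn lazy_sync_get_data(
        &mut self,
        offset: u64,
        missing_data: impl FnOnce() -> Result<T, String>,
        new_data: impl FnOnce() -> Result<T, String>,
    ) -> Result<T, String> {
        if self.initialized {
            if let Some(data) = self.data.get(&offset) {
                Ok(*data)
            } else {
                let data = missing_data()?;
                self.data.insert(offset, data);
                Ok(data)
            }
        } else {
            let data = new_data()?;
            self.initialized = true;
            self.data.insert(offset, data);
            Ok(data)
        }
    }
}

fn main() {
    let mut table = SyncTable { initialized: false, data: HashMap::new() };
    // First use: `new_data` creates the metadata (a mutex id, say).
    assert_eq!(table.lazy_sync_get_data(0, || unreachable!(), || Ok(1)).unwrap(), 1);
    // Simulate a move: the cookie travels with the bytes, the table entry does not.
    table.data.clear();
    // Now `missing_data` decides: error for pthread types, poison for os_unfair_lock.
    assert!(table.lazy_sync_get_data(0, || Err("moved".to_string()), || unreachable!()).is_err());
}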

src/concurrency/thread.rs

+1 -1

@@ -59,7 +59,7 @@ macro_rules! callback {
         @unblock = |$this:ident| $unblock:block
     ) => {
         callback!(
-            @capture<$tcx, $($lft),*> { $($name: $type),+ }
+            @capture<$tcx, $($lft),*> { $($name: $type),* }
             @unblock = |$this| $unblock
             @timeout = |_this| {
                 unreachable!(
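
This one-character fix swaps the `),+` repetition (one or more) for `),*` (zero or more), so `callback!` now accepts an empty capture list; the new macOS shim below relies on this with `@capture<'tcx> {}`. A minimal standalone illustration of the difference between the two repetition operators (the `capture_names!` macro here is hypothetical, not from Miri):

// `$(...),+` requires at least one repetition; `$(...),*` also accepts zero.
macro_rules! capture_names {
    ({ $($name:ident: $ty:ty),* }) => {{
        // Collect the captured names; an empty list yields an empty slice.
        let names: &[&str] = &[$(stringify!($name)),*];
        names.len()
    }};
}

fn main() {
    assert_eq!(capture_names!({}), 0); // would be a compile error with `),+`
    assert_eq!(capture_names!({ a: u32, b: bool }), 2);
}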

src/shims/unix/macos/sync.rs

+83 -15

@@ -10,24 +10,42 @@
 //! and we do not detect copying of the lock, but macOS doesn't guarantee anything
 //! in that case either.
 
+use rustc_target::abi::Size;
+
 use crate::*;
 
-struct MacOsUnfairLock {
-    id: MutexId,
+#[derive(Copy, Clone)]
+enum MacOsUnfairLock {
+    Poisoned,
+    Active { id: MutexId },
 }
 
 impl<'tcx> EvalContextExtPriv<'tcx> for crate::MiriInterpCx<'tcx> {}
 trait EvalContextExtPriv<'tcx>: crate::MiriInterpCxExt<'tcx> {
-    fn os_unfair_lock_getid(&mut self, lock_ptr: &OpTy<'tcx>) -> InterpResult<'tcx, MutexId> {
+    fn os_unfair_lock_get_data(
+        &mut self,
+        lock_ptr: &OpTy<'tcx>,
+    ) -> InterpResult<'tcx, MacOsUnfairLock> {
         let this = self.eval_context_mut();
         let lock = this.deref_pointer(lock_ptr)?;
-        // We store the mutex ID in the `sync` metadata. This means that when the lock is moved,
-        // that's just implicitly creating a new lock at the new location.
-        let data = this.get_sync_or_init(lock.ptr(), |machine| {
-            let id = machine.sync.mutex_create();
-            interp_ok(MacOsUnfairLock { id })
-        })?;
-        interp_ok(data.id)
+        this.lazy_sync_get_data(
+            &lock,
+            Size::ZERO, // offset for init tracking
+            || {
+                // If we get here, due to how we reset things to zero in `os_unfair_lock_unlock`,
+                // this means the lock was moved while locked. This can happen with a `std` lock,
+                // but then any future attempt to unlock will just deadlock. In practice, terrible
+                // things can probably happen if you swap two locked locks, since they'd wake up
+                // from the wrong queue... we just won't catch all UB of this library API then (we
+                // would need to store some unique identifier in-memory for this, instead of a static
+                // LAZY_INIT_COOKIE). This can't be hit via `std::sync::Mutex`.
+                interp_ok(MacOsUnfairLock::Poisoned)
+            },
+            |ecx| {
+                let id = ecx.machine.sync.mutex_create();
+                interp_ok(MacOsUnfairLock::Active { id })
+            },
+        )
     }
 }
 
@@ -36,7 +54,21 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
     fn os_unfair_lock_lock(&mut self, lock_op: &OpTy<'tcx>) -> InterpResult<'tcx> {
         let this = self.eval_context_mut();
 
-        let id = this.os_unfair_lock_getid(lock_op)?;
+        let MacOsUnfairLock::Active { id } = this.os_unfair_lock_get_data(lock_op)? else {
+            // Trying to get a poisoned lock. Just block forever...
+            this.block_thread(
+                BlockReason::Sleep,
+                None,
+                callback!(
+                    @capture<'tcx> {}
+                    @unblock = |_this| {
+                        panic!("we shouldn't wake up ever")
+                    }
+                ),
+            );
+            return interp_ok(());
+        };
+
         if this.mutex_is_locked(id) {
             if this.mutex_get_owner(id) == this.active_thread() {
                 // Matching the current macOS implementation: abort on reentrant locking.
@@ -60,7 +92,12 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
     ) -> InterpResult<'tcx> {
         let this = self.eval_context_mut();
 
-        let id = this.os_unfair_lock_getid(lock_op)?;
+        let MacOsUnfairLock::Active { id } = this.os_unfair_lock_get_data(lock_op)? else {
+            // Trying to get a poisoned lock. That never works.
+            this.write_scalar(Scalar::from_bool(false), dest)?;
+            return interp_ok(());
+        };
+
         if this.mutex_is_locked(id) {
             // Contrary to the blocking lock function, this does not check for
             // reentrancy.
@@ -76,40 +113,71 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
     fn os_unfair_lock_unlock(&mut self, lock_op: &OpTy<'tcx>) -> InterpResult<'tcx> {
         let this = self.eval_context_mut();
 
-        let id = this.os_unfair_lock_getid(lock_op)?;
+        let MacOsUnfairLock::Active { id } = this.os_unfair_lock_get_data(lock_op)? else {
+            // The lock is poisoned, who knows who owns it... we'll pretend: someone else.
+            throw_machine_stop!(TerminationInfo::Abort(
+                "attempted to unlock an os_unfair_lock not owned by the current thread".to_owned()
+            ));
+        };
+
+        // Now, unlock.
         if this.mutex_unlock(id)?.is_none() {
             // Matching the current macOS implementation: abort.
             throw_machine_stop!(TerminationInfo::Abort(
                 "attempted to unlock an os_unfair_lock not owned by the current thread".to_owned()
            ));
         }
 
+        // If the lock is not locked by anyone now, it went quiet.
+        // Reset to zero so that it can be moved and initialized again for the next phase.
+        if !this.mutex_is_locked(id) {
+            let lock_place = this.deref_pointer_as(lock_op, this.machine.layouts.u32)?;
+            this.write_scalar_atomic(Scalar::from_u32(0), &lock_place, AtomicWriteOrd::Relaxed)?;
+        }
+
         interp_ok(())
     }
 
     fn os_unfair_lock_assert_owner(&mut self, lock_op: &OpTy<'tcx>) -> InterpResult<'tcx> {
         let this = self.eval_context_mut();
 
-        let id = this.os_unfair_lock_getid(lock_op)?;
+        let MacOsUnfairLock::Active { id } = this.os_unfair_lock_get_data(lock_op)? else {
+            // The lock is poisoned, who knows who owns it... we'll pretend: someone else.
+            throw_machine_stop!(TerminationInfo::Abort(
+                "called os_unfair_lock_assert_owner on an os_unfair_lock not owned by the current thread".to_owned()
+            ));
+        };
         if !this.mutex_is_locked(id) || this.mutex_get_owner(id) != this.active_thread() {
             throw_machine_stop!(TerminationInfo::Abort(
                 "called os_unfair_lock_assert_owner on an os_unfair_lock not owned by the current thread".to_owned()
            ));
         }
 
+        // The lock is definitely not quiet since we are the owner.
+
         interp_ok(())
     }
 
     fn os_unfair_lock_assert_not_owner(&mut self, lock_op: &OpTy<'tcx>) -> InterpResult<'tcx> {
         let this = self.eval_context_mut();
 
-        let id = this.os_unfair_lock_getid(lock_op)?;
+        let MacOsUnfairLock::Active { id } = this.os_unfair_lock_get_data(lock_op)? else {
+            // The lock is poisoned, who knows who owns it... we'll pretend: someone else.
+            return interp_ok(());
+        };
         if this.mutex_is_locked(id) && this.mutex_get_owner(id) == this.active_thread() {
             throw_machine_stop!(TerminationInfo::Abort(
                 "called os_unfair_lock_assert_not_owner on an os_unfair_lock owned by the current thread".to_owned()
            ));
         }
 
+        // If the lock is not locked by anyone now, it went quiet.
+        // Reset to zero so that it can be moved and initialized again for the next phase.
+        if !this.mutex_is_locked(id) {
+            let lock_place = this.deref_pointer_as(lock_op, this.machine.layouts.u32)?;
+            this.write_scalar_atomic(Scalar::from_u32(0), &lock_place, AtomicWriteOrd::Relaxed)?;
+        }
+
         interp_ok(())
     }
 }
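
Taken together, the scheme works like this: the unlock paths reset the lock word to zero once no thread holds the lock, so a zero word always means "safe to (re)initialize at this address". If the init cookie is present but no metadata is found at the current address, the lock must have been moved while held, and it is poisoned forever. A toy model of that detection logic; the cookie value, addresses, and both maps are made up for illustration:

use std::collections::HashMap;

// Made-up stand-in for LAZY_INIT_COOKIE.
const COOKIE: u32 = 0xdead_beef;

/// Toy model: `mem` holds the lock word at each address, `table` holds the
/// sync metadata, keyed by address. Returns the state of the lock at `addr`.
fn get_data(
    mem: &mut HashMap<usize, u32>,
    table: &mut HashMap<usize, &'static str>,
    addr: usize,
) -> &'static str {
    if mem.get(&addr) == Some(&COOKIE) {
        // Cookie present: metadata must exist here, unless the bytes were
        // moved from another address while the lock was held.
        *table.entry(addr).or_insert("poisoned")
    } else {
        mem.insert(addr, COOKIE);
        table.insert(addr, "active");
        "active"
    }
}

fn main() {
    let (mut mem, mut table) = (HashMap::new(), HashMap::new());
    assert_eq!(get_data(&mut mem, &mut table, 0x10), "active");
    // Move while held: the bytes (cookie included) travel to a new address,
    // but the metadata table still only knows the old address.
    let word = mem.remove(&0x10).unwrap();
    mem.insert(0x20, word);
    assert_eq!(get_data(&mut mem, &mut table, 0x20), "poisoned");
    // Moving while unlocked is fine: unlock resets the word to zero first,
    // so the new address takes the `else` branch and initializes fresh.
}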

src/shims/unix/sync.rs

+43 -28

@@ -189,11 +189,16 @@ fn mutex_get_data<'tcx, 'a>(
     mutex_ptr: &OpTy<'tcx>,
 ) -> InterpResult<'tcx, PthreadMutex> {
     let mutex = ecx.deref_pointer(mutex_ptr)?;
-    ecx.lazy_sync_get_data(&mutex, mutex_init_offset(ecx)?, "pthread_mutex_t", |ecx| {
-        let kind = mutex_kind_from_static_initializer(ecx, &mutex)?;
-        let id = ecx.machine.sync.mutex_create();
-        interp_ok(PthreadMutex { id, kind })
-    })
+    ecx.lazy_sync_get_data(
+        &mutex,
+        mutex_init_offset(ecx)?,
+        || throw_ub_format!("`pthread_mutex_t` can't be moved after first use"),
+        |ecx| {
+            let kind = mutex_kind_from_static_initializer(ecx, &mutex)?;
+            let id = ecx.machine.sync.mutex_create();
+            interp_ok(PthreadMutex { id, kind })
+        },
+    )
 }
 
 /// Returns the kind of a static initializer.
@@ -261,17 +266,22 @@ fn rwlock_get_data<'tcx>(
     rwlock_ptr: &OpTy<'tcx>,
 ) -> InterpResult<'tcx, PthreadRwLock> {
     let rwlock = ecx.deref_pointer(rwlock_ptr)?;
-    ecx.lazy_sync_get_data(&rwlock, rwlock_init_offset(ecx)?, "pthread_rwlock_t", |ecx| {
-        if !bytewise_equal_atomic_relaxed(
-            ecx,
-            &rwlock,
-            &ecx.eval_path(&["libc", "PTHREAD_RWLOCK_INITIALIZER"]),
-        )? {
-            throw_unsup_format!("unsupported static initializer used for `pthread_rwlock_t`");
-        }
-        let id = ecx.machine.sync.rwlock_create();
-        interp_ok(PthreadRwLock { id })
-    })
+    ecx.lazy_sync_get_data(
+        &rwlock,
+        rwlock_init_offset(ecx)?,
+        || throw_ub_format!("`pthread_rwlock_t` can't be moved after first use"),
+        |ecx| {
+            if !bytewise_equal_atomic_relaxed(
+                ecx,
+                &rwlock,
+                &ecx.eval_path(&["libc", "PTHREAD_RWLOCK_INITIALIZER"]),
+            )? {
+                throw_unsup_format!("unsupported static initializer used for `pthread_rwlock_t`");
+            }
+            let id = ecx.machine.sync.rwlock_create();
+            interp_ok(PthreadRwLock { id })
+        },
+    )
 }
 
 // # pthread_condattr_t
@@ -386,18 +396,23 @@ fn cond_get_data<'tcx>(
     cond_ptr: &OpTy<'tcx>,
 ) -> InterpResult<'tcx, PthreadCondvar> {
     let cond = ecx.deref_pointer(cond_ptr)?;
-    ecx.lazy_sync_get_data(&cond, cond_init_offset(ecx)?, "pthread_cond_t", |ecx| {
-        if !bytewise_equal_atomic_relaxed(
-            ecx,
-            &cond,
-            &ecx.eval_path(&["libc", "PTHREAD_COND_INITIALIZER"]),
-        )? {
-            throw_unsup_format!("unsupported static initializer used for `pthread_cond_t`");
-        }
-        // This used the static initializer. The clock there is always CLOCK_REALTIME.
-        let id = ecx.machine.sync.condvar_create();
-        interp_ok(PthreadCondvar { id, clock: ClockId::Realtime })
-    })
+    ecx.lazy_sync_get_data(
+        &cond,
+        cond_init_offset(ecx)?,
+        || throw_ub_format!("`pthread_cond_t` can't be moved after first use"),
+        |ecx| {
+            if !bytewise_equal_atomic_relaxed(
+                ecx,
+                &cond,
+                &ecx.eval_path(&["libc", "PTHREAD_COND_INITIALIZER"]),
+            )? {
+                throw_unsup_format!("unsupported static initializer used for `pthread_cond_t`");
+            }
+            // This used the static initializer. The clock there is always CLOCK_REALTIME.
+            let id = ecx.machine.sync.condvar_create();
+            interp_ok(PthreadCondvar { id, clock: ClockId::Realtime })
+        },
+    )
 }
 
 impl<'tcx> EvalContextExt<'tcx> for crate::MiriInterpCx<'tcx> {}
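
For the pthread types, a missing entry remains a hard error (moving these after first use is undefined behavior), now raised through the `missing_data` closure instead of being built from a `name` string. The kind of program this rejects looks roughly like the following sketch, written in the style of Miri's fail tests; this particular test is not part of the commit, and the exact diagnostic annotation is an assumption based on the message in the diff above:

// Sketch only, not a test from this commit.
use std::cell::UnsafeCell;

fn main() {
    let m = UnsafeCell::new(libc::PTHREAD_MUTEX_INITIALIZER);
    unsafe { libc::pthread_mutex_lock(m.get()) };
    // Moving the mutex after first use...
    let m2 = m;
    unsafe { libc::pthread_mutex_lock(m2.get()) };
    //~^ error: `pthread_mutex_t` can't be moved after first use
}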

src/shims/windows/sync.rs

+10 -5

@@ -24,11 +24,16 @@ trait EvalContextExtPriv<'tcx>: crate::MiriInterpCxExt<'tcx> {
         let init_once = this.deref_pointer(init_once_ptr)?;
         let init_offset = Size::ZERO;
 
-        this.lazy_sync_get_data(&init_once, init_offset, "INIT_ONCE", |this| {
-            // TODO: check that this is still all-zero.
-            let id = this.machine.sync.init_once_create();
-            interp_ok(WindowsInitOnce { id })
-        })
+        this.lazy_sync_get_data(
+            &init_once,
+            init_offset,
+            || throw_ub_format!("`INIT_ONCE` can't be moved after first use"),
+            |this| {
+                // TODO: check that this is still all-zero.
+                let id = this.machine.sync.init_once_create();
+                interp_ok(WindowsInitOnce { id })
+            },
+        )
     }
 
     /// Returns `true` if we were successful, `false` if we would block.
tests/fail-dep/concurrency/apple_os_unfair_lock_move_deadlock.rs

+13 -0

@@ -0,0 +1,13 @@
+//@only-target: darwin
+
+use std::cell::UnsafeCell;
+
+fn main() {
+    let lock = UnsafeCell::new(libc::OS_UNFAIR_LOCK_INIT);
+
+    unsafe { libc::os_unfair_lock_lock(lock.get()) };
+    let lock = lock;
+    // This needs to either error or deadlock.
+    unsafe { libc::os_unfair_lock_lock(lock.get()) };
+    //~^ error: deadlock
+}
tests/fail-dep/concurrency/apple_os_unfair_lock_move_deadlock.stderr

+13 -0

@@ -0,0 +1,13 @@
+error: deadlock: the evaluated program deadlocked
+  --> tests/fail-dep/concurrency/apple_os_unfair_lock_move_deadlock.rs:LL:CC
+   |
+LL |     unsafe { libc::os_unfair_lock_lock(lock.get()) };
+   |     ^ the evaluated program deadlocked
+   |
+   = note: BACKTRACE:
+   = note: inside `main` at tests/fail-dep/concurrency/apple_os_unfair_lock_move_deadlock.rs:LL:CC
+
+note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace
+
+error: aborting due to 1 previous error
+
tests/pass-dep/concurrency/apple-os-unfair-lock.rs

+2 -2

@@ -16,8 +16,8 @@ fn main() {
 
     // `os_unfair_lock`s can be moved and leaked.
     // In the real implementation, even moving it while locked is possible
-    // (and "forks" the lock, i.e. old and new location have independent wait queues);
-    // Miri behavior differs here and anyway none of this is documented.
+    // (and "forks" the lock, i.e. old and new location have independent wait queues).
+    // We only test the somewhat sane case of moving while unlocked that `std` plans to rely on.
     let lock = lock;
     let locked = unsafe { libc::os_unfair_lock_trylock(lock.get()) };
     assert!(locked);
