//! Contains macOS-specific synchronization functions.
//!
//! For `os_unfair_lock`, see the documentation
//! <https://developer.apple.com/documentation/os/synchronization?language=objc>
//! and, where the documentation is underspecified, its implementation
//! <https://github.com/apple-oss-distributions/libplatform/blob/a00a4cc36da2110578bcf3b8eeeeb93dcc7f4e11/src/os/lock.c#L645>.
//!
//! Note that we don't emulate every edge-case behaviour of the locks. Notably,
//! we don't abort when locking a lock owned by a thread that has already exited,
//! and we do not detect copying of the lock, but macOS doesn't guarantee anything
//! in those cases either.
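//!
//! As a rough illustration of the guest code these shims intercept (a sketch only;
//! it assumes the `libc` crate's usual macOS bindings such as `OS_UNFAIR_LOCK_INIT`
//! and `os_unfair_lock_lock`, which are not defined in this module):
//!
//! ```ignore
//! use libc::{os_unfair_lock, os_unfair_lock_lock, os_unfair_lock_unlock, OS_UNFAIR_LOCK_INIT};
//!
//! let mut lock: os_unfair_lock = OS_UNFAIR_LOCK_INIT;
//! unsafe {
//!     // Interpreted by `os_unfair_lock_lock` below.
//!     os_unfair_lock_lock(&mut lock);
//!     // ... critical section ...
//!     // Interpreted by `os_unfair_lock_unlock` below.
//!     os_unfair_lock_unlock(&mut lock);
//! }
//! ```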

use crate::*;

impl<'tcx> EvalContextExtPriv<'tcx> for crate::MiriInterpCx<'tcx> {}
trait EvalContextExtPriv<'tcx>: crate::MiriInterpCxExt<'tcx> {
    fn os_unfair_lock_getid(&mut self, lock_op: &OpTy<'tcx>) -> InterpResult<'tcx, MutexId> {
        let this = self.eval_context_mut();
        // os_unfair_lock holds a 32-bit value, is initialized with zero and
        // must be assumed to be opaque. Therefore, we can just store our
        // internal mutex ID in the structure without anyone noticing.
        this.mutex_get_or_create_id(lock_op, this.libc_ty_layout("os_unfair_lock"), 0)
    }
}

impl<'tcx> EvalContextExt<'tcx> for crate::MiriInterpCx<'tcx> {}
pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
    fn os_unfair_lock_lock(&mut self, lock_op: &OpTy<'tcx>) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();

        let id = this.os_unfair_lock_getid(lock_op)?;
        if this.mutex_is_locked(id) {
            if this.mutex_get_owner(id) == this.active_thread() {
                // Matching the current macOS implementation: abort on reentrant locking.
                throw_machine_stop!(TerminationInfo::Abort(
                    "attempted to lock an os_unfair_lock that is already locked by the current thread".to_owned()
                ));
            }

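            // The lock is held by another thread; block the active thread
            // until the owner releases it.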
            this.mutex_enqueue_and_block(id, None);
        } else {
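            // The lock is not held by anyone: acquire it for the active thread.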
            this.mutex_lock(id);
        }

        Ok(())
    }

    fn os_unfair_lock_trylock(
        &mut self,
        lock_op: &OpTy<'tcx>,
        dest: &MPlaceTy<'tcx>,
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();

        let id = this.os_unfair_lock_getid(lock_op)?;
        if this.mutex_is_locked(id) {
            // Contrary to the blocking lock function, this does not check for
            // reentrancy.
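            // Just return `false`, even if it is the active thread that holds the lock.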
            this.write_scalar(Scalar::from_bool(false), dest)?;
        } else {
            this.mutex_lock(id);
            this.write_scalar(Scalar::from_bool(true), dest)?;
        }

        Ok(())
    }

    fn os_unfair_lock_unlock(&mut self, lock_op: &OpTy<'tcx>) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();

        let id = this.os_unfair_lock_getid(lock_op)?;
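        // `mutex_unlock` returns `None` if the lock is not held by the active thread.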
        if this.mutex_unlock(id)?.is_none() {
            // Matching the current macOS implementation: abort.
            throw_machine_stop!(TerminationInfo::Abort(
                "attempted to unlock an os_unfair_lock not owned by the current thread".to_owned()
            ));
        }

        Ok(())
    }

    fn os_unfair_lock_assert_owner(&mut self, lock_op: &OpTy<'tcx>) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();

        let id = this.os_unfair_lock_getid(lock_op)?;
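        // Matching the current macOS implementation: the assertion aborts the process
        // if the lock is not held by the active thread.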
        if !this.mutex_is_locked(id) || this.mutex_get_owner(id) != this.active_thread() {
            throw_machine_stop!(TerminationInfo::Abort(
                "called os_unfair_lock_assert_owner on an os_unfair_lock not owned by the current thread".to_owned()
            ));
        }

        Ok(())
    }

    fn os_unfair_lock_assert_not_owner(&mut self, lock_op: &OpTy<'tcx>) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();

        let id = this.os_unfair_lock_getid(lock_op)?;
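        // Conversely to the assertion above: abort if the lock *is* held by the active thread.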
        if this.mutex_is_locked(id) && this.mutex_get_owner(id) == this.active_thread() {
            throw_machine_stop!(TerminationInfo::Abort(
                "called os_unfair_lock_assert_not_owner on an os_unfair_lock owned by the current thread".to_owned()
            ));
        }

        Ok(())
    }
}