Skip to content

Commit 6fd7e90

Browse files
committed
Auto merge of rust-lang#96042 - m-ou-se:one-reentrant-mutex, r=Amanieu
Use a single ReentrantMutex implementation on all platforms. This replaces all platform specific ReentrantMutex implementations by the one I added in rust-lang#95727 for Linux, since that one does not depend on any platform specific details. r? `@Amanieu`
2 parents 491f619 + 94f00e3 commit 6fd7e90

File tree

13 files changed

+92
-541
lines changed

13 files changed

+92
-541
lines changed

library/std/src/sys/hermit/mutex.rs

-36
Original file line number | Diff line number | Diff line change
@@ -1,6 +1,5 @@
11
use crate::cell::UnsafeCell;
22
use crate::collections::VecDeque;
3-
use crate::ffi::c_void;
43
use crate::hint;
54
use crate::ops::{Deref, DerefMut, Drop};
65
use crate::ptr;
@@ -220,38 +219,3 @@ impl Mutex {
220219
#[inline]
221220
pub unsafe fn destroy(&self) {}
222221
}
223-
224-
pub struct ReentrantMutex {
225-
inner: *const c_void,
226-
}
227-
228-
impl ReentrantMutex {
229-
pub const unsafe fn uninitialized() -> ReentrantMutex {
230-
ReentrantMutex { inner: ptr::null() }
231-
}
232-
233-
#[inline]
234-
pub unsafe fn init(&self) {
235-
let _ = abi::recmutex_init(&self.inner as *const *const c_void as *mut _);
236-
}
237-
238-
#[inline]
239-
pub unsafe fn lock(&self) {
240-
let _ = abi::recmutex_lock(self.inner);
241-
}
242-
243-
#[inline]
244-
pub unsafe fn try_lock(&self) -> bool {
245-
true
246-
}
247-
248-
#[inline]
249-
pub unsafe fn unlock(&self) {
250-
let _ = abi::recmutex_unlock(self.inner);
251-
}
252-
253-
#[inline]
254-
pub unsafe fn destroy(&self) {
255-
let _ = abi::recmutex_destroy(self.inner);
256-
}
257-
}

library/std/src/sys/itron/mutex.rs

-93
Original file line number | Diff line number | Diff line change
@@ -5,7 +5,6 @@ use super::{
55
error::{expect_success, expect_success_aborting, fail, ItronError},
66
spin::SpinIdOnceCell,
77
};
8-
use crate::cell::UnsafeCell;
98

109
pub struct Mutex {
1110
/// The ID of the underlying mutex object
@@ -89,95 +88,3 @@ impl Drop for MutexGuard<'_> {
8988
unsafe { self.0.unlock() };
9089
}
9190
}
92-
93-
// All empty stubs because this platform does not yet support threads, so lock
94-
// acquisition always succeeds.
95-
pub struct ReentrantMutex {
96-
/// The ID of the underlying mutex object
97-
mtx: abi::ID,
98-
/// The lock count.
99-
count: UnsafeCell<usize>,
100-
}
101-
102-
unsafe impl Send for ReentrantMutex {}
103-
unsafe impl Sync for ReentrantMutex {}
104-
105-
impl ReentrantMutex {
106-
pub const unsafe fn uninitialized() -> ReentrantMutex {
107-
ReentrantMutex { mtx: 0, count: UnsafeCell::new(0) }
108-
}
109-
110-
pub unsafe fn init(&mut self) {
111-
self.mtx = expect_success(
112-
unsafe {
113-
abi::acre_mtx(&abi::T_CMTX {
114-
// Priority inheritance mutex
115-
mtxatr: abi::TA_INHERIT,
116-
// Unused
117-
ceilpri: 0,
118-
})
119-
},
120-
&"acre_mtx",
121-
);
122-
}
123-
124-
pub unsafe fn lock(&self) {
125-
match unsafe { abi::loc_mtx(self.mtx) } {
126-
abi::E_OBJ => {
127-
// Recursive lock
128-
unsafe {
129-
let count = &mut *self.count.get();
130-
if let Some(new_count) = count.checked_add(1) {
131-
*count = new_count;
132-
} else {
133-
// counter overflow
134-
rtabort!("lock count overflow");
135-
}
136-
}
137-
}
138-
er => {
139-
expect_success(er, &"loc_mtx");
140-
}
141-
}
142-
}
143-
144-
pub unsafe fn unlock(&self) {
145-
unsafe {
146-
let count = &mut *self.count.get();
147-
if *count > 0 {
148-
*count -= 1;
149-
return;
150-
}
151-
}
152-
153-
expect_success_aborting(unsafe { abi::unl_mtx(self.mtx) }, &"unl_mtx");
154-
}
155-
156-
pub unsafe fn try_lock(&self) -> bool {
157-
let er = unsafe { abi::ploc_mtx(self.mtx) };
158-
if er == abi::E_OBJ {
159-
// Recursive lock
160-
unsafe {
161-
let count = &mut *self.count.get();
162-
if let Some(new_count) = count.checked_add(1) {
163-
*count = new_count;
164-
} else {
165-
// counter overflow
166-
rtabort!("lock count overflow");
167-
}
168-
}
169-
true
170-
} else if er == abi::E_TMOUT {
171-
// Locked by another thread
172-
false
173-
} else {
174-
expect_success(er, &"ploc_mtx");
175-
// Top-level lock by the current thread
176-
true
177-
}
178-
}
179-
180-
pub unsafe fn destroy(&self) {
181-
expect_success_aborting(unsafe { abi::del_mtx(self.mtx) }, &"del_mtx");
182-
}
183-
}

library/std/src/sys/sgx/mutex.rs

+1-86
Original file line number | Diff line number | Diff line change
@@ -1,8 +1,4 @@
1-
use fortanix_sgx_abi::Tcs;
2-
3-
use super::abi::thread;
4-
5-
use super::waitqueue::{try_lock_or_false, NotifiedTcs, SpinMutex, WaitQueue, WaitVariable};
1+
use super::waitqueue::{try_lock_or_false, SpinMutex, WaitQueue, WaitVariable};
62

73
pub struct Mutex {
84
inner: SpinMutex<WaitVariable<bool>>,
@@ -60,84 +56,3 @@ impl Mutex {
6056
#[inline]
6157
pub unsafe fn destroy(&self) {}
6258
}
63-
64-
struct ReentrantLock {
65-
owner: Option<Tcs>,
66-
count: usize,
67-
}
68-
69-
pub struct ReentrantMutex {
70-
inner: SpinMutex<WaitVariable<ReentrantLock>>,
71-
}
72-
73-
impl ReentrantMutex {
74-
pub const fn uninitialized() -> ReentrantMutex {
75-
ReentrantMutex {
76-
inner: SpinMutex::new(WaitVariable::new(ReentrantLock { owner: None, count: 0 })),
77-
}
78-
}
79-
80-
#[inline]
81-
pub unsafe fn init(&self) {}
82-
83-
#[inline]
84-
pub unsafe fn lock(&self) {
85-
let mut guard = self.inner.lock();
86-
match guard.lock_var().owner {
87-
Some(tcs) if tcs != thread::current() => {
88-
// Another thread has the lock, wait
89-
WaitQueue::wait(guard, || {});
90-
// Another thread has passed the lock to us
91-
}
92-
_ => {
93-
// We are just now obtaining the lock
94-
guard.lock_var_mut().owner = Some(thread::current());
95-
guard.lock_var_mut().count += 1;
96-
}
97-
}
98-
}
99-
100-
#[inline]
101-
pub unsafe fn unlock(&self) {
102-
let mut guard = self.inner.lock();
103-
if guard.lock_var().count > 1 {
104-
guard.lock_var_mut().count -= 1;
105-
} else {
106-
match WaitQueue::notify_one(guard) {
107-
Err(mut guard) => {
108-
// No other waiters, unlock
109-
guard.lock_var_mut().count = 0;
110-
guard.lock_var_mut().owner = None;
111-
}
112-
Ok(mut guard) => {
113-
// There was a thread waiting, just pass the lock
114-
if let NotifiedTcs::Single(tcs) = guard.notified_tcs() {
115-
guard.lock_var_mut().owner = Some(tcs)
116-
} else {
117-
unreachable!() // called notify_one
118-
}
119-
}
120-
}
121-
}
122-
}
123-
124-
#[inline]
125-
pub unsafe fn try_lock(&self) -> bool {
126-
let mut guard = try_lock_or_false!(self.inner);
127-
match guard.lock_var().owner {
128-
Some(tcs) if tcs != thread::current() => {
129-
// Another thread has the lock
130-
false
131-
}
132-
_ => {
133-
// We are just now obtaining the lock
134-
guard.lock_var_mut().owner = Some(thread::current());
135-
guard.lock_var_mut().count += 1;
136-
true
137-
}
138-
}
139-
}
140-
141-
#[inline]
142-
pub unsafe fn destroy(&self) {}
143-
}

library/std/src/sys/unix/locks/futex.rs

+1-97
Original file line number | Diff line number | Diff line change
@@ -1,6 +1,5 @@
1-
use crate::cell::UnsafeCell;
21
use crate::sync::atomic::{
3-
AtomicU32, AtomicUsize,
2+
AtomicU32,
43
Ordering::{Acquire, Relaxed, Release},
54
};
65
use crate::sys::futex::{futex_wait, futex_wake, futex_wake_all};
@@ -163,98 +162,3 @@ impl Condvar {
163162
r
164163
}
165164
}
166-
167-
/// A reentrant mutex. Used by stdout().lock() and friends.
168-
///
169-
/// The 'owner' field tracks which thread has locked the mutex.
170-
///
171-
/// We use current_thread_unique_ptr() as the thread identifier,
172-
/// which is just the address of a thread local variable.
173-
///
174-
/// If `owner` is set to the identifier of the current thread,
175-
/// we assume the mutex is already locked and instead of locking it again,
176-
/// we increment `lock_count`.
177-
///
178-
/// When unlocking, we decrement `lock_count`, and only unlock the mutex when
179-
/// it reaches zero.
180-
///
181-
/// `lock_count` is protected by the mutex and only accessed by the thread that has
182-
/// locked the mutex, so needs no synchronization.
183-
///
184-
/// `owner` can be checked by other threads that want to see if they already
185-
/// hold the lock, so needs to be atomic. If it compares equal, we're on the
186-
/// same thread that holds the mutex and memory access can use relaxed ordering
187-
/// since we're not dealing with multiple threads. If it compares unequal,
188-
/// synchronization is left to the mutex, making relaxed memory ordering for
189-
/// the `owner` field fine in all cases.
190-
pub struct ReentrantMutex {
191-
mutex: Mutex,
192-
owner: AtomicUsize,
193-
lock_count: UnsafeCell<u32>,
194-
}
195-
196-
unsafe impl Send for ReentrantMutex {}
197-
unsafe impl Sync for ReentrantMutex {}
198-
199-
impl ReentrantMutex {
200-
#[inline]
201-
pub const unsafe fn uninitialized() -> Self {
202-
Self { mutex: Mutex::new(), owner: AtomicUsize::new(0), lock_count: UnsafeCell::new(0) }
203-
}
204-
205-
#[inline]
206-
pub unsafe fn init(&self) {}
207-
208-
#[inline]
209-
pub unsafe fn destroy(&self) {}
210-
211-
pub unsafe fn try_lock(&self) -> bool {
212-
let this_thread = current_thread_unique_ptr();
213-
if self.owner.load(Relaxed) == this_thread {
214-
self.increment_lock_count();
215-
true
216-
} else if self.mutex.try_lock() {
217-
self.owner.store(this_thread, Relaxed);
218-
debug_assert_eq!(*self.lock_count.get(), 0);
219-
*self.lock_count.get() = 1;
220-
true
221-
} else {
222-
false
223-
}
224-
}
225-
226-
pub unsafe fn lock(&self) {
227-
let this_thread = current_thread_unique_ptr();
228-
if self.owner.load(Relaxed) == this_thread {
229-
self.increment_lock_count();
230-
} else {
231-
self.mutex.lock();
232-
self.owner.store(this_thread, Relaxed);
233-
debug_assert_eq!(*self.lock_count.get(), 0);
234-
*self.lock_count.get() = 1;
235-
}
236-
}
237-
238-
unsafe fn increment_lock_count(&self) {
239-
*self.lock_count.get() = (*self.lock_count.get())
240-
.checked_add(1)
241-
.expect("lock count overflow in reentrant mutex");
242-
}
243-
244-
pub unsafe fn unlock(&self) {
245-
*self.lock_count.get() -= 1;
246-
if *self.lock_count.get() == 0 {
247-
self.owner.store(0, Relaxed);
248-
self.mutex.unlock();
249-
}
250-
}
251-
}
252-
253-
/// Get an address that is unique per running thread.
254-
///
255-
/// This can be used as a non-null usize-sized ID.
256-
pub fn current_thread_unique_ptr() -> usize {
257-
// Use a non-drop type to make sure it's still available during thread destruction.
258-
thread_local! { static X: u8 = const { 0 } }
259-
X.with(|x| <*const _>::addr(x))
260-
}

library/std/src/sys/unix/locks/mod.rs

+1-3
Original file line numberDiff line numberDiff line change
@@ -5,15 +5,13 @@ cfg_if::cfg_if! {
55
))] {
66
mod futex;
77
mod futex_rwlock;
8-
pub use futex::{Mutex, MovableMutex, Condvar, MovableCondvar, ReentrantMutex};
8+
pub use futex::{Mutex, MovableMutex, Condvar, MovableCondvar};
99
pub use futex_rwlock::{RwLock, MovableRwLock};
1010
} else {
1111
mod pthread_mutex;
12-
mod pthread_remutex;
1312
mod pthread_rwlock;
1413
mod pthread_condvar;
1514
pub use pthread_mutex::{Mutex, MovableMutex};
16-
pub use pthread_remutex::ReentrantMutex;
1715
pub use pthread_rwlock::{RwLock, MovableRwLock};
1816
pub use pthread_condvar::{Condvar, MovableCondvar};
1917
}

0 commit comments

Comments (0)