Skip to content

Commit 204aaf4

Browse files
committed
---
yaml --- r: 150237 b: refs/heads/try2 c: 3572a30 h: refs/heads/master i: 150235: a3686ad v: v3
1 parent 4c766ee commit 204aaf4

File tree

2 files changed

+83
-62
lines changed

2 files changed

+83
-62
lines changed

[refs]

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -5,7 +5,7 @@ refs/heads/snap-stage3: 78a7676898d9f80ab540c6df5d4c9ce35bb50463
55
refs/heads/try: 519addf6277dbafccbb4159db4b710c37eaa2ec5
66
refs/tags/release-0.1: 1f5c5126e96c79d22cb7862f75304136e204f105
77
refs/heads/ndm: f3868061cd7988080c30d6d5bf352a5a5fe2460b
8-
refs/heads/try2: d6b3f1f231350798f019fdb09f6c4979fb23b8d4
8+
refs/heads/try2: 3572a30e7a4fec7f0bb0957fc72588757111f14e
99
refs/heads/dist-snap: ba4081a5a8573875fed17545846f6f6902c8ba8d
1010
refs/tags/release-0.2: c870d2dffb391e14efb05aa27898f1f6333a9596
1111
refs/tags/release-0.3: b5f0d0f648d9a6153664837026ba1be43d3e2503

branches/try2/src/libsync/sync/mutex.rs renamed to branches/try2/src/libsync/mutex.rs

Lines changed: 82 additions & 61 deletions
Original file line numberDiff line numberDiff line change
@@ -57,13 +57,16 @@
5757
// times in order to manage a few flags about who's blocking where and whether
5858
// it's locked or not.
5959

60+
use std::kinds::marker;
61+
use std::mem;
6062
use std::rt::local::Local;
6163
use std::rt::task::{BlockedTask, Task};
6264
use std::rt::thread::Thread;
6365
use std::sync::atomics;
66+
use std::ty::Unsafe;
6467
use std::unstable::mutex;
6568

66-
use q = sync::mpsc_intrusive;
69+
use q = mpsc_intrusive;
6770

6871
pub static LOCKED: uint = 1 << 0;
6972
pub static GREEN_BLOCKED: uint = 1 << 1;
@@ -85,7 +88,7 @@ pub static NATIVE_BLOCKED: uint = 1 << 2;
8588
/// ```rust
8689
/// use sync::mutex::Mutex;
8790
///
88-
/// let mut m = Mutex::new();
91+
/// let m = Mutex::new();
8992
/// let guard = m.lock();
9093
/// // do some work
9194
/// drop(guard); // unlock the lock
@@ -126,14 +129,15 @@ enum Flavor {
126129
pub struct StaticMutex {
127130
/// Current set of flags on this mutex
128131
priv state: atomics::AtomicUint,
132+
/// an OS mutex used by native threads
133+
priv lock: mutex::StaticNativeMutex,
134+
129135
/// Type of locking operation currently on this mutex
130-
priv flavor: Flavor,
136+
priv flavor: Unsafe<Flavor>,
131137
/// uint-cast of the green thread waiting for this mutex
132-
priv green_blocker: uint,
138+
priv green_blocker: Unsafe<uint>,
133139
/// uint-cast of the native thread waiting for this mutex
134-
priv native_blocker: uint,
135-
/// an OS mutex used by native threads
136-
priv lock: mutex::StaticNativeMutex,
140+
priv native_blocker: Unsafe<uint>,
137141

138142
/// A concurrent mpsc queue used by green threads, along with a count used
139143
/// to figure out when to dequeue and enqueue.
@@ -145,21 +149,24 @@ pub struct StaticMutex {
145149
/// dropped (falls out of scope), the lock will be unlocked.
146150
#[must_use]
147151
pub struct Guard<'a> {
148-
priv lock: &'a mut StaticMutex,
152+
priv lock: &'a StaticMutex,
149153
}
150154

151155
/// Static initialization of a mutex. This constant can be used to initialize
152156
/// other mutex constants.
153157
pub static MUTEX_INIT: StaticMutex = StaticMutex {
154158
lock: mutex::NATIVE_MUTEX_INIT,
155159
state: atomics::INIT_ATOMIC_UINT,
156-
flavor: Unlocked,
157-
green_blocker: 0,
158-
native_blocker: 0,
160+
flavor: Unsafe { value: Unlocked, marker1: marker::InvariantType },
161+
green_blocker: Unsafe { value: 0, marker1: marker::InvariantType },
162+
native_blocker: Unsafe { value: 0, marker1: marker::InvariantType },
159163
green_cnt: atomics::INIT_ATOMIC_UINT,
160164
q: q::Queue {
161165
head: atomics::INIT_ATOMIC_UINT,
162-
tail: 0 as *mut q::Node<uint>,
166+
tail: Unsafe {
167+
value: 0 as *mut q::Node<uint>,
168+
marker1: marker::InvariantType,
169+
},
163170
stub: q::DummyNode {
164171
next: atomics::INIT_ATOMIC_UINT,
165172
}
@@ -168,34 +175,34 @@ pub static MUTEX_INIT: StaticMutex = StaticMutex {
168175

169176
impl StaticMutex {
170177
/// Attempts to grab this lock, see `Mutex::try_lock`
171-
pub fn try_lock<'a>(&'a mut self) -> Option<Guard<'a>> {
178+
pub fn try_lock<'a>(&'a self) -> Option<Guard<'a>> {
172179
// Attempt to steal the mutex from an unlocked state.
173180
//
174181
// FIXME: this can mess up the fairness of the mutex, seems bad
175182
match self.state.compare_and_swap(0, LOCKED, atomics::SeqCst) {
176183
0 => {
177-
assert!(self.flavor == Unlocked);
178-
self.flavor = TryLockAcquisition;
184+
// After acquiring the mutex, we can safely access the inner
185+
// fields.
186+
let prev = unsafe {
187+
mem::replace(&mut *self.flavor.get(), TryLockAcquisition)
188+
};
189+
assert_eq!(prev, Unlocked);
179190
Some(Guard::new(self))
180191
}
181192
_ => None
182193
}
183194
}
184195

185196
/// Acquires this lock, see `Mutex::lock`
186-
pub fn lock<'a>(&'a mut self) -> Guard<'a> {
197+
pub fn lock<'a>(&'a self) -> Guard<'a> {
187198
// First, attempt to steal the mutex from an unlocked state. The "fast
188199
// path" needs to have as few atomic instructions as possible, and this
189200
// one cmpxchg is already pretty expensive.
190201
//
191202
// FIXME: this can mess up the fairness of the mutex, seems bad
192-
match self.state.compare_and_swap(0, LOCKED, atomics::SeqCst) {
193-
0 => {
194-
assert!(self.flavor == Unlocked);
195-
self.flavor = TryLockAcquisition;
196-
return Guard::new(self)
197-
}
198-
_ => {}
203+
match self.try_lock() {
204+
Some(guard) => return guard,
205+
None => {}
199206
}
200207

201208
// After we've failed the fast path, then we delegate to the different
@@ -219,11 +226,14 @@ impl StaticMutex {
219226
let mut old = match self.state.compare_and_swap(0, LOCKED,
220227
atomics::SeqCst) {
221228
0 => {
222-
self.flavor = if can_block {
229+
let flavor = if can_block {
223230
NativeAcquisition
224231
} else {
225232
GreenAcquisition
226233
};
234+
// We've acquired the lock, so this unsafe access to flavor is
235+
// allowed.
236+
unsafe { *self.flavor.get() = flavor; }
227237
return Guard::new(self)
228238
}
229239
old => old,
@@ -237,13 +247,15 @@ impl StaticMutex {
237247
let t: ~Task = Local::take();
238248
t.deschedule(1, |task| {
239249
let task = unsafe { task.cast_to_uint() };
240-
if can_block {
241-
assert_eq!(self.native_blocker, 0);
242-
self.native_blocker = task;
250+
251+
// These accesses are protected by the respective native/green
252+
// mutexes which were acquired above.
253+
let prev = if can_block {
254+
unsafe { mem::replace(&mut *self.native_blocker.get(), task) }
243255
} else {
244-
assert_eq!(self.green_blocker, 0);
245-
self.green_blocker = task;
246-
}
256+
unsafe { mem::replace(&mut *self.green_blocker.get(), task) }
257+
};
258+
assert_eq!(prev, 0);
247259

248260
loop {
249261
assert_eq!(old & native_bit, 0);
@@ -264,14 +276,23 @@ impl StaticMutex {
264276
old | LOCKED,
265277
atomics::SeqCst) {
266278
n if n == old => {
267-
assert_eq!(self.flavor, Unlocked);
268-
if can_block {
269-
self.native_blocker = 0;
270-
self.flavor = NativeAcquisition;
279+
// After acquiring the lock, we have access to the
280+
// flavor field, and we've regained access to our
281+
// respective native/green blocker field.
282+
let prev = if can_block {
283+
unsafe {
284+
*self.native_blocker.get() = 0;
285+
mem::replace(&mut *self.flavor.get(),
286+
NativeAcquisition)
287+
}
271288
} else {
272-
self.green_blocker = 0;
273-
self.flavor = GreenAcquisition;
274-
}
289+
unsafe {
290+
*self.green_blocker.get() = 0;
291+
mem::replace(&mut *self.flavor.get(),
292+
GreenAcquisition)
293+
}
294+
};
295+
assert_eq!(prev, Unlocked);
275296
return Err(unsafe {
276297
BlockedTask::cast_from_uint(task)
277298
})
@@ -287,16 +308,16 @@ impl StaticMutex {
287308

288309
// Tasks which can block are super easy. These tasks just call the blocking
289310
// `lock()` function on an OS mutex
290-
fn native_lock(&mut self, t: ~Task) {
311+
fn native_lock(&self, t: ~Task) {
291312
Local::put(t);
292313
unsafe { self.lock.lock_noguard(); }
293314
}
294315

295-
fn native_unlock(&mut self) {
316+
fn native_unlock(&self) {
296317
unsafe { self.lock.unlock_noguard(); }
297318
}
298319

299-
fn green_lock(&mut self, t: ~Task) {
320+
fn green_lock(&self, t: ~Task) {
300321
// Green threads flag their presence with an atomic counter, and if they
301322
// fail to be the first to the mutex, they enqueue themselves on a
302323
// concurrent internal queue with a stack-allocated node.
@@ -318,7 +339,7 @@ impl StaticMutex {
318339
});
319340
}
320341

321-
fn green_unlock(&mut self) {
342+
fn green_unlock(&self) {
322343
// If we're the only green thread, then no need to check the queue,
323344
// otherwise the fixme above forces us to spin for a bit.
324345
if self.green_cnt.fetch_sub(1, atomics::SeqCst) == 1 { return }
@@ -333,7 +354,7 @@ impl StaticMutex {
333354
task.wake().map(|t| t.reawaken());
334355
}
335356

336-
fn unlock(&mut self) {
357+
fn unlock(&self) {
337358
// Unlocking this mutex is a little tricky. We favor any task that is
338359
// manually blocked (not in each of the separate locks) in order to help
339360
// provide a little fairness (green threads will wake up the pending
@@ -351,8 +372,7 @@ impl StaticMutex {
351372
// task needs to be woken, and in this case it's ok that the "mutex
352373
// halves" are unlocked, we're just mainly dealing with the atomic state
353374
// of the outer mutex.
354-
let flavor = self.flavor;
355-
self.flavor = Unlocked;
375+
let flavor = unsafe { mem::replace(&mut *self.flavor.get(), Unlocked) };
356376

357377
let mut state = self.state.load(atomics::SeqCst);
358378
let mut unlocked = false;
@@ -362,18 +382,18 @@ impl StaticMutex {
362382
if state & GREEN_BLOCKED != 0 {
363383
self.unset(state, GREEN_BLOCKED);
364384
task = unsafe {
365-
BlockedTask::cast_from_uint(self.green_blocker)
385+
*self.flavor.get() = GreenAcquisition;
386+
let task = mem::replace(&mut *self.green_blocker.get(), 0);
387+
BlockedTask::cast_from_uint(task)
366388
};
367-
self.green_blocker = 0;
368-
self.flavor = GreenAcquisition;
369389
break;
370390
} else if state & NATIVE_BLOCKED != 0 {
371391
self.unset(state, NATIVE_BLOCKED);
372392
task = unsafe {
373-
BlockedTask::cast_from_uint(self.native_blocker)
393+
*self.flavor.get() = NativeAcquisition;
394+
let task = mem::replace(&mut *self.native_blocker.get(), 0);
395+
BlockedTask::cast_from_uint(task)
374396
};
375-
self.native_blocker = 0;
376-
self.flavor = NativeAcquisition;
377397
break;
378398
} else {
379399
assert_eq!(state, LOCKED);
@@ -405,7 +425,7 @@ impl StaticMutex {
405425
}
406426

407427
/// Loops around a CAS to unset the `bit` in `state`
408-
fn unset(&mut self, mut state: uint, bit: uint) {
428+
fn unset(&self, mut state: uint, bit: uint) {
409429
loop {
410430
assert!(state & bit != 0);
411431
let new = state ^ bit;
@@ -426,7 +446,7 @@ impl StaticMutex {
426446
/// *all* platforms. It may be the case that some platforms do not leak
427447
/// memory if this method is not called, but this is not guaranteed to be
428448
/// true on all platforms.
429-
pub unsafe fn destroy(&mut self) {
449+
pub unsafe fn destroy(&self) {
430450
self.lock.destroy()
431451
}
432452
}
@@ -437,9 +457,9 @@ impl Mutex {
437457
Mutex {
438458
lock: StaticMutex {
439459
state: atomics::AtomicUint::new(0),
440-
flavor: Unlocked,
441-
green_blocker: 0,
442-
native_blocker: 0,
460+
flavor: Unsafe::new(Unlocked),
461+
green_blocker: Unsafe::new(0),
462+
native_blocker: Unsafe::new(0),
443463
green_cnt: atomics::AtomicUint::new(0),
444464
q: q::Queue::new(),
445465
lock: unsafe { mutex::StaticNativeMutex::new() },
@@ -454,7 +474,7 @@ impl Mutex {
454474
/// guard is dropped.
455475
///
456476
/// This function does not block.
457-
pub fn try_lock<'a>(&'a mut self) -> Option<Guard<'a>> {
477+
pub fn try_lock<'a>(&'a self) -> Option<Guard<'a>> {
458478
self.lock.try_lock()
459479
}
460480

@@ -464,13 +484,14 @@ impl Mutex {
464484
/// the mutex. Upon returning, the task is the only task with the mutex
465485
/// held. An RAII guard is returned to allow scoped unlock of the lock. When
466486
/// the guard goes out of scope, the mutex will be unlocked.
467-
pub fn lock<'a>(&'a mut self) -> Guard<'a> { self.lock.lock() }
487+
pub fn lock<'a>(&'a self) -> Guard<'a> { self.lock.lock() }
468488
}
469489

470490
impl<'a> Guard<'a> {
471-
fn new<'b>(lock: &'b mut StaticMutex) -> Guard<'b> {
491+
fn new<'b>(lock: &'b StaticMutex) -> Guard<'b> {
472492
if cfg!(debug) {
473-
assert!(lock.flavor != Unlocked);
493+
// once we've acquired a lock, it's ok to access the flavor
494+
assert!(unsafe { *lock.flavor.get() != Unlocked });
474495
assert!(lock.state.load(atomics::SeqCst) & LOCKED != 0);
475496
}
476497
Guard { lock: lock }
@@ -501,7 +522,7 @@ mod test {
501522

502523
#[test]
503524
fn smoke() {
504-
let mut m = Mutex::new();
525+
let m = Mutex::new();
505526
drop(m.lock());
506527
drop(m.lock());
507528
}
@@ -552,7 +573,7 @@ mod test {
552573

553574
#[test]
554575
fn trylock() {
555-
let mut m = Mutex::new();
576+
let m = Mutex::new();
556577
assert!(m.try_lock().is_some());
557578
}
558579
}

0 commit comments

Comments
 (0)