 // times in order to manage a few flags about who's blocking where and whether
 // it's locked or not.

+use std::kinds::marker;
+use std::mem;
 use std::rt::local::Local;
 use std::rt::task::{BlockedTask, Task};
 use std::rt::thread::Thread;
 use std::sync::atomics;
+use std::ty::Unsafe;
 use std::unstable::mutex;

-use q = sync::mpsc_intrusive;
+use q = mpsc_intrusive;

 pub static LOCKED: uint = 1 << 0;
 pub static GREEN_BLOCKED: uint = 1 << 1;
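
These constants form the bitmask protocol that the rest of the patch manipulates via compare-and-swap: the low bit records whether the mutex is held, and the two blocked bits (including `NATIVE_BLOCKED` in the next hunk) record which kind of thread is parked on it. A small illustrative sketch of how the bits combine:

```rust
// A mutex that is currently locked with a green task parked on it
// (constants as defined above; this snippet is illustrative only):
let state = LOCKED | GREEN_BLOCKED;
assert!(state & LOCKED != 0);         // held
assert!(state & GREEN_BLOCKED != 0);  // a green thread is waiting
assert!(state & NATIVE_BLOCKED == 0); // no native thread is waiting
```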
@@ -85,7 +88,7 @@ pub static NATIVE_BLOCKED: uint = 1 << 2;
 /// ```rust
 /// use sync::mutex::Mutex;
 ///
-/// let mut m = Mutex::new();
+/// let m = Mutex::new();
 /// let guard = m.lock();
 /// // do some work
 /// drop(guard); // unlock the lock
@@ -126,14 +129,15 @@ enum Flavor {
 pub struct StaticMutex {
     /// Current set of flags on this mutex
     priv state: atomics::AtomicUint,
+    /// an OS mutex used by native threads
+    priv lock: mutex::StaticNativeMutex,
+
     /// Type of locking operation currently on this mutex
-    priv flavor: Flavor,
+    priv flavor: Unsafe<Flavor>,
     /// uint-cast of the green thread waiting for this mutex
-    priv green_blocker: uint,
+    priv green_blocker: Unsafe<uint>,
     /// uint-cast of the native thread waiting for this mutex
-    priv native_blocker: uint,
-    /// an OS mutex used by native threads
-    priv lock: mutex::StaticNativeMutex,
+    priv native_blocker: Unsafe<uint>,

     /// A concurrent mpsc queue used by green threads, along with a count used
     /// to figure out when to dequeue and enqueue.
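
The switch to `Unsafe<T>` wrappers is what makes the `&self` methods below possible: the wrapped fields become mutable through a shared reference, with the lock protocol standing in for the borrow checker. For reference, `std::ty::Unsafe` of this era (later renamed `UnsafeCell`) was roughly the following; a from-memory sketch, not the verbatim definition:

```rust
// Approximate shape of std::ty::Unsafe at the time. The public fields
// matter: statics like MUTEX_INIT below cannot call Unsafe::new(), so
// they spell out the struct literal by hand.
pub struct Unsafe<T> {
    pub value: T,
    pub marker1: marker::InvariantType<T>,
}

impl<T> Unsafe<T> {
    pub fn new(value: T) -> Unsafe<T> {
        Unsafe { value: value, marker1: marker::InvariantType }
    }

    /// Caller must guarantee exclusive access, e.g. by holding the lock.
    pub unsafe fn get(&self) -> *mut T {
        &self.value as *T as *mut T
    }
}
```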
@@ -145,21 +149,24 @@ pub struct StaticMutex {
 /// dropped (falls out of scope), the lock will be unlocked.
 #[must_use]
 pub struct Guard<'a> {
-    priv lock: &'a mut StaticMutex,
+    priv lock: &'a StaticMutex,
 }

 /// Static initialization of a mutex. This constant can be used to initialize
 /// other mutex constants.
 pub static MUTEX_INIT: StaticMutex = StaticMutex {
     lock: mutex::NATIVE_MUTEX_INIT,
     state: atomics::INIT_ATOMIC_UINT,
-    flavor: Unlocked,
-    green_blocker: 0,
-    native_blocker: 0,
+    flavor: Unsafe { value: Unlocked, marker1: marker::InvariantType },
+    green_blocker: Unsafe { value: 0, marker1: marker::InvariantType },
+    native_blocker: Unsafe { value: 0, marker1: marker::InvariantType },
     green_cnt: atomics::INIT_ATOMIC_UINT,
     q: q::Queue {
         head: atomics::INIT_ATOMIC_UINT,
-        tail: 0 as *mut q::Node<uint>,
+        tail: Unsafe {
+            value: 0 as *mut q::Node<uint>,
+            marker1: marker::InvariantType,
+        },
         stub: q::DummyNode {
             next: atomics::INIT_ATOMIC_UINT,
         }
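
This static-initializer contortion pays off at the use site: a mutex can now live in a plain `static` (not `static mut`) and be locked through a shared reference. A minimal sketch (`LOCK` and `with_lock` are illustrative names, not part of the patch):

```rust
static LOCK: StaticMutex = MUTEX_INIT;

fn with_lock() {
    // lock() takes &self, so no `static mut` and no unsafe block is
    // needed at the call site.
    let guard = LOCK.lock();
    // ... critical section ...
    drop(guard); // or just let the guard fall out of scope
}
```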
@@ -168,34 +175,34 @@ pub static MUTEX_INIT: StaticMutex = StaticMutex {

 impl StaticMutex {
     /// Attempts to grab this lock, see `Mutex::try_lock`
-    pub fn try_lock<'a>(&'a mut self) -> Option<Guard<'a>> {
+    pub fn try_lock<'a>(&'a self) -> Option<Guard<'a>> {
         // Attempt to steal the mutex from an unlocked state.
         //
         // FIXME: this can mess up the fairness of the mutex, seems bad
         match self.state.compare_and_swap(0, LOCKED, atomics::SeqCst) {
             0 => {
-                assert!(self.flavor == Unlocked);
-                self.flavor = TryLockAcquisition;
+                // After acquiring the mutex, we can safely access the inner
+                // fields.
+                let prev = unsafe {
+                    mem::replace(&mut *self.flavor.get(), TryLockAcquisition)
+                };
+                assert_eq!(prev, Unlocked);
                 Some(Guard::new(self))
             }
             _ => None
         }
     }

     /// Acquires this lock, see `Mutex::lock`
-    pub fn lock<'a>(&'a mut self) -> Guard<'a> {
+    pub fn lock<'a>(&'a self) -> Guard<'a> {
         // First, attempt to steal the mutex from an unlocked state. The "fast
         // path" needs to have as few atomic instructions as possible, and this
         // one cmpxchg is already pretty expensive.
         //
         // FIXME: this can mess up the fairness of the mutex, seems bad
-        match self.state.compare_and_swap(0, LOCKED, atomics::SeqCst) {
-            0 => {
-                assert!(self.flavor == Unlocked);
-                self.flavor = TryLockAcquisition;
-                return Guard::new(self)
-            }
-            _ => {}
+        match self.try_lock() {
+            Some(guard) => return guard,
+            None => {}
         }

         // After we've failed the fast path, then we delegate to the different
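
Throughout the patch, `mem::replace` does double duty: it stores the new flavor and returns the previous one, letting the old `assert!` survive as an `assert_eq!` on the returned value. The idiom in isolation (a standalone sketch; `demo` and the stand-in values are illustrative):

```rust
use std::mem;

fn demo() {
    let mut flavor = 0u;                      // stands in for Unlocked
    let prev = mem::replace(&mut flavor, 1u); // stands in for TryLockAcquisition
    assert_eq!(prev, 0u);   // the old value comes back out
    assert_eq!(flavor, 1u); // the new value is in place
}
```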
@@ -219,11 +226,14 @@ impl StaticMutex {
         let mut old = match self.state.compare_and_swap(0, LOCKED,
                                                         atomics::SeqCst) {
             0 => {
-                self.flavor = if can_block {
+                let flavor = if can_block {
                     NativeAcquisition
                 } else {
                     GreenAcquisition
                 };
+                // We've acquired the lock, so this unsafe access to flavor is
+                // allowed.
+                unsafe { *self.flavor.get() = flavor; }
                 return Guard::new(self)
             }
             old => old,
@@ -237,13 +247,15 @@ impl StaticMutex {
         let t: ~Task = Local::take();
         t.deschedule(1, |task| {
             let task = unsafe { task.cast_to_uint() };
-            if can_block {
-                assert_eq!(self.native_blocker, 0);
-                self.native_blocker = task;
+
+            // These accesses are protected by the respective native/green
+            // mutexes which were acquired above.
+            let prev = if can_block {
+                unsafe { mem::replace(&mut *self.native_blocker.get(), task) }
             } else {
-                assert_eq!(self.green_blocker, 0);
-                self.green_blocker = task;
-            }
+                unsafe { mem::replace(&mut *self.green_blocker.get(), task) }
+            };
+            assert_eq!(prev, 0);

             loop {
                 assert_eq!(old & native_bit, 0);
@@ -264,14 +276,23 @@ impl StaticMutex {
                                                        old | LOCKED,
                                                        atomics::SeqCst) {
                     n if n == old => {
-                        assert_eq!(self.flavor, Unlocked);
-                        if can_block {
-                            self.native_blocker = 0;
-                            self.flavor = NativeAcquisition;
+                        // After acquiring the lock, we have access to the
+                        // flavor field, and we've regained access to our
+                        // respective native/green blocker field.
+                        let prev = if can_block {
+                            unsafe {
+                                *self.native_blocker.get() = 0;
+                                mem::replace(&mut *self.flavor.get(),
+                                             NativeAcquisition)
+                            }
                         } else {
-                            self.green_blocker = 0;
-                            self.flavor = GreenAcquisition;
-                        }
+                            unsafe {
+                                *self.green_blocker.get() = 0;
+                                mem::replace(&mut *self.flavor.get(),
+                                             GreenAcquisition)
+                            }
+                        };
+                        assert_eq!(prev, Unlocked);
                         return Err(unsafe {
                             BlockedTask::cast_from_uint(task)
                         })
@@ -287,16 +308,16 @@ impl StaticMutex {

     // Tasks which can block are super easy. These tasks just call the blocking
     // `lock()` function on an OS mutex
-    fn native_lock(&mut self, t: ~Task) {
+    fn native_lock(&self, t: ~Task) {
         Local::put(t);
         unsafe { self.lock.lock_noguard(); }
     }

-    fn native_unlock(&mut self) {
+    fn native_unlock(&self) {
         unsafe { self.lock.unlock_noguard(); }
     }

-    fn green_lock(&mut self, t: ~Task) {
+    fn green_lock(&self, t: ~Task) {
         // Green threads flag their presence with an atomic counter, and if they
         // fail to be the first to the mutex, they enqueue themselves on a
         // concurrent internal queue with a stack-allocated node.
@@ -318,7 +339,7 @@ impl StaticMutex {
         });
     }

-    fn green_unlock(&mut self) {
+    fn green_unlock(&self) {
         // If we're the only green thread, then no need to check the queue,
         // otherwise the fixme above forces us to spin for a bit.
         if self.green_cnt.fetch_sub(1, atomics::SeqCst) == 1 { return }
@@ -333,7 +354,7 @@ impl StaticMutex {
         task.wake().map(|t| t.reawaken());
     }

-    fn unlock(&mut self) {
+    fn unlock(&self) {
         // Unlocking this mutex is a little tricky. We favor any task that is
         // manually blocked (not in each of the separate locks) in order to help
         // provide a little fairness (green threads will wake up the pending
@@ -351,8 +372,7 @@ impl StaticMutex {
         // task needs to be woken, and in this case it's ok that the "mutex
         // halves" are unlocked, we're just mainly dealing with the atomic state
         // of the outer mutex.
-        let flavor = self.flavor;
-        self.flavor = Unlocked;
+        let flavor = unsafe { mem::replace(&mut *self.flavor.get(), Unlocked) };

         let mut state = self.state.load(atomics::SeqCst);
         let mut unlocked = false;
@@ -362,18 +382,18 @@ impl StaticMutex {
             if state & GREEN_BLOCKED != 0 {
                 self.unset(state, GREEN_BLOCKED);
                 task = unsafe {
-                    BlockedTask::cast_from_uint(self.green_blocker)
+                    *self.flavor.get() = GreenAcquisition;
+                    let task = mem::replace(&mut *self.green_blocker.get(), 0);
+                    BlockedTask::cast_from_uint(task)
                 };
-                self.green_blocker = 0;
-                self.flavor = GreenAcquisition;
                 break;
             } else if state & NATIVE_BLOCKED != 0 {
                 self.unset(state, NATIVE_BLOCKED);
                 task = unsafe {
-                    BlockedTask::cast_from_uint(self.native_blocker)
+                    *self.flavor.get() = NativeAcquisition;
+                    let task = mem::replace(&mut *self.native_blocker.get(), 0);
+                    BlockedTask::cast_from_uint(task)
                 };
-                self.native_blocker = 0;
-                self.flavor = NativeAcquisition;
                 break;
             } else {
                 assert_eq!(state, LOCKED);
@@ -405,7 +425,7 @@ impl StaticMutex {
     }

     /// Loops around a CAS to unset the `bit` in `state`
-    fn unset(&mut self, mut state: uint, bit: uint) {
+    fn unset(&self, mut state: uint, bit: uint) {
         loop {
             assert!(state & bit != 0);
             let new = state ^ bit;
@@ -426,7 +446,7 @@ impl StaticMutex {
     /// *all* platforms. It may be the case that some platforms do not leak
     /// memory if this method is not called, but this is not guaranteed to be
     /// true on all platforms.
-    pub unsafe fn destroy(&mut self) {
+    pub unsafe fn destroy(&self) {
         self.lock.destroy()
     }
 }
@@ -437,9 +457,9 @@ impl Mutex {
         Mutex {
             lock: StaticMutex {
                 state: atomics::AtomicUint::new(0),
-                flavor: Unlocked,
-                green_blocker: 0,
-                native_blocker: 0,
+                flavor: Unsafe::new(Unlocked),
+                green_blocker: Unsafe::new(0),
+                native_blocker: Unsafe::new(0),
                 green_cnt: atomics::AtomicUint::new(0),
                 q: q::Queue::new(),
                 lock: unsafe { mutex::StaticNativeMutex::new() },
@@ -454,7 +474,7 @@ impl Mutex {
     /// guard is dropped.
     ///
     /// This function does not block.
-    pub fn try_lock<'a>(&'a mut self) -> Option<Guard<'a>> {
+    pub fn try_lock<'a>(&'a self) -> Option<Guard<'a>> {
         self.lock.try_lock()
     }

@@ -464,13 +484,14 @@ impl Mutex {
     /// the mutex. Upon returning, the task is the only task with the mutex
     /// held. An RAII guard is returned to allow scoped unlock of the lock. When
     /// the guard goes out of scope, the mutex will be unlocked.
-    pub fn lock<'a>(&'a mut self) -> Guard<'a> { self.lock.lock() }
+    pub fn lock<'a>(&'a self) -> Guard<'a> { self.lock.lock() }
 }

 impl<'a> Guard<'a> {
-    fn new<'b>(lock: &'b mut StaticMutex) -> Guard<'b> {
+    fn new<'b>(lock: &'b StaticMutex) -> Guard<'b> {
         if cfg!(debug) {
-            assert!(lock.flavor != Unlocked);
+            // once we've acquired a lock, it's ok to access the flavor
+            assert!(unsafe { *lock.flavor.get() != Unlocked });
             assert!(lock.state.load(atomics::SeqCst) & LOCKED != 0);
         }
         Guard { lock: lock }
@@ -501,7 +522,7 @@ mod test {

     #[test]
     fn smoke() {
-        let mut m = Mutex::new();
+        let m = Mutex::new();
         drop(m.lock());
         drop(m.lock());
     }
@@ -552,7 +573,7 @@ mod test {

     #[test]
     fn trylock() {
-        let mut m = Mutex::new();
+        let m = Mutex::new();
         assert!(m.try_lock().is_some());
     }
 }
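
The net effect of all the signature changes: callers can lock a mutex through an ordinary shared borrow, which is what sharing one mutex between tasks (e.g. behind an `Arc`) ultimately requires. A minimal sketch of the new borrow behavior (`demo` is an illustrative function, not part of the patch):

```rust
fn demo(m: &Mutex) {
    // Under &mut self, holding a guard while touching the mutex again
    // would not have borrow-checked; under &self it does, and the
    // internal state machine enforces the actual mutual exclusion.
    let guard = m.try_lock();
    assert!(guard.is_some());
    assert!(m.try_lock().is_none()); // still held above, so this fails
}
```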