@@ -40,6 +40,7 @@ use option::{Some, None, Option};
 use owned::Box;
 use ptr::RawPtr;
 use sync::atomics::{AtomicPtr, Relaxed, AtomicUint, Acquire, Release};
+use ty::Unsafe;
 
 // Node within the linked list queue of messages to send
 struct Node<T> {
@@ -56,13 +57,13 @@ struct Node<T> {
 /// time.
 pub struct Queue<T> {
     // consumer fields
-    tail: *mut Node<T>, // where to pop from
+    tail: Unsafe<*mut Node<T>>, // where to pop from
     tail_prev: AtomicPtr<Node<T>>, // where to pop from
 
     // producer fields
-    head: *mut Node<T>, // where to push to
-    first: *mut Node<T>, // where to get new nodes from
-    tail_copy: *mut Node<T>, // between first/tail
+    head: Unsafe<*mut Node<T>>, // where to push to
+    first: Unsafe<*mut Node<T>>, // where to get new nodes from
+    tail_copy: Unsafe<*mut Node<T>>, // between first/tail
 
     // Cache maintenance fields. Additions and subtractions are stored
     // separately in order to allow them to use nonatomic addition/subtraction.
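Note on ty::Unsafe: it is the interior-mutability primitive of this era (the type that later became std::cell::UnsafeCell). Its get() hands back a raw *mut to the wrapped value, which is what lets the queue rewrite these pointer fields through &self inside unsafe blocks instead of requiring &mut self. A minimal sketch of the same idea in current Rust, with a hypothetical Cursor type standing in for the queue's consumer pointer (not code from this file):

use std::cell::UnsafeCell;
use std::ptr;

// Hypothetical stand-in for one of the queue's pointer fields: a raw pointer
// wrapped in UnsafeCell so it can be rewritten through a shared reference.
struct Cursor {
    tail: UnsafeCell<*mut u8>,
}

impl Cursor {
    fn new() -> Cursor {
        Cursor { tail: UnsafeCell::new(ptr::null_mut()) }
    }

    // Takes &self, not &mut self: the caller guarantees exclusive consumer
    // access, exactly like the queue's single-popper contract.
    fn advance(&self, next: *mut u8) {
        unsafe {
            // UnsafeCell::get() returns *mut *mut u8; writing through it is
            // the same move the patch makes with `*self.tail.get() = next`.
            *self.tail.get() = next;
        }
    }
}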
@@ -101,11 +102,11 @@ impl<T: Send> Queue<T> {
         let n2 = Node::new();
         unsafe { (*n1).next.store(n2, Relaxed) }
         Queue {
-            tail: n2,
+            tail: Unsafe::new(n2),
             tail_prev: AtomicPtr::new(n1),
-            head: n2,
-            first: n1,
-            tail_copy: n1,
+            head: Unsafe::new(n2),
+            first: Unsafe::new(n1),
+            tail_copy: Unsafe::new(n1),
             cache_bound: bound,
             cache_additions: AtomicUint::new(0),
             cache_subtractions: AtomicUint::new(0),
@@ -114,43 +115,43 @@ impl<T: Send> Queue<T> {
 
     /// Pushes a new value onto this queue. Note that to use this function
     /// safely, it must be externally guaranteed that there is only one pusher.
-    pub fn push(&mut self, t: T) {
+    pub fn push(&self, t: T) {
         unsafe {
             // Acquire a node (which either uses a cached one or allocates a new
             // one), and then append this to the 'head' node.
             let n = self.alloc();
             assert!((*n).value.is_none());
             (*n).value = Some(t);
             (*n).next.store(0 as *mut Node<T>, Relaxed);
-            (*self.head).next.store(n, Release);
-            self.head = n;
+            (**self.head.get()).next.store(n, Release);
+            *self.head.get() = n;
         }
     }
 
-    unsafe fn alloc(&mut self) -> *mut Node<T> {
+    unsafe fn alloc(&self) -> *mut Node<T> {
         // First try to see if we can consume the 'first' node for our uses.
         // We try to avoid as many atomic instructions as possible here, so
         // the addition to cache_subtractions is not atomic (plus we're the
         // only one subtracting from the cache).
-        if self.first != self.tail_copy {
+        if *self.first.get() != *self.tail_copy.get() {
             if self.cache_bound > 0 {
                 let b = self.cache_subtractions.load(Relaxed);
                 self.cache_subtractions.store(b + 1, Relaxed);
             }
-            let ret = self.first;
-            self.first = (*ret).next.load(Relaxed);
+            let ret = *self.first.get();
+            *self.first.get() = (*ret).next.load(Relaxed);
             return ret;
         }
         // If the above fails, then update our copy of the tail and try
         // again.
-        self.tail_copy = self.tail_prev.load(Acquire);
-        if self.first != self.tail_copy {
+        *self.tail_copy.get() = self.tail_prev.load(Acquire);
+        if *self.first.get() != *self.tail_copy.get() {
             if self.cache_bound > 0 {
                 let b = self.cache_subtractions.load(Relaxed);
                 self.cache_subtractions.store(b + 1, Relaxed);
             }
-            let ret = self.first;
-            self.first = (*ret).next.load(Relaxed);
+            let ret = *self.first.get();
+            *self.first.get() = (*ret).next.load(Relaxed);
             return ret;
         }
         // If all of that fails, then we have to allocate a new node
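Side note on why the &self version stays sound across the two threads (the orderings themselves are unchanged by this patch): the producer publishes a node by release-storing it into the predecessor's next pointer, and the consumer acquire-loads that same pointer, so a consumer that observes the node also observes the `(*n).value = Some(t)` write made before the store. A small self-contained sketch of that release/acquire handoff using today's std::sync::atomic::AtomicPtr (names are illustrative, not taken from this file; it runs single-threaded only to keep the sketch short):

use std::sync::atomic::{AtomicPtr, Ordering};

fn publish_and_consume() {
    // A slot standing in for a node's `next` field.
    let slot = AtomicPtr::new(std::ptr::null_mut::<u32>());

    // Producer side: fully initialize the payload, then release-store the
    // pointer, mirroring push's `next.store(n, Release)`.
    let payload = Box::into_raw(Box::new(42u32));
    slot.store(payload, Ordering::Release);

    // Consumer side: an acquire-load that sees the pointer also sees the
    // writes behind it, mirroring pop's `next.load(Acquire)`.
    let seen = slot.load(Ordering::Acquire);
    if !seen.is_null() {
        let value = unsafe { Box::from_raw(seen) };
        assert_eq!(*value, 42);
    }
}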
@@ -160,19 +161,19 @@ impl<T: Send> Queue<T> {
 
     /// Attempts to pop a value from this queue. Remember that to use this type
     /// safely you must ensure that there is only one popper at a time.
-    pub fn pop(&mut self) -> Option<T> {
+    pub fn pop(&self) -> Option<T> {
         unsafe {
             // The `tail` node is not actually a used node, but rather a
             // sentinel from where we should start popping from. Hence, look at
             // tail's next field and see if we can use it. If we do a pop, then
             // the current tail node is a candidate for going into the cache.
-            let tail = self.tail;
+            let tail = *self.tail.get();
             let next = (*tail).next.load(Acquire);
             if next.is_null() { return None }
             assert!((*next).value.is_some());
             let ret = (*next).value.take();
 
-            self.tail = next;
+            *self.tail.get() = next;
             if self.cache_bound == 0 {
                 self.tail_prev.store(tail, Release);
             } else {
@@ -197,11 +198,11 @@ impl<T: Send> Queue<T> {
 
     /// Attempts to peek at the head of the queue, returning `None` if the queue
     /// has no data currently
-    pub fn peek<'a>(&'a mut self) -> Option<&'a mut T> {
+    pub fn peek<'a>(&'a self) -> Option<&'a mut T> {
         // This is essentially the same as above with all the popping bits
         // stripped out.
         unsafe {
-            let tail = self.tail;
+            let tail = *self.tail.get();
             let next = (*tail).next.load(Acquire);
             if next.is_null() { return None }
             return (*next).value.as_mut();
@@ -213,7 +214,7 @@ impl<T: Send> Queue<T> {
 impl<T: Send> Drop for Queue<T> {
     fn drop(&mut self) {
         unsafe {
-            let mut cur = self.first;
+            let mut cur = *self.first.get();
             while !cur.is_null() {
                 let next = (*cur).next.load(Relaxed);
                 let _n: Box<Node<T>> = mem::transmute(cur);
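Net effect of the signature changes: push, pop, and peek now take &self, so the single-pusher/single-popper rules live entirely in the documented contract (and in the callers) rather than in &mut borrows, which is what makes it possible for producer and consumer to each hold a shared handle. A hedged sketch of why a &self API composes with shared ownership; ToyQueue and its atomic counter are stand-ins, not this module's API:

use std::sync::Arc;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::thread;

// Toy type with the same shape of API as the patched queue: methods take
// &self and rely on interior mutability (here an atomic counter rather than
// Unsafe<*mut Node<T>>).
struct ToyQueue {
    pushed: AtomicUsize,
}

impl ToyQueue {
    fn push(&self, _t: u32) {
        self.pushed.fetch_add(1, Ordering::Release);
    }

    fn pushed_so_far(&self) -> usize {
        self.pushed.load(Ordering::Acquire)
    }
}

fn main() {
    let q = Arc::new(ToyQueue { pushed: AtomicUsize::new(0) });
    let producer = Arc::clone(&q);
    // `&self` is all the spawned closure needs; with `&mut self` methods this
    // kind of sharing would not compile without a lock.
    let t = thread::spawn(move || producer.push(1));
    t.join().unwrap();
    assert_eq!(q.pushed_so_far(), 1);
}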