 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-use option::*;
-use super::stack::StackSegment;
-use libc::c_void;
-use uint;
-use cast::{transmute, transmute_mut_unsafe,
-           transmute_region, transmute_mut_region};
+use std::libc::c_void;
+use std::uint;
+use std::cast::{transmute, transmute_mut_unsafe,
+                transmute_region, transmute_mut_region};
+use std::unstable::stack;
 
-pub static RED_ZONE: uint = 20 * 1024;
+use stack::StackSegment;
 
 // FIXME #7761: Registers is boxed so that it is 16-byte aligned, for storing
 // SSE regs. It would be marginally better not to do this. In C++ we
@@ -25,7 +24,7 @@ pub static RED_ZONE: uint = 20 * 1024;
 // then misalign the regs again.
 pub struct Context {
     /// The context entry point, saved here for later destruction
-    priv start: Option<~proc()>,
+    priv start: ~Option<proc()>,
     /// Hold the registers while the task or scheduler is suspended
     priv regs: ~Registers,
     /// Lower bound and upper bound for the stack
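Note on the FIXME #7761 comment above (not part of the diff): `Registers` is heap-allocated purely so the SSE save area lands on a 16-byte boundary. A minimal sketch of the alternative that comment alludes to, declaring the alignment on the type itself; this is written in present-day Rust, since `#[repr(align)]` did not exist when this patch landed, and the field sizes are illustrative rather than the real `Registers` layout:

    // Sketch only: alignment stated on the type instead of relying on the
    // 16-byte alignment of a heap allocation.
    #[repr(C, align(16))]
    struct Registers {
        gprs: [u64; 16],     // general-purpose register save area (illustrative)
        xmm: [[u64; 2]; 16], // SSE register save area; this is what needs 16-byte alignment
    }

    fn main() {
        let regs = Registers { gprs: [0; 16], xmm: [[0; 2]; 16] };
        // With the attribute, even a stack-allocated value is 16-byte aligned.
        assert_eq!(&regs as *const Registers as usize % 16, 0);
    }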
@@ -35,40 +34,37 @@ pub struct Context {
 impl Context {
     pub fn empty() -> Context {
         Context {
-            start: None,
+            start: ~None,
             regs: new_regs(),
             stack_bounds: None,
         }
     }
 
     /// Create a new context that will resume execution by running proc()
     pub fn new(start: proc(), stack: &mut StackSegment) -> Context {
-        // FIXME #7767: Putting main into a ~ so it's a thin pointer and can
-        //              be passed to the spawn function. Another unfortunate
-        //              allocation
-        let start = ~start;
-
         // The C-ABI function that is the task entry point
-        extern fn task_start_wrapper(f: &proc()) {
-            // XXX(pcwalton): This may be sketchy.
-            unsafe {
-                let f: &|| = transmute(f);
-                (*f)()
-            }
+        extern fn task_start_wrapper(f: &mut Option<proc()>) {
+            f.take_unwrap()()
         }
 
-        let fp: *c_void = task_start_wrapper as *c_void;
-        let argp: *c_void = unsafe { transmute::<&proc(), *c_void>(&*start) };
         let sp: *uint = stack.end();
         let sp: *mut uint = unsafe { transmute_mut_unsafe(sp) };
         // Save and then immediately load the current context,
         // which we will then modify to call the given function when restored
         let mut regs = new_regs();
         unsafe {
-            rust_swap_registers(transmute_mut_region(&mut *regs), transmute_region(&*regs));
+            rust_swap_registers(transmute_mut_region(&mut *regs),
                                transmute_region(&*regs));
         };
 
-        initialize_call_frame(&mut *regs, fp, argp, sp);
+        // FIXME #7767: Putting main into a ~ so it's a thin pointer and can
+        //              be passed to the spawn function. Another unfortunate
+        //              allocation
+        let box = ~Some(start);
+        initialize_call_frame(&mut *regs,
+                              task_start_wrapper as *c_void,
+                              unsafe { transmute(&*box) },
+                              sp);
 
         // Scheduler tasks don't have a stack in the "we allocated it" sense,
         // but rather they run on pthreads stacks. We have complete control over
@@ -82,7 +78,7 @@ impl Context {
             Some((stack_base as uint, sp as uint))
         };
         return Context {
-            start: Some(start),
+            start: box,
             regs: regs,
             stack_bounds: bounds,
         }
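Note (outside the diff): the new `task_start_wrapper` takes `&mut Option<proc()>` and calls `take_unwrap()` on it, so the one-shot entry closure travels behind a single thin pointer (the `~Some(start)` box) and is consumed exactly once, replacing the old transmute from `&proc()` to a borrowed closure. A rough sketch of the same pattern in present-day Rust, where `proc()` no longer exists; `Box<dyn FnOnce()>` and the names used here are stand-ins, not the runtime's API:

    // Sketch: box a one-shot closure inside an Option so a C-ABI trampoline
    // can take() it and call it exactly once.
    extern "C" fn task_start_wrapper(f: &mut Option<Box<dyn FnOnce()>>) {
        // take() leaves None behind, so a second call would panic instead of
        // running the closure twice.
        f.take().expect("start closure already consumed")()
    }

    fn main() {
        let start: Box<dyn FnOnce()> = Box::new(|| println!("task body runs once"));
        let mut slot = Box::new(Some(start)); // analogous to `let box = ~Some(start);`
        task_start_wrapper(&mut *slot);
    }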
@@ -113,17 +109,18 @@ impl Context {
             // invalid for the current task. Lucky for us `rust_swap_registers`
             // is a C function so we don't have to worry about that!
             match in_context.stack_bounds {
-                Some((lo, hi)) => record_stack_bounds(lo, hi),
+                Some((lo, hi)) => stack::record_stack_bounds(lo, hi),
                 // If we're going back to one of the original contexts or
                 // something that's possibly not a "normal task", then reset
                 // the stack limit to 0 to make morestack never fail
-                None => record_stack_bounds(0, uint::max_value),
+                None => stack::record_stack_bounds(0, uint::max_value),
             }
             rust_swap_registers(out_regs, in_regs)
         }
     }
 }
 
+#[link(name = "rustrt", kind = "static")]
 extern {
     fn rust_swap_registers(out_regs: *mut Registers, in_regs: *Registers);
 }
@@ -282,182 +279,6 @@ fn align_down(sp: *mut uint) -> *mut uint {
 // ptr::mut_offset is positive ints only
 #[inline]
 pub fn mut_offset<T>(ptr: *mut T, count: int) -> *mut T {
-    use mem::size_of;
+    use std::mem::size_of;
     (ptr as int + count * (size_of::<T>() as int)) as *mut T
 }
-
-#[inline(always)]
-pub unsafe fn record_stack_bounds(stack_lo: uint, stack_hi: uint) {
-    // When the old runtime had segmented stacks, it used a calculation that was
-    // "limit + RED_ZONE + FUDGE". The red zone was for things like dynamic
-    // symbol resolution, llvm function calls, etc. In theory this red zone
-    // value is 0, but it matters far less when we have gigantic stacks because
-    // we don't need to be so exact about our stack budget. The "fudge factor"
-    // was because LLVM doesn't emit a stack check for functions < 256 bytes in
-    // size. Again though, we have giant stacks, so we round all these
-    // calculations up to the nice round number of 20k.
-    record_sp_limit(stack_lo + RED_ZONE);
-
-    return target_record_stack_bounds(stack_lo, stack_hi);
-
-    #[cfg(not(windows))] #[cfg(not(target_arch = "x86_64"))] #[inline(always)]
-    unsafe fn target_record_stack_bounds(_stack_lo: uint, _stack_hi: uint) {}
-    #[cfg(windows, target_arch = "x86_64")] #[inline(always)]
-    unsafe fn target_record_stack_bounds(stack_lo: uint, stack_hi: uint) {
-        // Windows compiles C functions which may check the stack bounds. This
-        // means that if we want to perform valid FFI on windows, then we need
-        // to ensure that the stack bounds are what they truly are for this
-        // task. More info can be found at:
-        //   https://github.com/mozilla/rust/issues/3445#issuecomment-26114839
-        //
-        // stack range is at TIB: %gs:0x08 (top) and %gs:0x10 (bottom)
-        asm!("mov $0, %gs:0x08" :: "r"(stack_hi) :: "volatile");
-        asm!("mov $0, %gs:0x10" :: "r"(stack_lo) :: "volatile");
-    }
-}
-
-/// Records the current limit of the stack as specified by `end`.
-///
-/// This is stored in an OS-dependent location, likely inside of the thread
-/// local storage. The location that the limit is stored is a pre-ordained
-/// location because it's where LLVM has emitted code to check.
-///
-/// Note that this cannot be called under normal circumstances. This function is
-/// changing the stack limit, so upon returning any further function calls will
-/// possibly be triggering the morestack logic if you're not careful.
-///
-/// Also note that this and all of the inside functions are all flagged as
-/// "inline(always)" because they're messing around with the stack limits. This
-/// would be unfortunate for the functions themselves to trigger a morestack
-/// invocation (if they were an actual function call).
-#[inline(always)]
-pub unsafe fn record_sp_limit(limit: uint) {
-    return target_record_sp_limit(limit);
-
-    // x86-64
-    #[cfg(target_arch = "x86_64", target_os = "macos")] #[inline(always)]
-    unsafe fn target_record_sp_limit(limit: uint) {
-        asm!("movq $$0x60+90*8, %rsi
-              movq $0, %gs:(%rsi)" :: "r"(limit) : "rsi" : "volatile")
-    }
-    #[cfg(target_arch = "x86_64", target_os = "linux")] #[inline(always)]
-    unsafe fn target_record_sp_limit(limit: uint) {
-        asm!("movq $0, %fs:112" :: "r"(limit) :: "volatile")
-    }
-    #[cfg(target_arch = "x86_64", target_os = "win32")] #[inline(always)]
-    unsafe fn target_record_sp_limit(limit: uint) {
-        // see: http://en.wikipedia.org/wiki/Win32_Thread_Information_Block
-        // store this inside of the "arbitrary data slot", but double the size
-        // because this is 64 bit instead of 32 bit
-        asm!("movq $0, %gs:0x28" :: "r"(limit) :: "volatile")
-    }
-    #[cfg(target_arch = "x86_64", target_os = "freebsd")] #[inline(always)]
-    unsafe fn target_record_sp_limit(limit: uint) {
-        asm!("movq $0, %fs:24" :: "r"(limit) :: "volatile")
-    }
-
-    // x86
-    #[cfg(target_arch = "x86", target_os = "macos")] #[inline(always)]
-    unsafe fn target_record_sp_limit(limit: uint) {
-        asm!("movl $$0x48+90*4, %eax
-              movl $0, %gs:(%eax)" :: "r"(limit) : "eax" : "volatile")
-    }
-    #[cfg(target_arch = "x86", target_os = "linux")]
-    #[cfg(target_arch = "x86", target_os = "freebsd")] #[inline(always)]
-    unsafe fn target_record_sp_limit(limit: uint) {
-        asm!("movl $0, %gs:48" :: "r"(limit) :: "volatile")
-    }
-    #[cfg(target_arch = "x86", target_os = "win32")] #[inline(always)]
-    unsafe fn target_record_sp_limit(limit: uint) {
-        // see: http://en.wikipedia.org/wiki/Win32_Thread_Information_Block
-        // store this inside of the "arbitrary data slot"
-        asm!("movl $0, %fs:0x14" :: "r"(limit) :: "volatile")
-    }
-
-    // mips, arm - Some brave soul can port these to inline asm, but it's over
-    // my head personally
-    #[cfg(target_arch = "mips")]
-    #[cfg(target_arch = "arm")] #[inline(always)]
-    unsafe fn target_record_sp_limit(limit: uint) {
-        return record_sp_limit(limit as *c_void);
-        extern {
-            fn record_sp_limit(limit: *c_void);
-        }
-    }
-}
-
-/// The counterpart of the function above, this function will fetch the current
-/// stack limit stored in TLS.
-///
-/// Note that all of these functions are meant to be exact counterparts of their
-/// brethren above, except that the operands are reversed.
-///
-/// As with the setter, this function does not have a __morestack header and can
-/// therefore be called in a "we're out of stack" situation.
-#[inline(always)]
-// currently only called by `rust_stack_exhausted`, which doesn't
-// exist in a test build.
-#[cfg(not(test))]
-pub unsafe fn get_sp_limit() -> uint {
-    return target_get_sp_limit();
-
-    // x86-64
-    #[cfg(target_arch = "x86_64", target_os = "macos")] #[inline(always)]
-    unsafe fn target_get_sp_limit() -> uint {
-        let limit;
-        asm!("movq $$0x60+90*8, %rsi
-              movq %gs:(%rsi), $0" : "=r"(limit) :: "rsi" : "volatile");
-        return limit;
-    }
-    #[cfg(target_arch = "x86_64", target_os = "linux")] #[inline(always)]
-    unsafe fn target_get_sp_limit() -> uint {
-        let limit;
-        asm!("movq %fs:112, $0" : "=r"(limit) ::: "volatile");
-        return limit;
-    }
-    #[cfg(target_arch = "x86_64", target_os = "win32")] #[inline(always)]
-    unsafe fn target_get_sp_limit() -> uint {
-        let limit;
-        asm!("movq %gs:0x28, $0" : "=r"(limit) ::: "volatile");
-        return limit;
-    }
-    #[cfg(target_arch = "x86_64", target_os = "freebsd")] #[inline(always)]
-    unsafe fn target_get_sp_limit() -> uint {
-        let limit;
-        asm!("movq %fs:24, $0" : "=r"(limit) ::: "volatile");
-        return limit;
-    }
-
-    // x86
-    #[cfg(target_arch = "x86", target_os = "macos")] #[inline(always)]
-    unsafe fn target_get_sp_limit() -> uint {
-        let limit;
-        asm!("movl $$0x48+90*4, %eax
-              movl %gs:(%eax), $0" : "=r"(limit) :: "eax" : "volatile");
-        return limit;
-    }
-    #[cfg(target_arch = "x86", target_os = "linux")]
-    #[cfg(target_arch = "x86", target_os = "freebsd")] #[inline(always)]
-    unsafe fn target_get_sp_limit() -> uint {
-        let limit;
-        asm!("movl %gs:48, $0" : "=r"(limit) ::: "volatile");
-        return limit;
-    }
-    #[cfg(target_arch = "x86", target_os = "win32")] #[inline(always)]
-    unsafe fn target_get_sp_limit() -> uint {
-        let limit;
-        asm!("movl %fs:0x14, $0" : "=r"(limit) ::: "volatile");
-        return limit;
-    }
-
-    // mips, arm - Some brave soul can port these to inline asm, but it's over
-    // my head personally
-    #[cfg(target_arch = "mips")]
-    #[cfg(target_arch = "arm")] #[inline(always)]
-    unsafe fn target_get_sp_limit() -> uint {
-        return get_sp_limit() as uint;
-        extern {
-            fn get_sp_limit() -> *c_void;
-        }
-    }
-}
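Note (outside the diff): the `record_sp_limit`/`get_sp_limit` pair removed above writes and reads the stack limit in the one TLS slot that LLVM's __morestack check inspects, which is why every target needs its own inline assembly; per the new `use std::unstable::stack;` import and the `stack::record_stack_bounds` calls, that machinery now lives in `std::unstable::stack`. Purely to illustrate the setter/getter pairing, a sketch in present-day Rust; ordinary thread-local storage is a stand-in here and is not the slot the compiler actually checks:

    use std::cell::Cell;

    // Hypothetical stand-in for the pre-ordained TLS slot; the real code must
    // hit an exact address such as %fs:112 on x86-64 Linux via inline asm.
    thread_local!(static SP_LIMIT: Cell<usize> = Cell::new(0));

    fn record_sp_limit(limit: usize) {
        SP_LIMIT.with(|slot| slot.set(limit));
    }

    fn get_sp_limit() -> usize {
        SP_LIMIT.with(|slot| slot.get())
    }

    fn main() {
        // Mirrors record_stack_bounds: limit = stack_lo + RED_ZONE (20 KiB).
        let (stack_lo, red_zone) = (0x7000_0000usize, 20 * 1024);
        record_sp_limit(stack_lo + red_zone);
        assert_eq!(get_sp_limit(), stack_lo + red_zone);
    }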