Skip to content

Commit 51abdee

Browse files
committed
green: Rip the bandaid off, introduce libgreen
This extracts everything related to green scheduling from libstd and introduces a new libgreen crate. This mostly involves deleting most of std::rt and moving it to libgreen.

Along with the movement of code, this commit rearchitects many functions in the scheduler in order to adapt to the fact that Local::take now *only* works on a Task, not a scheduler. This mostly just involved threading the current green task through in a few locations, but there were one or two spots where things got hairy.

There are a few repercussions of this commit:

* tube/rc have been removed (the runtime implementation of rc)
* There is no longer a "single threaded" spawning mode for tasks. This is now encompassed by 1:1 scheduling + communication. Convenience methods have been introduced that are specific to libgreen to assist in the spawning of pools of schedulers.
1 parent 6aadc9d commit 51abdee

28 files changed

+1695
-2040
lines changed

src/libextra/task_pool.rs

Lines changed: 3 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -14,7 +14,6 @@
1414
/// parallelism.
1515
1616

17-
use std::task::SchedMode;
1817
use std::task;
1918
use std::vec;
2019

@@ -46,7 +45,6 @@ impl<T> TaskPool<T> {
4645
/// returns a function which, given the index of the task, should return
4746
/// local data to be kept around in that task.
4847
pub fn new(n_tasks: uint,
49-
opt_sched_mode: Option<SchedMode>,
5048
init_fn_factory: || -> proc(uint) -> T)
5149
-> TaskPool<T> {
5250
assert!(n_tasks >= 1);
@@ -65,18 +63,8 @@ impl<T> TaskPool<T> {
6563
}
6664
};
6765

68-
// Start the task.
69-
match opt_sched_mode {
70-
None => {
71-
// Run on this scheduler.
72-
task::spawn(task_body);
73-
}
74-
Some(sched_mode) => {
75-
let mut task = task::task();
76-
task.sched_mode(sched_mode);
77-
task.spawn(task_body);
78-
}
79-
}
66+
// Run on this scheduler.
67+
task::spawn(task_body);
8068

8169
chan
8270
});
@@ -99,7 +87,7 @@ fn test_task_pool() {
9987
let g: proc(uint) -> uint = proc(i) i;
10088
g
10189
};
102-
let mut pool = TaskPool::new(4, Some(SingleThreaded), f);
90+
let mut pool = TaskPool::new(4, f);
10391
8.times(|| {
10492
pool.execute(proc(i) println!("Hello from thread {}!", *i));
10593
})

src/libstd/rt/basic.rs renamed to src/libgreen/basic.rs

Lines changed: 9 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -11,15 +11,15 @@
1111
//! This is a basic event loop implementation not meant for any "real purposes"
1212
//! other than testing the scheduler and proving that it's possible to have a
1313
//! pluggable event loop.
14+
//!
15+
//! This implementation is also used as the fallback implementation of an event
16+
//! loop if no other one is provided (and M:N scheduling is desired).
1417
15-
use prelude::*;
16-
17-
use cast;
18-
use rt::rtio::{EventLoop, IoFactory, RemoteCallback, PausableIdleCallback,
19-
Callback};
20-
use unstable::sync::Exclusive;
21-
use io::native;
22-
use util;
18+
use std::cast;
19+
use std::rt::rtio::{EventLoop, IoFactory, RemoteCallback, PausibleIdleCallback,
20+
Callback};
21+
use std::unstable::sync::Exclusive;
22+
use std::util;
2323

2424
/// This is the only exported function from this module.
2525
pub fn event_loop() -> ~EventLoop {
@@ -32,7 +32,6 @@ struct BasicLoop {
3232
remotes: ~[(uint, ~Callback)],
3333
next_remote: uint,
3434
messages: Exclusive<~[Message]>,
35-
io: ~IoFactory,
3635
}
3736

3837
enum Message { RunRemote(uint), RemoveRemote(uint) }
@@ -45,7 +44,6 @@ impl BasicLoop {
4544
next_remote: 0,
4645
remotes: ~[],
4746
messages: Exclusive::new(~[]),
48-
io: ~native::IoFactory as ~IoFactory,
4947
}
5048
}
5149

@@ -159,10 +157,7 @@ impl EventLoop for BasicLoop {
159157
~BasicRemote::new(self.messages.clone(), id) as ~RemoteCallback
160158
}
161159

162-
fn io<'a>(&'a mut self) -> Option<&'a mut IoFactory> {
163-
let factory: &mut IoFactory = self.io;
164-
Some(factory)
165-
}
160+
fn io<'a>(&'a mut self) -> Option<&'a mut IoFactory> { None }
166161
}
167162

168163
struct BasicRemote {

src/libstd/rt/context.rs renamed to src/libgreen/context.rs

Lines changed: 25 additions & 204 deletions
Original file line numberDiff line numberDiff line change
@@ -8,14 +8,13 @@
88
// option. This file may not be copied, modified, or distributed
99
// except according to those terms.
1010

11-
use option::*;
12-
use super::stack::StackSegment;
13-
use libc::c_void;
14-
use uint;
15-
use cast::{transmute, transmute_mut_unsafe,
16-
transmute_region, transmute_mut_region};
11+
use std::libc::c_void;
12+
use std::uint;
13+
use std::cast::{transmute, transmute_mut_unsafe,
14+
transmute_region, transmute_mut_region};
15+
use std::unstable::stack;
1716

18-
pub static RED_ZONE: uint = 20 * 1024;
17+
use stack::StackSegment;
1918

2019
// FIXME #7761: Registers is boxed so that it is 16-byte aligned, for storing
2120
// SSE regs. It would be marginally better not to do this. In C++ we
@@ -25,7 +24,7 @@ pub static RED_ZONE: uint = 20 * 1024;
2524
// then misalign the regs again.
2625
pub struct Context {
2726
/// The context entry point, saved here for later destruction
28-
priv start: Option<~proc()>,
27+
priv start: ~Option<proc()>,
2928
/// Hold the registers while the task or scheduler is suspended
3029
priv regs: ~Registers,
3130
/// Lower bound and upper bound for the stack
@@ -35,40 +34,37 @@ pub struct Context {
3534
impl Context {
3635
pub fn empty() -> Context {
3736
Context {
38-
start: None,
37+
start: ~None,
3938
regs: new_regs(),
4039
stack_bounds: None,
4140
}
4241
}
4342

4443
/// Create a new context that will resume execution by running proc()
4544
pub fn new(start: proc(), stack: &mut StackSegment) -> Context {
46-
// FIXME #7767: Putting main into a ~ so it's a thin pointer and can
47-
// be passed to the spawn function. Another unfortunate
48-
// allocation
49-
let start = ~start;
50-
5145
// The C-ABI function that is the task entry point
52-
extern fn task_start_wrapper(f: &proc()) {
53-
// XXX(pcwalton): This may be sketchy.
54-
unsafe {
55-
let f: &|| = transmute(f);
56-
(*f)()
57-
}
46+
extern fn task_start_wrapper(f: &mut Option<proc()>) {
47+
f.take_unwrap()()
5848
}
5949

60-
let fp: *c_void = task_start_wrapper as *c_void;
61-
let argp: *c_void = unsafe { transmute::<&proc(), *c_void>(&*start) };
6250
let sp: *uint = stack.end();
6351
let sp: *mut uint = unsafe { transmute_mut_unsafe(sp) };
6452
// Save and then immediately load the current context,
6553
// which we will then modify to call the given function when restored
6654
let mut regs = new_regs();
6755
unsafe {
68-
rust_swap_registers(transmute_mut_region(&mut *regs), transmute_region(&*regs));
56+
rust_swap_registers(transmute_mut_region(&mut *regs),
57+
transmute_region(&*regs));
6958
};
7059

71-
initialize_call_frame(&mut *regs, fp, argp, sp);
60+
// FIXME #7767: Putting main into a ~ so it's a thin pointer and can
61+
// be passed to the spawn function. Another unfortunate
62+
// allocation
63+
let box = ~Some(start);
64+
initialize_call_frame(&mut *regs,
65+
task_start_wrapper as *c_void,
66+
unsafe { transmute(&*box) },
67+
sp);
7268

7369
// Scheduler tasks don't have a stack in the "we allocated it" sense,
7470
// but rather they run on pthreads stacks. We have complete control over
@@ -82,7 +78,7 @@ impl Context {
8278
Some((stack_base as uint, sp as uint))
8379
};
8480
return Context {
85-
start: Some(start),
81+
start: box,
8682
regs: regs,
8783
stack_bounds: bounds,
8884
}
@@ -113,17 +109,18 @@ impl Context {
113109
// invalid for the current task. Lucky for us `rust_swap_registers`
114110
// is a C function so we don't have to worry about that!
115111
match in_context.stack_bounds {
116-
Some((lo, hi)) => record_stack_bounds(lo, hi),
112+
Some((lo, hi)) => stack::record_stack_bounds(lo, hi),
117113
// If we're going back to one of the original contexts or
118114
// something that's possibly not a "normal task", then reset
119115
// the stack limit to 0 to make morestack never fail
120-
None => record_stack_bounds(0, uint::max_value),
116+
None => stack::record_stack_bounds(0, uint::max_value),
121117
}
122118
rust_swap_registers(out_regs, in_regs)
123119
}
124120
}
125121
}
126122

123+
#[link(name = "rustrt", kind = "static")]
127124
extern {
128125
fn rust_swap_registers(out_regs: *mut Registers, in_regs: *Registers);
129126
}
@@ -282,182 +279,6 @@ fn align_down(sp: *mut uint) -> *mut uint {
282279
// ptr::mut_offset is positive ints only
283280
#[inline]
284281
pub fn mut_offset<T>(ptr: *mut T, count: int) -> *mut T {
285-
use mem::size_of;
282+
use std::mem::size_of;
286283
(ptr as int + count * (size_of::<T>() as int)) as *mut T
287284
}
288-
289-
#[inline(always)]
290-
pub unsafe fn record_stack_bounds(stack_lo: uint, stack_hi: uint) {
291-
// When the old runtime had segmented stacks, it used a calculation that was
292-
// "limit + RED_ZONE + FUDGE". The red zone was for things like dynamic
293-
// symbol resolution, llvm function calls, etc. In theory this red zone
294-
// value is 0, but it matters far less when we have gigantic stacks because
295-
// we don't need to be so exact about our stack budget. The "fudge factor"
296-
// was because LLVM doesn't emit a stack check for functions < 256 bytes in
297-
// size. Again though, we have giant stacks, so we round all these
298-
// calculations up to the nice round number of 20k.
299-
record_sp_limit(stack_lo + RED_ZONE);
300-
301-
return target_record_stack_bounds(stack_lo, stack_hi);
302-
303-
#[cfg(not(windows))] #[cfg(not(target_arch = "x86_64"))] #[inline(always)]
304-
unsafe fn target_record_stack_bounds(_stack_lo: uint, _stack_hi: uint) {}
305-
#[cfg(windows, target_arch = "x86_64")] #[inline(always)]
306-
unsafe fn target_record_stack_bounds(stack_lo: uint, stack_hi: uint) {
307-
// Windows compiles C functions which may check the stack bounds. This
308-
// means that if we want to perform valid FFI on windows, then we need
309-
// to ensure that the stack bounds are what they truly are for this
310-
// task. More info can be found at:
311-
// https://github.com/mozilla/rust/issues/3445#issuecomment-26114839
312-
//
313-
// stack range is at TIB: %gs:0x08 (top) and %gs:0x10 (bottom)
314-
asm!("mov $0, %gs:0x08" :: "r"(stack_hi) :: "volatile");
315-
asm!("mov $0, %gs:0x10" :: "r"(stack_lo) :: "volatile");
316-
}
317-
}
318-
319-
/// Records the current limit of the stack as specified by `end`.
320-
///
321-
/// This is stored in an OS-dependent location, likely inside of the thread
322-
/// local storage. The location that the limit is stored is a pre-ordained
323-
/// location because it's where LLVM has emitted code to check.
324-
///
325-
/// Note that this cannot be called under normal circumstances. This function is
326-
/// changing the stack limit, so upon returning any further function calls will
327-
/// possibly be triggering the morestack logic if you're not careful.
328-
///
329-
/// Also note that this and all of the inside functions are all flagged as
330-
/// "inline(always)" because they're messing around with the stack limits. This
331-
/// would be unfortunate for the functions themselves to trigger a morestack
332-
/// invocation (if they were an actual function call).
333-
#[inline(always)]
334-
pub unsafe fn record_sp_limit(limit: uint) {
335-
return target_record_sp_limit(limit);
336-
337-
// x86-64
338-
#[cfg(target_arch = "x86_64", target_os = "macos")] #[inline(always)]
339-
unsafe fn target_record_sp_limit(limit: uint) {
340-
asm!("movq $$0x60+90*8, %rsi
341-
movq $0, %gs:(%rsi)" :: "r"(limit) : "rsi" : "volatile")
342-
}
343-
#[cfg(target_arch = "x86_64", target_os = "linux")] #[inline(always)]
344-
unsafe fn target_record_sp_limit(limit: uint) {
345-
asm!("movq $0, %fs:112" :: "r"(limit) :: "volatile")
346-
}
347-
#[cfg(target_arch = "x86_64", target_os = "win32")] #[inline(always)]
348-
unsafe fn target_record_sp_limit(limit: uint) {
349-
// see: http://en.wikipedia.org/wiki/Win32_Thread_Information_Block
350-
// store this inside of the "arbitrary data slot", but double the size
351-
// because this is 64 bit instead of 32 bit
352-
asm!("movq $0, %gs:0x28" :: "r"(limit) :: "volatile")
353-
}
354-
#[cfg(target_arch = "x86_64", target_os = "freebsd")] #[inline(always)]
355-
unsafe fn target_record_sp_limit(limit: uint) {
356-
asm!("movq $0, %fs:24" :: "r"(limit) :: "volatile")
357-
}
358-
359-
// x86
360-
#[cfg(target_arch = "x86", target_os = "macos")] #[inline(always)]
361-
unsafe fn target_record_sp_limit(limit: uint) {
362-
asm!("movl $$0x48+90*4, %eax
363-
movl $0, %gs:(%eax)" :: "r"(limit) : "eax" : "volatile")
364-
}
365-
#[cfg(target_arch = "x86", target_os = "linux")]
366-
#[cfg(target_arch = "x86", target_os = "freebsd")] #[inline(always)]
367-
unsafe fn target_record_sp_limit(limit: uint) {
368-
asm!("movl $0, %gs:48" :: "r"(limit) :: "volatile")
369-
}
370-
#[cfg(target_arch = "x86", target_os = "win32")] #[inline(always)]
371-
unsafe fn target_record_sp_limit(limit: uint) {
372-
// see: http://en.wikipedia.org/wiki/Win32_Thread_Information_Block
373-
// store this inside of the "arbitrary data slot"
374-
asm!("movl $0, %fs:0x14" :: "r"(limit) :: "volatile")
375-
}
376-
377-
// mips, arm - Some brave soul can port these to inline asm, but it's over
378-
// my head personally
379-
#[cfg(target_arch = "mips")]
380-
#[cfg(target_arch = "arm")] #[inline(always)]
381-
unsafe fn target_record_sp_limit(limit: uint) {
382-
return record_sp_limit(limit as *c_void);
383-
extern {
384-
fn record_sp_limit(limit: *c_void);
385-
}
386-
}
387-
}
388-
389-
/// The counterpart of the function above, this function will fetch the current
390-
/// stack limit stored in TLS.
391-
///
392-
/// Note that all of these functions are meant to be exact counterparts of their
393-
/// brethren above, except that the operands are reversed.
394-
///
395-
/// As with the setter, this function does not have a __morestack header and can
396-
/// therefore be called in a "we're out of stack" situation.
397-
#[inline(always)]
398-
// currently only called by `rust_stack_exhausted`, which doesn't
399-
// exist in a test build.
400-
#[cfg(not(test))]
401-
pub unsafe fn get_sp_limit() -> uint {
402-
return target_get_sp_limit();
403-
404-
// x86-64
405-
#[cfg(target_arch = "x86_64", target_os = "macos")] #[inline(always)]
406-
unsafe fn target_get_sp_limit() -> uint {
407-
let limit;
408-
asm!("movq $$0x60+90*8, %rsi
409-
movq %gs:(%rsi), $0" : "=r"(limit) :: "rsi" : "volatile");
410-
return limit;
411-
}
412-
#[cfg(target_arch = "x86_64", target_os = "linux")] #[inline(always)]
413-
unsafe fn target_get_sp_limit() -> uint {
414-
let limit;
415-
asm!("movq %fs:112, $0" : "=r"(limit) ::: "volatile");
416-
return limit;
417-
}
418-
#[cfg(target_arch = "x86_64", target_os = "win32")] #[inline(always)]
419-
unsafe fn target_get_sp_limit() -> uint {
420-
let limit;
421-
asm!("movq %gs:0x28, $0" : "=r"(limit) ::: "volatile");
422-
return limit;
423-
}
424-
#[cfg(target_arch = "x86_64", target_os = "freebsd")] #[inline(always)]
425-
unsafe fn target_get_sp_limit() -> uint {
426-
let limit;
427-
asm!("movq %fs:24, $0" : "=r"(limit) ::: "volatile");
428-
return limit;
429-
}
430-
431-
// x86
432-
#[cfg(target_arch = "x86", target_os = "macos")] #[inline(always)]
433-
unsafe fn target_get_sp_limit() -> uint {
434-
let limit;
435-
asm!("movl $$0x48+90*4, %eax
436-
movl %gs:(%eax), $0" : "=r"(limit) :: "eax" : "volatile");
437-
return limit;
438-
}
439-
#[cfg(target_arch = "x86", target_os = "linux")]
440-
#[cfg(target_arch = "x86", target_os = "freebsd")] #[inline(always)]
441-
unsafe fn target_get_sp_limit() -> uint {
442-
let limit;
443-
asm!("movl %gs:48, $0" : "=r"(limit) ::: "volatile");
444-
return limit;
445-
}
446-
#[cfg(target_arch = "x86", target_os = "win32")] #[inline(always)]
447-
unsafe fn target_get_sp_limit() -> uint {
448-
let limit;
449-
asm!("movl %fs:0x14, $0" : "=r"(limit) ::: "volatile");
450-
return limit;
451-
}
452-
453-
// mips, arm - Some brave soul can port these to inline asm, but it's over
454-
// my head personally
455-
#[cfg(target_arch = "mips")]
456-
#[cfg(target_arch = "arm")] #[inline(always)]
457-
unsafe fn target_get_sp_limit() -> uint {
458-
return get_sp_limit() as uint;
459-
extern {
460-
fn get_sp_limit() -> *c_void;
461-
}
462-
}
463-
}

0 commit comments

Comments
 (0)