From 29d83002a27e6f47759b4a3bfe741fb061107816 Mon Sep 17 00:00:00 2001 From: Brian Anderson Date: Mon, 20 May 2013 16:43:31 -0700 Subject: [PATCH 001/111] core::rt: Move uv idle tests to idle mod --- src/libcore/rt/uv/idle.rs | 62 +++++++++++++++++++++++++++++++++++++++ src/libcore/rt/uv/mod.rs | 54 ---------------------------------- 2 files changed, 62 insertions(+), 54 deletions(-) diff --git a/src/libcore/rt/uv/idle.rs b/src/libcore/rt/uv/idle.rs index 2cf0b5c487288..a81ab48696a36 100644 --- a/src/libcore/rt/uv/idle.rs +++ b/src/libcore/rt/uv/idle.rs @@ -89,3 +89,65 @@ impl NativeHandle<*uvll::uv_idle_t> for IdleWatcher { match self { &IdleWatcher(ptr) => ptr } } } + +#[cfg(test)] +mod test { + + use rt::uv::Loop; + use super::*; + use unstable::run_in_bare_thread; + + #[test] + #[ignore(reason = "valgrind - loop destroyed before watcher?")] + fn idle_new_then_close() { + do run_in_bare_thread { + let mut loop_ = Loop::new(); + let idle_watcher = { IdleWatcher::new(&mut loop_) }; + idle_watcher.close(||()); + } + } + + #[test] + fn idle_smoke_test() { + do run_in_bare_thread { + let mut loop_ = Loop::new(); + let mut idle_watcher = { IdleWatcher::new(&mut loop_) }; + let mut count = 10; + let count_ptr: *mut int = &mut count; + do idle_watcher.start |idle_watcher, status| { + let mut idle_watcher = idle_watcher; + assert!(status.is_none()); + if unsafe { *count_ptr == 10 } { + idle_watcher.stop(); + idle_watcher.close(||()); + } else { + unsafe { *count_ptr = *count_ptr + 1; } + } + } + loop_.run(); + loop_.close(); + assert_eq!(count, 10); + } + } + + #[test] + fn idle_start_stop_start() { + do run_in_bare_thread { + let mut loop_ = Loop::new(); + let mut idle_watcher = { IdleWatcher::new(&mut loop_) }; + do idle_watcher.start |idle_watcher, status| { + let mut idle_watcher = idle_watcher; + assert!(status.is_none()); + idle_watcher.stop(); + do idle_watcher.start |idle_watcher, status| { + assert!(status.is_none()); + let mut idle_watcher = idle_watcher; + idle_watcher.stop(); + idle_watcher.close(||()); + } + } + loop_.run(); + loop_.close(); + } + } +} diff --git a/src/libcore/rt/uv/mod.rs b/src/libcore/rt/uv/mod.rs index 2bd657fd8641f..8cc596b2876d9 100644 --- a/src/libcore/rt/uv/mod.rs +++ b/src/libcore/rt/uv/mod.rs @@ -364,57 +364,3 @@ fn loop_smoke_test() { loop_.close(); } } - -#[test] -#[ignore(reason = "valgrind - loop destroyed before watcher?")] -fn idle_new_then_close() { - do run_in_bare_thread { - let mut loop_ = Loop::new(); - let idle_watcher = { IdleWatcher::new(&mut loop_) }; - idle_watcher.close(||()); - } -} - -#[test] -fn idle_smoke_test() { - do run_in_bare_thread { - let mut loop_ = Loop::new(); - let mut idle_watcher = { IdleWatcher::new(&mut loop_) }; - let mut count = 10; - let count_ptr: *mut int = &mut count; - do idle_watcher.start |idle_watcher, status| { - let mut idle_watcher = idle_watcher; - assert!(status.is_none()); - if unsafe { *count_ptr == 10 } { - idle_watcher.stop(); - idle_watcher.close(||()); - } else { - unsafe { *count_ptr = *count_ptr + 1; } - } - } - loop_.run(); - loop_.close(); - assert_eq!(count, 10); - } -} - -#[test] -fn idle_start_stop_start() { - do run_in_bare_thread { - let mut loop_ = Loop::new(); - let mut idle_watcher = { IdleWatcher::new(&mut loop_) }; - do idle_watcher.start |idle_watcher, status| { - let mut idle_watcher = idle_watcher; - assert!(status.is_none()); - idle_watcher.stop(); - do idle_watcher.start |idle_watcher, status| { - assert!(status.is_none()); - let mut idle_watcher = idle_watcher; - 
idle_watcher.stop(); - idle_watcher.close(||()); - } - } - loop_.run(); - loop_.close(); - } -} From 807269041437411df49a9a893c86310283d6eb91 Mon Sep 17 00:00:00 2001 From: Brian Anderson Date: Mon, 20 May 2013 18:16:09 -0700 Subject: [PATCH 002/111] core::rt: Add bindings for async uv handles --- src/libcore/rt/uv/async.rs | 105 +++++++++++++++++++++++++++++++++++++ src/libcore/rt/uv/mod.rs | 9 +++- 2 files changed, 112 insertions(+), 2 deletions(-) create mode 100644 src/libcore/rt/uv/async.rs diff --git a/src/libcore/rt/uv/async.rs b/src/libcore/rt/uv/async.rs new file mode 100644 index 0000000000000..0d032f512d38b --- /dev/null +++ b/src/libcore/rt/uv/async.rs @@ -0,0 +1,105 @@ +// Copyright 2013 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use libc::{c_int, c_void}; +use option::Some; +use rt::uv::uvll; +use rt::uv::uvll::UV_ASYNC; +use rt::uv::{Watcher, Loop, NativeHandle, AsyncCallback, NullCallback}; +use rt::uv::WatcherInterop; +use rt::uv::status_to_maybe_uv_error; + +pub struct AsyncWatcher(*uvll::uv_async_t); +impl Watcher for AsyncWatcher { } + +impl AsyncWatcher { + fn new(loop_: &mut Loop, cb: AsyncCallback) -> AsyncWatcher { + unsafe { + let handle = uvll::malloc_handle(UV_ASYNC); + assert!(handle.is_not_null()); + let mut watcher: AsyncWatcher = NativeHandle::from_native_handle(handle); + watcher.install_watcher_data(); + let data = watcher.get_watcher_data(); + data.async_cb = Some(cb); + assert_eq!(0, uvll::async_init(loop_.native_handle(), handle, async_cb)); + return watcher; + } + + extern fn async_cb(handle: *uvll::uv_async_t, status: c_int) { + let mut watcher: AsyncWatcher = NativeHandle::from_native_handle(handle); + let status = status_to_maybe_uv_error(watcher.native_handle(), status); + let data = watcher.get_watcher_data(); + let cb = data.async_cb.get_ref(); + (*cb)(watcher, status); + } + } + + fn send(&mut self) { + unsafe { + let handle = self.native_handle(); + uvll::async_send(handle); + } + } + + fn close(self, cb: NullCallback) { + let mut this = self; + let data = this.get_watcher_data(); + assert!(data.close_cb.is_none()); + data.close_cb = Some(cb); + + unsafe { + uvll::close(self.native_handle(), close_cb); + } + + extern fn close_cb(handle: *uvll::uv_stream_t) { + let mut watcher: AsyncWatcher = NativeHandle::from_native_handle(handle); + { + let data = watcher.get_watcher_data(); + data.close_cb.swap_unwrap()(); + } + watcher.drop_watcher_data(); + unsafe { uvll::free_handle(handle as *c_void); } + } + } +} + +impl NativeHandle<*uvll::uv_async_t> for AsyncWatcher { + fn from_native_handle(handle: *uvll::uv_async_t) -> AsyncWatcher { + AsyncWatcher(handle) + } + fn native_handle(&self) -> *uvll::uv_async_t { + match self { &AsyncWatcher(ptr) => ptr } + } +} + +#[cfg(test)] +mod test { + + use super::*; + use rt::uv::Loop; + use unstable::run_in_bare_thread; + use rt::thread::Thread; + use cell::Cell; + + #[test] + fn smoke_test() { + do run_in_bare_thread { + let mut loop_ = Loop::new(); + let watcher = AsyncWatcher::new(&mut loop_, |w, _| w.close(||()) ); + let watcher_cell = Cell(watcher); + let _thread = do Thread::start { + let mut watcher = watcher_cell.take(); + watcher.send(); + }; + loop_.run(); + loop_.close(); + } + } +} diff --git 
a/src/libcore/rt/uv/mod.rs b/src/libcore/rt/uv/mod.rs index 8cc596b2876d9..5f9e56608149f 100644 --- a/src/libcore/rt/uv/mod.rs +++ b/src/libcore/rt/uv/mod.rs @@ -57,6 +57,7 @@ pub use self::file::FsRequest; pub use self::net::{StreamWatcher, TcpWatcher}; pub use self::idle::IdleWatcher; pub use self::timer::TimerWatcher; +pub use self::async::AsyncWatcher; /// The implementation of `rtio` for libuv pub mod uvio; @@ -68,6 +69,7 @@ pub mod file; pub mod net; pub mod idle; pub mod timer; +pub mod async; /// XXX: Loop(*handle) is buggy with destructors. Normal structs /// with dtors may not be destructured, but tuple structs can, @@ -125,6 +127,7 @@ pub type IdleCallback = ~fn(IdleWatcher, Option); pub type ConnectionCallback = ~fn(StreamWatcher, Option); pub type FsCallback = ~fn(FsRequest, Option); pub type TimerCallback = ~fn(TimerWatcher, Option); +pub type AsyncCallback = ~fn(AsyncWatcher, Option); /// Callbacks used by StreamWatchers, set as custom data on the foreign handle @@ -135,7 +138,8 @@ struct WatcherData { close_cb: Option, alloc_cb: Option, idle_cb: Option, - timer_cb: Option + timer_cb: Option, + async_cb: Option } pub trait WatcherInterop { @@ -164,7 +168,8 @@ impl> WatcherInterop for W { close_cb: None, alloc_cb: None, idle_cb: None, - timer_cb: None + timer_cb: None, + async_cb: None }; let data = transmute::<~WatcherData, *c_void>(data); uvll::set_data_for_uv_handle(self.native_handle(), data); From 6d8d73cfc4cba2fdb2ee67448df39d89be08ce69 Mon Sep 17 00:00:00 2001 From: James Miller Date: Tue, 21 May 2013 17:31:24 +1200 Subject: [PATCH 003/111] Add AtomicUint newtype --- src/libcore/unstable/sync.rs | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/src/libcore/unstable/sync.rs b/src/libcore/unstable/sync.rs index 734368c70c4a0..7c228ff564778 100644 --- a/src/libcore/unstable/sync.rs +++ b/src/libcore/unstable/sync.rs @@ -205,6 +205,26 @@ extern { fn rust_unlock_little_lock(lock: rust_little_lock); } +/* *********************************************************************/ + +//FIXME: #5042 This should be replaced by proper atomic type +pub struct AtomicUint(uint); +pub impl AtomicUint { + fn load(&self) -> uint { + unsafe { intrinsics::atomic_load(cast::transmute(self)) as uint } + } + fn store(&mut self, val:uint) { + unsafe { intrinsics::atomic_store(cast::transmute(self), val as int); } + } + fn add(&mut self, val:int) -> uint { + unsafe { intrinsics::atomic_xadd(cast::transmute(self), val as int) as uint } + } + fn cas(&self, old:uint, new:uint) -> uint { + unsafe { intrinsics::atomic_cxchg(cast::transmute(self), old as int, new as int) as uint } + } +} + + #[cfg(test)] mod tests { use comm; From 8f77a6f422184dacc14ae1b6a042c321e06bef88 Mon Sep 17 00:00:00 2001 From: Brian Anderson Date: Tue, 21 May 2013 17:36:59 -0700 Subject: [PATCH 004/111] core: Add AtomicInt and cleanup --- src/libcore/unstable/sync.rs | 61 ++++++++++++++++++++++++++++++++---- 1 file changed, 55 insertions(+), 6 deletions(-) diff --git a/src/libcore/unstable/sync.rs b/src/libcore/unstable/sync.rs index 7c228ff564778..6085ca1a482ee 100644 --- a/src/libcore/unstable/sync.rs +++ b/src/libcore/unstable/sync.rs @@ -208,25 +208,50 @@ extern { /* *********************************************************************/ //FIXME: #5042 This should be replaced by proper atomic type -pub struct AtomicUint(uint); -pub impl AtomicUint { - fn load(&self) -> uint { +pub struct AtomicUint { + priv inner: uint +} + +impl AtomicUint { + pub fn new(val: uint) -> AtomicUint { AtomicUint { inner: 
val } } + pub fn load(&self) -> uint { unsafe { intrinsics::atomic_load(cast::transmute(self)) as uint } } - fn store(&mut self, val:uint) { + pub fn store(&mut self, val: uint) { unsafe { intrinsics::atomic_store(cast::transmute(self), val as int); } } - fn add(&mut self, val:int) -> uint { + pub fn add(&mut self, val: int) -> uint { unsafe { intrinsics::atomic_xadd(cast::transmute(self), val as int) as uint } } - fn cas(&self, old:uint, new:uint) -> uint { + pub fn cas(&mut self, old:uint, new: uint) -> uint { unsafe { intrinsics::atomic_cxchg(cast::transmute(self), old as int, new as int) as uint } } } +pub struct AtomicInt { + priv inner: int +} + +impl AtomicInt { + pub fn new(val: int) -> AtomicInt { AtomicInt { inner: val } } + pub fn load(&self) -> int { + unsafe { intrinsics::atomic_load(&self.inner) } + } + pub fn store(&mut self, val: int) { + unsafe { intrinsics::atomic_store(&mut self.inner, val); } + } + pub fn add(&mut self, val: int) -> int { + unsafe { intrinsics::atomic_xadd(&mut self.inner, val) } + } + pub fn cas(&mut self, old: int, new: int) -> int { + unsafe { intrinsics::atomic_cxchg(&mut self.inner, old, new) } + } +} + #[cfg(test)] mod tests { + use super::*; use comm; use super::exclusive; use task; @@ -278,4 +303,28 @@ mod tests { assert_eq!(*one, 1); } } + + #[test] + fn atomic_int_smoke_test() { + let mut i = AtomicInt::new(0); + i.store(10); + assert!(i.load() == 10); + assert!(i.add(1) == 10); + assert!(i.load() == 11); + assert!(i.cas(11, 12) == 11); + assert!(i.cas(11, 13) == 12); + assert!(i.load() == 12); + } + + #[test] + fn atomic_uint_smoke_test() { + let mut i = AtomicUint::new(0); + i.store(10); + assert!(i.load() == 10); + assert!(i.add(1) == 10); + assert!(i.load() == 11); + assert!(i.cas(11, 12) == 11); + assert!(i.cas(11, 13) == 12); + assert!(i.load() == 12); + } } From a0cd55a1d7436dc9532ddf5cdad7d1f7e8f108f3 Mon Sep 17 00:00:00 2001 From: Brian Anderson Date: Mon, 20 May 2013 18:23:56 -0700 Subject: [PATCH 005/111] core::rt: Add RemoteCallback trait and uv implementation This is used for signalling the event loop from other threads. --- src/libcore/rt/rtio.rs | 11 +++++ src/libcore/rt/uv/async.rs | 6 +-- src/libcore/rt/uv/uvio.rs | 86 ++++++++++++++++++++++++++++++++++++++ 3 files changed, 100 insertions(+), 3 deletions(-) diff --git a/src/libcore/rt/rtio.rs b/src/libcore/rt/rtio.rs index 4b5eda22ff5de..fa657555f3aa0 100644 --- a/src/libcore/rt/rtio.rs +++ b/src/libcore/rt/rtio.rs @@ -18,6 +18,7 @@ use rt::uv::uvio; // XXX: ~object doesn't work currently so these are some placeholder // types to use instead pub type EventLoopObject = uvio::UvEventLoop; +pub type RemoteCallbackObject = uvio::UvRemoteCallback; pub type IoFactoryObject = uvio::UvIoFactory; pub type RtioTcpStreamObject = uvio::UvTcpStream; pub type RtioTcpListenerObject = uvio::UvTcpListener; @@ -26,10 +27,20 @@ pub trait EventLoop { fn run(&mut self); fn callback(&mut self, ~fn()); fn callback_ms(&mut self, ms: u64, ~fn()); + fn remote_callback(&mut self, ~fn()) -> ~RemoteCallbackObject; /// The asynchronous I/O services. Not all event loops may provide one fn io<'a>(&'a mut self) -> Option<&'a mut IoFactoryObject>; } +pub trait RemoteCallback { + /// Trigger the remote callback. Note that the number of times the callback + /// is run is not guaranteed. All that is guaranteed is that, after calling 'fire', + /// the callback will be called at least once, but multiple callbacks may be coalesced + /// and callbacks may be called more often requested. 
Destruction also triggers the + /// callback. + fn fire(&mut self); +} + pub trait IoFactory { fn tcp_connect(&mut self, addr: IpAddr) -> Result<~RtioTcpStreamObject, IoError>; fn tcp_bind(&mut self, addr: IpAddr) -> Result<~RtioTcpListenerObject, IoError>; diff --git a/src/libcore/rt/uv/async.rs b/src/libcore/rt/uv/async.rs index 0d032f512d38b..6ed06cc10b78a 100644 --- a/src/libcore/rt/uv/async.rs +++ b/src/libcore/rt/uv/async.rs @@ -20,7 +20,7 @@ pub struct AsyncWatcher(*uvll::uv_async_t); impl Watcher for AsyncWatcher { } impl AsyncWatcher { - fn new(loop_: &mut Loop, cb: AsyncCallback) -> AsyncWatcher { + pub fn new(loop_: &mut Loop, cb: AsyncCallback) -> AsyncWatcher { unsafe { let handle = uvll::malloc_handle(UV_ASYNC); assert!(handle.is_not_null()); @@ -41,14 +41,14 @@ impl AsyncWatcher { } } - fn send(&mut self) { + pub fn send(&mut self) { unsafe { let handle = self.native_handle(); uvll::async_send(handle); } } - fn close(self, cb: NullCallback) { + pub fn close(self, cb: NullCallback) { let mut this = self; let data = this.get_watcher_data(); assert!(data.close_cb.is_none()); diff --git a/src/libcore/rt/uv/uvio.rs b/src/libcore/rt/uv/uvio.rs index cacd67314ebac..cf1bd568d0288 100644 --- a/src/libcore/rt/uv/uvio.rs +++ b/src/libcore/rt/uv/uvio.rs @@ -12,6 +12,7 @@ use option::*; use result::*; use ops::Drop; use cell::{Cell, empty_cell}; +use cast; use cast::transmute; use clone::Clone; use rt::io::IoError; @@ -23,6 +24,8 @@ use rt::sched::Scheduler; use rt::io::{standard_error, OtherIoError}; use rt::tube::Tube; use rt::local::Local; +use unstable::sync::{UnsafeAtomicRcBox, AtomicInt}; +use unstable::intrinsics; #[cfg(test)] use container::Container; #[cfg(test)] use uint; @@ -82,6 +85,10 @@ impl EventLoop for UvEventLoop { } } + fn remote_callback(&mut self, f: ~fn()) -> ~RemoteCallbackObject { + ~UvRemoteCallback::new(self.uvio.uv_loop(), f) + } + fn io<'a>(&'a mut self) -> Option<&'a mut IoFactoryObject> { Some(&mut self.uvio) } @@ -101,6 +108,85 @@ fn test_callback_run_once() { } } +pub struct UvRemoteCallback { + // The uv async handle for triggering the callback + async: AsyncWatcher, + // An atomic flag to tell the callback to exit, + // set from the dtor. 
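
// Aside: the shutdown handshake UvRemoteCallback uses here -- an atomic flag
// written by the destructor and read by the async callback -- can be sketched
// with today's std::sync::atomic in place of UnsafeAtomicRcBox<AtomicInt>.
// The names below are illustrative assumptions, not the patch's API.

use std::sync::Arc;
use std::sync::atomic::{AtomicBool, Ordering};

struct RemoteHandle {
    // Shared with the event-loop side's async callback.
    exit: Arc<AtomicBool>,
}

impl Drop for RemoteHandle {
    fn drop(&mut self) {
        // Ask the callback to tear down the watcher on its next run. The patch
        // follows the store with `async.send()`, so the callback is guaranteed
        // to run at least once more and observe the flag.
        self.exit.store(true, Ordering::SeqCst);
    }
}

// Event-loop side, inside the async callback:
fn on_async(exit: &AtomicBool) {
    if exit.load(Ordering::SeqCst) {
        // close the watcher here
    }
}
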
+ exit_flag: UnsafeAtomicRcBox +} + +impl UvRemoteCallback { + pub fn new(loop_: &mut Loop, f: ~fn()) -> UvRemoteCallback { + let exit_flag = UnsafeAtomicRcBox::new(AtomicInt::new(0)); + let exit_flag_clone = exit_flag.clone(); + let async = do AsyncWatcher::new(loop_) |watcher, status| { + assert!(status.is_none()); + f(); + let exit_flag_ptr = exit_flag_clone.get(); + unsafe { + if (*exit_flag_ptr).load() == 1 { + watcher.close(||()); + } + } + }; + UvRemoteCallback { + async: async, + exit_flag: exit_flag + } + } +} + +impl RemoteCallback for UvRemoteCallback { + fn fire(&mut self) { self.async.send() } +} + +impl Drop for UvRemoteCallback { + fn finalize(&self) { + unsafe { + let mut this: &mut UvRemoteCallback = cast::transmute_mut(self); + let exit_flag_ptr = this.exit_flag.get(); + (*exit_flag_ptr).store(1); + this.async.send(); + } + } +} + +#[cfg(test)] +mod test_remote { + use super::*; + use cell; + use cell::Cell; + use rt::test::*; + use rt::thread::Thread; + use rt::tube::Tube; + use rt::rtio::EventLoop; + use rt::local::Local; + use rt::sched::Scheduler; + + #[test] + fn test_uv_remote() { + do run_in_newsched_task { + let mut tube = Tube::new(); + let tube_clone = tube.clone(); + let remote_cell = cell::empty_cell(); + do Local::borrow::() |sched| { + let tube_clone = tube_clone.clone(); + let tube_clone_cell = Cell(tube_clone); + let remote = do sched.event_loop.remote_callback { + tube_clone_cell.take().send(1); + }; + remote_cell.put_back(remote); + } + let _thread = do Thread::start { + remote_cell.take().fire(); + }; + + assert!(tube.recv() == 1); + } + } +} + pub struct UvIoFactory(Loop); pub impl UvIoFactory { From 41c21685dd149fb95dededfb4edaf87c6603c099 Mon Sep 17 00:00:00 2001 From: Brian Anderson Date: Wed, 22 May 2013 15:39:39 -0700 Subject: [PATCH 006/111] core::rt: Add SchedHandle type --- src/libcore/rt/sched.rs | 101 +++++++++++++++++++++++++++++++--------- 1 file changed, 78 insertions(+), 23 deletions(-) diff --git a/src/libcore/rt/sched.rs b/src/libcore/rt/sched.rs index 50c6a894093f3..3f7b332e184b1 100644 --- a/src/libcore/rt/sched.rs +++ b/src/libcore/rt/sched.rs @@ -15,7 +15,7 @@ use cell::Cell; use super::work_queue::WorkQueue; use super::stack::{StackPool, StackSegment}; -use super::rtio::{EventLoop, EventLoopObject}; +use super::rtio::{EventLoop, EventLoopObject, RemoteCallbackObject}; use super::context::Context; use super::task::Task; use rt::local_ptr; @@ -41,16 +41,19 @@ pub struct Scheduler { priv cleanup_job: Option } -// XXX: Some hacks to put a &fn in Scheduler without borrowck -// complaining -type UnsafeTaskReceiver = sys::Closure; -trait ClosureConverter { - fn from_fn(&fn(~Coroutine)) -> Self; - fn to_fn(self) -> &fn(~Coroutine); +pub struct Coroutine { + /// The segment of stack on which the task is currently running or, + /// if the task is blocked, on which the task will resume execution + priv current_stack_segment: StackSegment, + /// These are always valid when the task is not running, unless + /// the task is dead + priv saved_context: Context, + /// The heap, GC, unwinding, local storage, logging + task: ~Task } -impl ClosureConverter for UnsafeTaskReceiver { - fn from_fn(f: &fn(~Coroutine)) -> UnsafeTaskReceiver { unsafe { transmute(f) } } - fn to_fn(self) -> &fn(~Coroutine) { unsafe { transmute(self) } } + +pub struct SchedHandle { + priv remote: ~RemoteCallbackObject } enum CleanupJob { @@ -103,6 +106,17 @@ pub impl Scheduler { return sched; } + fn make_handle(&mut self) -> SchedHandle { + let remote = 
self.event_loop.remote_callback(wake_up); + + return SchedHandle { + remote: remote + }; + + fn wake_up() { + } + } + /// Schedule a task to be executed later. /// /// Pushes the task onto the work stealing queue and tells the event loop @@ -337,19 +351,6 @@ pub impl Scheduler { } } -static MIN_STACK_SIZE: uint = 10000000; // XXX: Too much stack - -pub struct Coroutine { - /// The segment of stack on which the task is currently running or, - /// if the task is blocked, on which the task will resume execution - priv current_stack_segment: StackSegment, - /// These are always valid when the task is not running, unless - /// the task is dead - priv saved_context: Context, - /// The heap, GC, unwinding, local storage, logging - task: ~Task -} - pub impl Coroutine { fn new(stack_pool: &mut StackPool, start: ~fn()) -> Coroutine { Coroutine::with_task(stack_pool, ~Task::new(), start) @@ -358,6 +359,9 @@ pub impl Coroutine { fn with_task(stack_pool: &mut StackPool, task: ~Task, start: ~fn()) -> Coroutine { + + static MIN_STACK_SIZE: uint = 10000000; // XXX: Too much stack + let start = Coroutine::build_start_wrapper(start); let mut stack = stack_pool.take_segment(MIN_STACK_SIZE); // NB: Context holds a pointer to that ~fn @@ -401,6 +405,18 @@ pub impl Coroutine { } } +// XXX: Some hacks to put a &fn in Scheduler without borrowck +// complaining +type UnsafeTaskReceiver = sys::Closure; +trait ClosureConverter { + fn from_fn(&fn(~Coroutine)) -> Self; + fn to_fn(self) -> &fn(~Coroutine); +} +impl ClosureConverter for UnsafeTaskReceiver { + fn from_fn(f: &fn(~Coroutine)) -> UnsafeTaskReceiver { unsafe { transmute(f) } } + fn to_fn(self) -> &fn(~Coroutine) { unsafe { transmute(self) } } +} + #[cfg(test)] mod test { use int; @@ -411,6 +427,7 @@ mod test { use rt::local::Local; use rt::test::*; use super::*; + use rt::thread::Thread; #[test] fn test_simple_scheduling() { @@ -551,4 +568,42 @@ mod test { } } } + + #[test] + fn handle() { + use rt::comm::*; + + do run_in_bare_thread { + let (port, chan) = oneshot::<()>(); + let port_cell = Cell(port); + let chan_cell = Cell(chan); + let mut sched1 = ~UvEventLoop::new_scheduler(); + let handle1 = sched1.make_handle(); + let handle1_cell = Cell(handle1); + let task1 = ~do Coroutine::new(&mut sched1.stack_pool) { + chan_cell.take().send(()); + }; + sched1.enqueue_task(task1); + + let mut sched2 = ~UvEventLoop::new_scheduler(); + let task2 = ~do Coroutine::new(&mut sched2.stack_pool) { + port_cell.take().recv(); + // Release the other scheduler's handle so it can exit + handle1_cell.take(); + }; + sched2.enqueue_task(task2); + + let sched1_cell = Cell(sched1); + let _thread1 = do Thread::start { + let mut sched1 = sched1_cell.take(); + sched1.run(); + }; + + let sched2_cell = Cell(sched2); + let _thread2 = do Thread::start { + let mut sched2 = sched2_cell.take(); + sched2.run(); + }; + } + } } From 8b7e392752eddc202dae12c6b89b7c59556990ce Mon Sep 17 00:00:00 2001 From: Brian Anderson Date: Wed, 22 May 2013 21:20:19 -0700 Subject: [PATCH 007/111] core::rt: Scheduler takes a WorkQueue This will be for implementing a work-sharing strategy --- src/libcore/rt/mod.rs | 4 +++- src/libcore/rt/sched.rs | 4 ++-- src/libcore/rt/uv/uvio.rs | 3 ++- 3 files changed, 7 insertions(+), 4 deletions(-) diff --git a/src/libcore/rt/mod.rs b/src/libcore/rt/mod.rs index 2fac1df01a495..b6abab38da791 100644 --- a/src/libcore/rt/mod.rs +++ b/src/libcore/rt/mod.rs @@ -145,12 +145,14 @@ pub mod thread_local_storage; pub fn start(_argc: int, _argv: **u8, crate_map: *u8, main: ~fn()) -> int 
{ use self::sched::{Scheduler, Coroutine}; + use self::work_queue::WorkQueue; use self::uv::uvio::UvEventLoop; init(crate_map); let loop_ = ~UvEventLoop::new(); - let mut sched = ~Scheduler::new(loop_); + let work_queue = WorkQueue::new(); + let mut sched = ~Scheduler::new(loop_, work_queue); let main_task = ~Coroutine::new(&mut sched.stack_pool, main); sched.enqueue_task(main_task); diff --git a/src/libcore/rt/sched.rs b/src/libcore/rt/sched.rs index 3f7b332e184b1..f1670d4896a49 100644 --- a/src/libcore/rt/sched.rs +++ b/src/libcore/rt/sched.rs @@ -65,14 +65,14 @@ pub impl Scheduler { fn in_task_context(&self) -> bool { self.current_task.is_some() } - fn new(event_loop: ~EventLoopObject) -> Scheduler { + fn new(event_loop: ~EventLoopObject, work_queue: WorkQueue<~Coroutine>) -> Scheduler { // Lazily initialize the runtime TLS key local_ptr::init_tls_key(); Scheduler { event_loop: event_loop, - work_queue: WorkQueue::new(), + work_queue: work_queue, stack_pool: StackPool::new(), saved_context: Context::empty(), current_task: None, diff --git a/src/libcore/rt/uv/uvio.rs b/src/libcore/rt/uv/uvio.rs index cf1bd568d0288..793a341bffbfb 100644 --- a/src/libcore/rt/uv/uvio.rs +++ b/src/libcore/rt/uv/uvio.rs @@ -24,6 +24,7 @@ use rt::sched::Scheduler; use rt::io::{standard_error, OtherIoError}; use rt::tube::Tube; use rt::local::Local; +use rt::work_queue::WorkQueue; use unstable::sync::{UnsafeAtomicRcBox, AtomicInt}; use unstable::intrinsics; @@ -45,7 +46,7 @@ pub impl UvEventLoop { /// A convenience constructor fn new_scheduler() -> Scheduler { - Scheduler::new(~UvEventLoop::new()) + Scheduler::new(~UvEventLoop::new(), WorkQueue::new()) } } From 7f107c415f1c88b016b9da0fa9c58e6b61f82589 Mon Sep 17 00:00:00 2001 From: Brian Anderson Date: Wed, 22 May 2013 22:18:29 -0700 Subject: [PATCH 008/111] core::rt: Remove UvEventLoop::new_scheduler function --- src/libcore/rt/local.rs | 9 +++++---- src/libcore/rt/mod.rs | 3 ++- src/libcore/rt/sched.rs | 14 +++++++------- src/libcore/rt/test.rs | 12 ++++++++++-- src/libcore/rt/uv/uvio.rs | 5 ----- 5 files changed, 24 insertions(+), 19 deletions(-) diff --git a/src/libcore/rt/local.rs b/src/libcore/rt/local.rs index 64a384ddff0b9..b4ecf9cd06162 100644 --- a/src/libcore/rt/local.rs +++ b/src/libcore/rt/local.rs @@ -85,30 +85,31 @@ impl Local for IoFactoryObject { #[cfg(test)] mod test { + use rt::test::*; use rt::sched::Scheduler; use rt::uv::uvio::UvEventLoop; use super::*; #[test] fn thread_local_scheduler_smoke_test() { - let scheduler = ~UvEventLoop::new_scheduler(); + let scheduler = ~new_test_uv_sched(); Local::put(scheduler); let _scheduler: ~Scheduler = Local::take(); } #[test] fn thread_local_scheduler_two_instances() { - let scheduler = ~UvEventLoop::new_scheduler(); + let scheduler = ~new_test_uv_sched(); Local::put(scheduler); let _scheduler: ~Scheduler = Local::take(); - let scheduler = ~UvEventLoop::new_scheduler(); + let scheduler = ~new_test_uv_sched(); Local::put(scheduler); let _scheduler: ~Scheduler = Local::take(); } #[test] fn borrow_smoke_test() { - let scheduler = ~UvEventLoop::new_scheduler(); + let scheduler = ~new_test_uv_sched(); Local::put(scheduler); unsafe { let _scheduler: *mut Scheduler = Local::unsafe_borrow(); diff --git a/src/libcore/rt/mod.rs b/src/libcore/rt/mod.rs index b6abab38da791..f136732c00b93 100644 --- a/src/libcore/rt/mod.rs +++ b/src/libcore/rt/mod.rs @@ -223,11 +223,12 @@ fn test_context() { use rt::uv::uvio::UvEventLoop; use cell::Cell; use rt::local::Local; + use rt::test::new_test_uv_sched; 
assert_eq!(context(), OldTaskContext); do run_in_bare_thread { assert_eq!(context(), GlobalContext); - let mut sched = ~UvEventLoop::new_scheduler(); + let mut sched = ~new_test_uv_sched(); let task = ~do Coroutine::new(&mut sched.stack_pool) { assert_eq!(context(), TaskContext); let sched = Local::take::(); diff --git a/src/libcore/rt/sched.rs b/src/libcore/rt/sched.rs index f1670d4896a49..78c5da08c39ba 100644 --- a/src/libcore/rt/sched.rs +++ b/src/libcore/rt/sched.rs @@ -435,7 +435,7 @@ mod test { let mut task_ran = false; let task_ran_ptr: *mut bool = &mut task_ran; - let mut sched = ~UvEventLoop::new_scheduler(); + let mut sched = ~new_test_uv_sched(); let task = ~do Coroutine::new(&mut sched.stack_pool) { unsafe { *task_ran_ptr = true; } }; @@ -452,7 +452,7 @@ mod test { let mut task_count = 0; let task_count_ptr: *mut int = &mut task_count; - let mut sched = ~UvEventLoop::new_scheduler(); + let mut sched = ~new_test_uv_sched(); for int::range(0, total) |_| { let task = ~do Coroutine::new(&mut sched.stack_pool) { unsafe { *task_count_ptr = *task_count_ptr + 1; } @@ -470,7 +470,7 @@ mod test { let mut count = 0; let count_ptr: *mut int = &mut count; - let mut sched = ~UvEventLoop::new_scheduler(); + let mut sched = ~new_test_uv_sched(); let task1 = ~do Coroutine::new(&mut sched.stack_pool) { unsafe { *count_ptr = *count_ptr + 1; } let mut sched = Local::take::(); @@ -499,7 +499,7 @@ mod test { let mut count = 0; let count_ptr: *mut int = &mut count; - let mut sched = ~UvEventLoop::new_scheduler(); + let mut sched = ~new_test_uv_sched(); let start_task = ~do Coroutine::new(&mut sched.stack_pool) { run_task(count_ptr); @@ -528,7 +528,7 @@ mod test { #[test] fn test_block_task() { do run_in_bare_thread { - let mut sched = ~UvEventLoop::new_scheduler(); + let mut sched = ~new_test_uv_sched(); let task = ~do Coroutine::new(&mut sched.stack_pool) { let sched = Local::take::(); assert!(sched.in_task_context()); @@ -577,7 +577,7 @@ mod test { let (port, chan) = oneshot::<()>(); let port_cell = Cell(port); let chan_cell = Cell(chan); - let mut sched1 = ~UvEventLoop::new_scheduler(); + let mut sched1 = ~new_test_uv_sched(); let handle1 = sched1.make_handle(); let handle1_cell = Cell(handle1); let task1 = ~do Coroutine::new(&mut sched1.stack_pool) { @@ -585,7 +585,7 @@ mod test { }; sched1.enqueue_task(task1); - let mut sched2 = ~UvEventLoop::new_scheduler(); + let mut sched2 = ~new_test_uv_sched(); let task2 = ~do Coroutine::new(&mut sched2.stack_pool) { port_cell.take().recv(); // Release the other scheduler's handle so it can exit diff --git a/src/libcore/rt/test.rs b/src/libcore/rt/test.rs index c60ae2bfeffc8..0e2da452366cf 100644 --- a/src/libcore/rt/test.rs +++ b/src/libcore/rt/test.rs @@ -16,6 +16,14 @@ use super::io::net::ip::{IpAddr, Ipv4}; use rt::task::Task; use rt::thread::Thread; use rt::local::Local; +use rt::sched::Scheduler; + +pub fn new_test_uv_sched() -> Scheduler { + use rt::uv::uvio::UvEventLoop; + use rt::work_queue::WorkQueue; + + Scheduler::new(~UvEventLoop::new(), WorkQueue::new()) +} /// Creates a new scheduler in a new thread and runs a task in it, /// then waits for the scheduler to exit. 
Failure of the task @@ -28,7 +36,7 @@ pub fn run_in_newsched_task(f: ~fn()) { let f = Cell(f); do run_in_bare_thread { - let mut sched = ~UvEventLoop::new_scheduler(); + let mut sched = ~new_test_uv_sched(); let task = ~Coroutine::with_task(&mut sched.stack_pool, ~Task::without_unwinding(), f.take()); @@ -155,7 +163,7 @@ pub fn spawntask_thread(f: ~fn()) -> Thread { let f = Cell(f); let thread = do Thread::start { - let mut sched = ~UvEventLoop::new_scheduler(); + let mut sched = ~new_test_uv_sched(); let task = ~Coroutine::with_task(&mut sched.stack_pool, ~Task::without_unwinding(), f.take()); diff --git a/src/libcore/rt/uv/uvio.rs b/src/libcore/rt/uv/uvio.rs index 793a341bffbfb..e25b6140abbfd 100644 --- a/src/libcore/rt/uv/uvio.rs +++ b/src/libcore/rt/uv/uvio.rs @@ -43,11 +43,6 @@ pub impl UvEventLoop { uvio: UvIoFactory(Loop::new()) } } - - /// A convenience constructor - fn new_scheduler() -> Scheduler { - Scheduler::new(~UvEventLoop::new(), WorkQueue::new()) - } } impl Drop for UvEventLoop { From 3f8095e55043f35e08adba5fe5b0a2d687ebc514 Mon Sep 17 00:00:00 2001 From: Brian Anderson Date: Thu, 23 May 2013 00:04:50 -0700 Subject: [PATCH 009/111] core::rt: Add a very basic multi-threaded scheduling test --- src/libcore/rt/sched.rs | 72 +++++++++++++++++++++++++++++++++++++++-- 1 file changed, 70 insertions(+), 2 deletions(-) diff --git a/src/libcore/rt/sched.rs b/src/libcore/rt/sched.rs index 78c5da08c39ba..e78d50beebe1b 100644 --- a/src/libcore/rt/sched.rs +++ b/src/libcore/rt/sched.rs @@ -114,6 +114,8 @@ pub impl Scheduler { }; fn wake_up() { + let sched = Local::take::(); + sched.resume_task_from_queue(); } } @@ -127,8 +129,8 @@ pub impl Scheduler { self.event_loop.callback(resume_task_from_queue); fn resume_task_from_queue() { - let scheduler = Local::take::(); - scheduler.resume_task_from_queue(); + let sched = Local::take::(); + sched.resume_task_from_queue(); } } @@ -606,4 +608,70 @@ mod test { }; } } + + #[test] + fn multithreading() { + use clone::Clone; + use iter::Times; + use rt::work_queue::WorkQueue; + use rt::comm::*; + use container::Container; + use vec::OwnedVector; + use rt::rtio::RemoteCallback; + + do run_in_bare_thread { + let work_queue1 = WorkQueue::new(); + let work_queue2 = work_queue1.clone(); + + let loop1 = ~UvEventLoop::new(); + let mut sched1 = ~Scheduler::new(loop1, work_queue1.clone()); + let handle1 = sched1.make_handle(); + let sched1_cell = Cell(sched1); + let handle1_cell = Cell(handle1); + + let loop2 = ~UvEventLoop::new(); + let mut sched2 = ~Scheduler::new(loop2, work_queue2.clone()); + let handle2 = sched2.make_handle(); + let sched2_cell = Cell(sched2); + let handle2_cell = Cell(handle2); + + let _thread1 = do Thread::start { + let mut sched1 = sched1_cell.take(); + sched1.run(); + }; + + let _thread2 = do Thread::start { + let mut sched2 = sched2_cell.take(); + let handle1_cell = Cell(handle1_cell.take()); + let handle2_cell = Cell(handle2_cell.take()); + + let task = ~do Coroutine::new(&mut sched2.stack_pool) { + // Hold handles to keep the schedulers alive + let mut handle1 = handle1_cell.take(); + let mut handle2 = handle2_cell.take(); + + let mut ports = ~[]; + for 10.times { + let (port, chan) = oneshot(); + let chan_cell = Cell(chan); + do spawntask_later { + chan_cell.take().send(()); + } + ports.push(port); + + // Make sure the other scheduler is awake + handle1.remote.fire(); + handle2.remote.fire(); + } + + while !ports.is_empty() { + ports.pop().recv(); + } + }; + + sched2.enqueue_task(task); + sched2.run(); + }; + } + } } From 
dec9db10da062b1c528d46426d9f62e201d39bc6 Mon Sep 17 00:00:00 2001 From: Brian Anderson Date: Tue, 28 May 2013 18:39:52 -0700 Subject: [PATCH 010/111] core::rt: Add SleeperList Just a simple place to stuff handles to sleeping schedulers. --- src/libcore/rt/mod.rs | 3 +++ src/libcore/rt/sleeper_list.rs | 46 ++++++++++++++++++++++++++++++++++ 2 files changed, 49 insertions(+) create mode 100644 src/libcore/rt/sleeper_list.rs diff --git a/src/libcore/rt/mod.rs b/src/libcore/rt/mod.rs index f136732c00b93..82496ec558940 100644 --- a/src/libcore/rt/mod.rs +++ b/src/libcore/rt/mod.rs @@ -88,6 +88,9 @@ mod work_queue; /// A parallel queue. mod message_queue; +/// A parallel data structure for tracking sleeping schedulers. +mod sleeper_list; + /// Stack segments and caching. mod stack; diff --git a/src/libcore/rt/sleeper_list.rs b/src/libcore/rt/sleeper_list.rs new file mode 100644 index 0000000000000..9507dec001d51 --- /dev/null +++ b/src/libcore/rt/sleeper_list.rs @@ -0,0 +1,46 @@ +// Copyright 2013 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Maintains a shared list of sleeping schedulers. Schedulers +//! use this to wake each other up. + +use container::Container; +use vec::OwnedVector; +use option::{Option, Some, None}; +use cell::Cell; +use unstable::sync::{Exclusive, exclusive}; +use rt::sched::{Scheduler, SchedHandle}; + +pub struct SleeperList { + priv stack: ~Exclusive<~[SchedHandle]> +} + +impl SleeperList { + pub fn new() -> SleeperList { + SleeperList { + stack: ~exclusive(~[]) + } + } + + pub fn push(&mut self, handle: SchedHandle) { + let handle = Cell(handle); + self.stack.with(|s| s.push(handle.take())); + } + + pub fn pop(&mut self) -> Option { + do self.stack.with |s| { + if !s.is_empty() { + Some(s.pop()) + } else { + None + } + } + } +} From ed8c3594bc86dd366e729d02c34915c783e6ac81 Mon Sep 17 00:00:00 2001 From: Brian Anderson Date: Tue, 28 May 2013 19:53:55 -0700 Subject: [PATCH 011/111] core::rt: Add SleeperList to Scheduler --- src/libcore/rt/mod.rs | 4 +++- src/libcore/rt/sched.rs | 20 +++++++++++++++++--- src/libcore/rt/sleeper_list.rs | 9 +++++++++ src/libcore/rt/test.rs | 3 ++- 4 files changed, 31 insertions(+), 5 deletions(-) diff --git a/src/libcore/rt/mod.rs b/src/libcore/rt/mod.rs index 82496ec558940..75036dcd28f8d 100644 --- a/src/libcore/rt/mod.rs +++ b/src/libcore/rt/mod.rs @@ -150,12 +150,14 @@ pub fn start(_argc: int, _argv: **u8, crate_map: *u8, main: ~fn()) -> int { use self::sched::{Scheduler, Coroutine}; use self::work_queue::WorkQueue; use self::uv::uvio::UvEventLoop; + use self::sleeper_list::SleeperList; init(crate_map); let loop_ = ~UvEventLoop::new(); let work_queue = WorkQueue::new(); - let mut sched = ~Scheduler::new(loop_, work_queue); + let sleepers = SleeperList::new(); + let mut sched = ~Scheduler::new(loop_, work_queue, sleepers); let main_task = ~Coroutine::new(&mut sched.stack_pool, main); sched.enqueue_task(main_task); diff --git a/src/libcore/rt/sched.rs b/src/libcore/rt/sched.rs index e78d50beebe1b..2a99648fa0459 100644 --- a/src/libcore/rt/sched.rs +++ b/src/libcore/rt/sched.rs @@ -13,6 +13,7 @@ use sys; use cast::transmute; use cell::Cell; +use super::sleeper_list::SleeperList; use super::work_queue::WorkQueue; use super::stack::{StackPool, 
StackSegment}; use super::rtio::{EventLoop, EventLoopObject, RemoteCallbackObject}; @@ -27,7 +28,12 @@ use rt::rtio::IoFactoryObject; /// thread local storage and the running task is owned by the /// scheduler. pub struct Scheduler { + /// A queue of available work. Under a work-stealing policy there + /// is one per Scheduler. priv work_queue: WorkQueue<~Coroutine>, + /// A shared list of sleeping schedulers. We'll use this to wake + /// up schedulers when pushing work onto the work queue. + priv sleeper_list: SleeperList, stack_pool: StackPool, /// The event loop used to drive the scheduler and perform I/O event_loop: ~EventLoopObject, @@ -65,12 +71,16 @@ pub impl Scheduler { fn in_task_context(&self) -> bool { self.current_task.is_some() } - fn new(event_loop: ~EventLoopObject, work_queue: WorkQueue<~Coroutine>) -> Scheduler { + fn new(event_loop: ~EventLoopObject, + work_queue: WorkQueue<~Coroutine>, + sleeper_list: SleeperList) + -> Scheduler { // Lazily initialize the runtime TLS key local_ptr::init_tls_key(); Scheduler { + sleeper_list: sleeper_list, event_loop: event_loop, work_queue: work_queue, stack_pool: StackPool::new(), @@ -618,19 +628,23 @@ mod test { use container::Container; use vec::OwnedVector; use rt::rtio::RemoteCallback; + use rt::sleeper_list::SleeperList; do run_in_bare_thread { + let sleepers1 = SleeperList::new(); let work_queue1 = WorkQueue::new(); + + let sleepers2 = sleepers1.clone(); let work_queue2 = work_queue1.clone(); let loop1 = ~UvEventLoop::new(); - let mut sched1 = ~Scheduler::new(loop1, work_queue1.clone()); + let mut sched1 = ~Scheduler::new(loop1, work_queue1.clone(), sleepers1); let handle1 = sched1.make_handle(); let sched1_cell = Cell(sched1); let handle1_cell = Cell(handle1); let loop2 = ~UvEventLoop::new(); - let mut sched2 = ~Scheduler::new(loop2, work_queue2.clone()); + let mut sched2 = ~Scheduler::new(loop2, work_queue2.clone(), sleepers2); let handle2 = sched2.make_handle(); let sched2_cell = Cell(sched2); let handle2_cell = Cell(handle2); diff --git a/src/libcore/rt/sleeper_list.rs b/src/libcore/rt/sleeper_list.rs index 9507dec001d51..dfcac8eb088f7 100644 --- a/src/libcore/rt/sleeper_list.rs +++ b/src/libcore/rt/sleeper_list.rs @@ -17,6 +17,7 @@ use option::{Option, Some, None}; use cell::Cell; use unstable::sync::{Exclusive, exclusive}; use rt::sched::{Scheduler, SchedHandle}; +use clone::Clone; pub struct SleeperList { priv stack: ~Exclusive<~[SchedHandle]> @@ -44,3 +45,11 @@ impl SleeperList { } } } + +impl Clone for SleeperList { + fn clone(&self) -> SleeperList { + SleeperList { + stack: self.stack.clone() + } + } +} \ No newline at end of file diff --git a/src/libcore/rt/test.rs b/src/libcore/rt/test.rs index 0e2da452366cf..d6896f5003437 100644 --- a/src/libcore/rt/test.rs +++ b/src/libcore/rt/test.rs @@ -21,8 +21,9 @@ use rt::sched::Scheduler; pub fn new_test_uv_sched() -> Scheduler { use rt::uv::uvio::UvEventLoop; use rt::work_queue::WorkQueue; + use rt::sleeper_list::SleeperList; - Scheduler::new(~UvEventLoop::new(), WorkQueue::new()) + Scheduler::new(~UvEventLoop::new(), WorkQueue::new(), SleeperList::new()) } /// Creates a new scheduler in a new thread and runs a task in it, From 5043ea269da73e96fbadc7c443aec01f087dabe9 Mon Sep 17 00:00:00 2001 From: Brian Anderson Date: Tue, 28 May 2013 23:35:22 -0700 Subject: [PATCH 012/111] core::rt: Add run_in_mt_newsched_task test function --- src/libcore/rt/test.rs | 63 ++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 61 insertions(+), 2 deletions(-) diff --git 
a/src/libcore/rt/test.rs b/src/libcore/rt/test.rs index d6896f5003437..a66e4f09fe72b 100644 --- a/src/libcore/rt/test.rs +++ b/src/libcore/rt/test.rs @@ -9,14 +9,20 @@ // except according to those terms. use uint; -use option::*; +use option::{Option, Some, None}; use cell::Cell; +use clone::Clone; +use container::Container; +use vec::OwnedVector; use result::{Result, Ok, Err}; +use unstable::run_in_bare_thread; use super::io::net::ip::{IpAddr, Ipv4}; use rt::task::Task; use rt::thread::Thread; use rt::local::Local; -use rt::sched::Scheduler; +use rt::sched::{Scheduler, Coroutine}; +use rt::sleeper_list::SleeperList; +use rt::work_queue::WorkQueue; pub fn new_test_uv_sched() -> Scheduler { use rt::uv::uvio::UvEventLoop; @@ -46,6 +52,59 @@ pub fn run_in_newsched_task(f: ~fn()) { } } +/// Create more than one scheduler and run a function in a task +/// in one of the schedulers. The schedulers will stay alive +/// until the function `f` returns. +pub fn run_in_mt_newsched_task(f: ~fn()) { + use rt::uv::uvio::UvEventLoop; + + let f_cell = Cell(f); + + do run_in_bare_thread { + static N: uint = 2; + + let sleepers = SleeperList::new(); + let work_queue = WorkQueue::new(); + + let mut handles = ~[]; + let mut scheds = ~[]; + + for uint::range(0, N) |i| { + let loop_ = ~UvEventLoop::new(); + let mut sched = ~Scheduler::new(loop_, work_queue.clone(), sleepers.clone()); + let handle = sched.make_handle(); + handles.push(handle); + scheds.push(sched); + } + + let f_cell = Cell(f_cell.take()); + let handles = handles; // Freeze + let main_task = ~do Coroutine::new(&mut scheds[0].stack_pool) { + f_cell.take()(); + // Hold on to handles until the function exits. This keeps the schedulers alive. + let _captured_handles = &handles; + }; + + scheds[0].enqueue_task(main_task); + + let mut threads = ~[]; + + while !scheds.is_empty() { + let sched = scheds.pop(); + let sched_cell = Cell(sched); + let thread = do Thread::start { + let mut sched = sched_cell.take(); + sched.run(); + }; + + threads.push(thread); + } + + // Wait for schedulers + let _threads = threads; + } +} + /// Test tasks will abort on failure instead of unwinding pub fn spawntask(f: ~fn()) { use super::sched::*; From a373dad74d0bd89a9d5362bba1059d9cc25afb9a Mon Sep 17 00:00:00 2001 From: Brian Anderson Date: Wed, 29 May 2013 15:55:23 -0700 Subject: [PATCH 013/111] core::rt: Outline the full multithreaded scheduling algo. Implement sleeping --- src/libcore/rt/message_queue.rs | 3 + src/libcore/rt/mod.rs | 1 + src/libcore/rt/sched.rs | 222 +++++++++++++++++++++----------- src/libcore/rt/test.rs | 17 ++- 4 files changed, 162 insertions(+), 81 deletions(-) diff --git a/src/libcore/rt/message_queue.rs b/src/libcore/rt/message_queue.rs index eaab9288ac8d0..21711bbe84c70 100644 --- a/src/libcore/rt/message_queue.rs +++ b/src/libcore/rt/message_queue.rs @@ -8,6 +8,9 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +//! A concurrent queue that supports multiple producers and a +//! single consumer. 
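
// A rough modern-Rust analogue of such a multiple-producer, single-consumer
// queue -- an illustrative sketch built on std::sync, not the implementation
// in this file:

use std::collections::VecDeque;
use std::sync::{Arc, Mutex};

struct MessageQueue<T> {
    queue: Arc<Mutex<VecDeque<T>>>,
}

impl<T> Clone for MessageQueue<T> {
    // Cloning hands another producer a handle to the same underlying queue.
    fn clone(&self) -> Self {
        MessageQueue { queue: self.queue.clone() }
    }
}

impl<T> MessageQueue<T> {
    fn new() -> Self {
        MessageQueue { queue: Arc::new(Mutex::new(VecDeque::new())) }
    }

    // Any thread holding a clone may push (multiple producers).
    fn push(&self, value: T) {
        self.queue.lock().unwrap().push_back(value);
    }

    // Only the owning scheduler polls; `None` means "nothing right now",
    // not "closed".
    fn pop(&self) -> Option<T> {
        self.queue.lock().unwrap().pop_front()
    }
}
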
+ use container::Container; use kinds::Owned; use vec::OwnedVector; diff --git a/src/libcore/rt/mod.rs b/src/libcore/rt/mod.rs index 75036dcd28f8d..e23ad76a8c610 100644 --- a/src/libcore/rt/mod.rs +++ b/src/libcore/rt/mod.rs @@ -158,6 +158,7 @@ pub fn start(_argc: int, _argv: **u8, crate_map: *u8, main: ~fn()) -> int { let work_queue = WorkQueue::new(); let sleepers = SleeperList::new(); let mut sched = ~Scheduler::new(loop_, work_queue, sleepers); + sched.no_sleep = true; let main_task = ~Coroutine::new(&mut sched.stack_pool, main); sched.enqueue_task(main_task); diff --git a/src/libcore/rt/sched.rs b/src/libcore/rt/sched.rs index 2a99648fa0459..c6d6bb9f39e50 100644 --- a/src/libcore/rt/sched.rs +++ b/src/libcore/rt/sched.rs @@ -12,6 +12,7 @@ use option::*; use sys; use cast::transmute; use cell::Cell; +use clone::Clone; use super::sleeper_list::SleeperList; use super::work_queue::WorkQueue; @@ -19,9 +20,10 @@ use super::stack::{StackPool, StackSegment}; use super::rtio::{EventLoop, EventLoopObject, RemoteCallbackObject}; use super::context::Context; use super::task::Task; +use super::message_queue::MessageQueue; use rt::local_ptr; use rt::local::Local; -use rt::rtio::IoFactoryObject; +use rt::rtio::{IoFactoryObject, RemoteCallback}; /// The Scheduler is responsible for coordinating execution of Coroutines /// on a single thread. When the scheduler is running it is owned by @@ -31,9 +33,23 @@ pub struct Scheduler { /// A queue of available work. Under a work-stealing policy there /// is one per Scheduler. priv work_queue: WorkQueue<~Coroutine>, + /// The queue of incoming messages from other schedulers. + /// These are enqueued by SchedHandles after which a remote callback + /// is triggered to handle the message. + priv message_queue: MessageQueue, /// A shared list of sleeping schedulers. We'll use this to wake /// up schedulers when pushing work onto the work queue. priv sleeper_list: SleeperList, + /// Indicates that we have previously pushed a handle onto the + /// SleeperList but have not yet received the Wake message. + /// Being `true` does not necessarily mean that the scheduler is + /// not active since there are multiple event sources that may + /// wake the scheduler. It just prevents the scheduler from pushing + /// multiple handles onto the sleeper list. + priv sleepy: bool, + /// A flag to indicate we've received the shutdown message and should + /// no longer try to go to sleep, but exit instead. 
+ no_sleep: bool, stack_pool: StackPool, /// The event loop used to drive the scheduler and perform I/O event_loop: ~EventLoopObject, @@ -47,6 +63,11 @@ pub struct Scheduler { priv cleanup_job: Option } +pub struct SchedHandle { + priv remote: ~RemoteCallbackObject, + priv queue: MessageQueue +} + pub struct Coroutine { /// The segment of stack on which the task is currently running or, /// if the task is blocked, on which the task will resume execution @@ -58,8 +79,9 @@ pub struct Coroutine { task: ~Task } -pub struct SchedHandle { - priv remote: ~RemoteCallbackObject +pub enum SchedMessage { + Wake, + Shutdown } enum CleanupJob { @@ -81,12 +103,15 @@ pub impl Scheduler { Scheduler { sleeper_list: sleeper_list, + message_queue: MessageQueue::new(), + sleepy: false, + no_sleep: false, event_loop: event_loop, work_queue: work_queue, stack_pool: StackPool::new(), saved_context: Context::empty(), current_task: None, - cleanup_job: None + cleanup_job: None, } } @@ -116,17 +141,51 @@ pub impl Scheduler { return sched; } + fn run_sched_once() { + + let sched = Local::take::(); + if sched.interpret_message_queue() { + // We performed a scheduling action. There may be other work + // to do yet, so let's try again later. + let mut sched = Local::take::(); + sched.event_loop.callback(Scheduler::run_sched_once); + Local::put(sched); + return; + } + + let sched = Local::take::(); + if sched.resume_task_from_queue() { + // We performed a scheduling action. There may be other work + // to do yet, so let's try again later. + let mut sched = Local::take::(); + sched.event_loop.callback(Scheduler::run_sched_once); + Local::put(sched); + return; + } + + // If we got here then there was no work to do. + // Generate a SchedHandle and push it to the sleeper list so + // somebody can wake us up later. + rtdebug!("no work to do"); + let mut sched = Local::take::(); + if !sched.sleepy && !sched.no_sleep { + rtdebug!("sleeping"); + sched.sleepy = true; + let handle = sched.make_handle(); + sched.sleeper_list.push(handle); + } else { + rtdebug!("not sleeping"); + } + Local::put(sched); + } + fn make_handle(&mut self) -> SchedHandle { - let remote = self.event_loop.remote_callback(wake_up); + let remote = self.event_loop.remote_callback(Scheduler::run_sched_once); return SchedHandle { - remote: remote + remote: remote, + queue: self.message_queue.clone() }; - - fn wake_up() { - let sched = Local::take::(); - sched.resume_task_from_queue(); - } } /// Schedule a task to be executed later. @@ -136,17 +195,63 @@ pub impl Scheduler { /// directly. fn enqueue_task(&mut self, task: ~Coroutine) { self.work_queue.push(task); - self.event_loop.callback(resume_task_from_queue); + self.event_loop.callback(Scheduler::run_sched_once); - fn resume_task_from_queue() { - let sched = Local::take::(); - sched.resume_task_from_queue(); + // We've made work available. Notify a sleeping scheduler. + match self.sleeper_list.pop() { + Some(handle) => { + let mut handle = handle; + handle.send(Wake) + } + None => (/* pass */) } } // * Scheduler-context operations - fn resume_task_from_queue(~self) { + fn interpret_message_queue(~self) -> bool { + assert!(!self.in_task_context()); + + rtdebug!("looking for scheduler messages"); + + let mut this = self; + match this.message_queue.pop() { + Some(Wake) => { + rtdebug!("recv Wake message"); + this.sleepy = false; + Local::put(this); + return true; + } + Some(Shutdown) => { + rtdebug!("recv Shutdown message"); + if this.sleepy { + // There may be an outstanding handle on the sleeper list. 
+ // Pop them all to make sure that's not the case. + loop { + match this.sleeper_list.pop() { + Some(handle) => { + let mut handle = handle; + handle.send(Wake); + } + None => (/* pass */) + } + } + } + // No more sleeping. After there are no outstanding event loop + // references we will shut down. + this.no_sleep = true; + this.sleepy = false; + Local::put(this); + return true; + } + None => { + Local::put(this); + return false; + } + } + } + + fn resume_task_from_queue(~self) -> bool { assert!(!self.in_task_context()); rtdebug!("looking in work queue for task to schedule"); @@ -156,10 +261,12 @@ pub impl Scheduler { Some(task) => { rtdebug!("resuming task from work queue"); this.resume_task_immediately(task); + return true; } None => { rtdebug!("no tasks in queue"); Local::put(this); + return false; } } } @@ -363,6 +470,13 @@ pub impl Scheduler { } } +impl SchedHandle { + pub fn send(&mut self, msg: SchedMessage) { + self.queue.push(msg); + self.remote.fire(); + } +} + pub impl Coroutine { fn new(stack_pool: &mut StackPool, start: ~fn()) -> Coroutine { Coroutine::with_task(stack_pool, ~Task::new(), start) @@ -621,71 +735,25 @@ mod test { #[test] fn multithreading() { - use clone::Clone; - use iter::Times; - use rt::work_queue::WorkQueue; use rt::comm::*; - use container::Container; + use iter::Times; use vec::OwnedVector; - use rt::rtio::RemoteCallback; - use rt::sleeper_list::SleeperList; - - do run_in_bare_thread { - let sleepers1 = SleeperList::new(); - let work_queue1 = WorkQueue::new(); - - let sleepers2 = sleepers1.clone(); - let work_queue2 = work_queue1.clone(); - - let loop1 = ~UvEventLoop::new(); - let mut sched1 = ~Scheduler::new(loop1, work_queue1.clone(), sleepers1); - let handle1 = sched1.make_handle(); - let sched1_cell = Cell(sched1); - let handle1_cell = Cell(handle1); - - let loop2 = ~UvEventLoop::new(); - let mut sched2 = ~Scheduler::new(loop2, work_queue2.clone(), sleepers2); - let handle2 = sched2.make_handle(); - let sched2_cell = Cell(sched2); - let handle2_cell = Cell(handle2); - - let _thread1 = do Thread::start { - let mut sched1 = sched1_cell.take(); - sched1.run(); - }; - - let _thread2 = do Thread::start { - let mut sched2 = sched2_cell.take(); - let handle1_cell = Cell(handle1_cell.take()); - let handle2_cell = Cell(handle2_cell.take()); - - let task = ~do Coroutine::new(&mut sched2.stack_pool) { - // Hold handles to keep the schedulers alive - let mut handle1 = handle1_cell.take(); - let mut handle2 = handle2_cell.take(); - - let mut ports = ~[]; - for 10.times { - let (port, chan) = oneshot(); - let chan_cell = Cell(chan); - do spawntask_later { - chan_cell.take().send(()); - } - ports.push(port); - - // Make sure the other scheduler is awake - handle1.remote.fire(); - handle2.remote.fire(); - } + use container::Container; - while !ports.is_empty() { - ports.pop().recv(); - } - }; + do run_in_mt_newsched_task { + let mut ports = ~[]; + for 10.times { + let (port, chan) = oneshot(); + let chan_cell = Cell(chan); + do spawntask_later { + chan_cell.take().send(()); + } + ports.push(port); + } - sched2.enqueue_task(task); - sched2.run(); - }; + while !ports.is_empty() { + ports.pop().recv(); + } } } } diff --git a/src/libcore/rt/test.rs b/src/libcore/rt/test.rs index a66e4f09fe72b..1bbfe8d473db3 100644 --- a/src/libcore/rt/test.rs +++ b/src/libcore/rt/test.rs @@ -13,6 +13,7 @@ use option::{Option, Some, None}; use cell::Cell; use clone::Clone; use container::Container; +use old_iter::MutableIter; use vec::OwnedVector; use result::{Result, Ok, Err}; use 
unstable::run_in_bare_thread; @@ -29,7 +30,10 @@ pub fn new_test_uv_sched() -> Scheduler { use rt::work_queue::WorkQueue; use rt::sleeper_list::SleeperList; - Scheduler::new(~UvEventLoop::new(), WorkQueue::new(), SleeperList::new()) + let mut sched = Scheduler::new(~UvEventLoop::new(), WorkQueue::new(), SleeperList::new()); + // Don't wait for the Shutdown message + sched.no_sleep = true; + return sched; } /// Creates a new scheduler in a new thread and runs a task in it, @@ -57,6 +61,7 @@ pub fn run_in_newsched_task(f: ~fn()) { /// until the function `f` returns. pub fn run_in_mt_newsched_task(f: ~fn()) { use rt::uv::uvio::UvEventLoop; + use rt::sched::Shutdown; let f_cell = Cell(f); @@ -78,11 +83,15 @@ pub fn run_in_mt_newsched_task(f: ~fn()) { } let f_cell = Cell(f_cell.take()); - let handles = handles; // Freeze + let handles = Cell(handles); let main_task = ~do Coroutine::new(&mut scheds[0].stack_pool) { f_cell.take()(); - // Hold on to handles until the function exits. This keeps the schedulers alive. - let _captured_handles = &handles; + + let mut handles = handles.take(); + // Tell schedulers to exit + for handles.each_mut |handle| { + handle.send(Shutdown); + } }; scheds[0].enqueue_task(main_task); From f343e6172b7132545c72e3e09e6afccc06fdcee7 Mon Sep 17 00:00:00 2001 From: Brian Anderson Date: Wed, 29 May 2013 17:25:29 -0700 Subject: [PATCH 014/111] core::rt: Fix an infinite recursion bug --- src/libcore/rt/comm.rs | 23 +++++++++++++++++++++-- 1 file changed, 21 insertions(+), 2 deletions(-) diff --git a/src/libcore/rt/comm.rs b/src/libcore/rt/comm.rs index 576a402b70919..d108e20347a05 100644 --- a/src/libcore/rt/comm.rs +++ b/src/libcore/rt/comm.rs @@ -22,6 +22,7 @@ use ops::Drop; use kinds::Owned; use rt::sched::{Scheduler, Coroutine}; use rt::local::Local; +use rt::rtio::EventLoop; use unstable::intrinsics::{atomic_xchg, atomic_load}; use util::Void; use comm::{GenericChan, GenericSmartChan, GenericPort, Peekable}; @@ -172,9 +173,17 @@ impl PortOne { } STATE_ONE => { // Channel is closed. Switch back and check the data. + // NB: We have to drop back into the scheduler event loop here + // instead of switching immediately back or we could end up + // triggering infinite recursion on the scheduler's stack. 
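
// The shape of that fix, stripped of libuv and the Packet state machine
// (a minimal sketch in current Rust; the EventLoop type and names are
// illustrative assumptions): deferred callbacks are queued and each one runs
// from the loop's own frame, so handling many wake-ups never nests frames on
// the scheduler stack the way an immediate, recursive switch could.

use std::collections::VecDeque;

struct EventLoop {
    callbacks: VecDeque<Box<dyn FnOnce(&mut EventLoop)>>,
}

impl EventLoop {
    // Counterpart of `sched.event_loop.callback { ... }`: run `f` later.
    fn callback(&mut self, f: impl FnOnce(&mut EventLoop) + 'static) {
        self.callbacks.push_back(Box::new(f));
    }

    // Each callback returns before the next starts, so stack depth stays
    // constant no matter how many resumes get queued.
    fn run(&mut self) {
        while let Some(cb) = self.callbacks.pop_front() {
            cb(self);
        }
    }
}
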
let task: ~Coroutine = cast::transmute(task_as_state); - let sched = Local::take::(); - sched.resume_task_immediately(task); + let task = Cell(task); + let mut sched = Local::take::(); + do sched.event_loop.callback { + let sched = Local::take::(); + sched.resume_task_immediately(task.take()); + } + Local::put(sched); } _ => util::unreachable() } @@ -614,5 +623,15 @@ mod test { } } } + + #[test] + fn recv_a_lot() { + // Regression test that we don't run out of stack in scheduler context + do run_in_newsched_task { + let (port, chan) = stream(); + for 10000.times { chan.send(()) } + for 10000.times { port.recv() } + } + } } From 134bb0f3eeed69bbf6dc672bbbfbc802f1a018a9 Mon Sep 17 00:00:00 2001 From: Brian Anderson Date: Wed, 29 May 2013 17:52:00 -0700 Subject: [PATCH 015/111] core::rt: Change the signature of context switching methods to avoid infinite recursion --- src/libcore/rt/comm.rs | 4 +-- src/libcore/rt/mod.rs | 7 ++--- src/libcore/rt/sched.rs | 63 +++++++++++++++++++-------------------- src/libcore/rt/test.rs | 32 ++++++-------------- src/libcore/rt/tube.rs | 34 +++++++++------------ src/libcore/rt/uv/uvio.rs | 26 +++++++--------- 6 files changed, 66 insertions(+), 100 deletions(-) diff --git a/src/libcore/rt/comm.rs b/src/libcore/rt/comm.rs index d108e20347a05..8ff3887f779c0 100644 --- a/src/libcore/rt/comm.rs +++ b/src/libcore/rt/comm.rs @@ -159,7 +159,7 @@ impl PortOne { // Switch to the scheduler to put the ~Task into the Packet state. let sched = Local::take::(); - do sched.deschedule_running_task_and_then |task| { + do sched.deschedule_running_task_and_then |sched, task| { unsafe { // Atomically swap the task pointer into the Packet state, issuing // an acquire barrier to prevent reordering of the subsequent read @@ -178,12 +178,10 @@ impl PortOne { // triggering infinite recursion on the scheduler's stack. 
let task: ~Coroutine = cast::transmute(task_as_state); let task = Cell(task); - let mut sched = Local::take::(); do sched.event_loop.callback { let sched = Local::take::(); sched.resume_task_immediately(task.take()); } - Local::put(sched); } _ => util::unreachable() } diff --git a/src/libcore/rt/mod.rs b/src/libcore/rt/mod.rs index e23ad76a8c610..1113d7abe7dcb 100644 --- a/src/libcore/rt/mod.rs +++ b/src/libcore/rt/mod.rs @@ -238,12 +238,9 @@ fn test_context() { let task = ~do Coroutine::new(&mut sched.stack_pool) { assert_eq!(context(), TaskContext); let sched = Local::take::(); - do sched.deschedule_running_task_and_then() |task| { + do sched.deschedule_running_task_and_then() |sched, task| { assert_eq!(context(), SchedulerContext); - let task = Cell(task); - do Local::borrow:: |sched| { - sched.enqueue_task(task.take()); - } + sched.enqueue_task(task); } }; sched.enqueue_task(task); diff --git a/src/libcore/rt/sched.rs b/src/libcore/rt/sched.rs index c6d6bb9f39e50..089c95cd7cd53 100644 --- a/src/libcore/rt/sched.rs +++ b/src/libcore/rt/sched.rs @@ -280,11 +280,9 @@ pub impl Scheduler { rtdebug!("ending running task"); - do self.deschedule_running_task_and_then |dead_task| { + do self.deschedule_running_task_and_then |sched, dead_task| { let dead_task = Cell(dead_task); - do Local::borrow:: |sched| { - dead_task.take().recycle(&mut sched.stack_pool); - } + dead_task.take().recycle(&mut sched.stack_pool); } abort!("control reached end of task"); @@ -293,22 +291,18 @@ pub impl Scheduler { fn schedule_new_task(~self, task: ~Coroutine) { assert!(self.in_task_context()); - do self.switch_running_tasks_and_then(task) |last_task| { + do self.switch_running_tasks_and_then(task) |sched, last_task| { let last_task = Cell(last_task); - do Local::borrow:: |sched| { - sched.enqueue_task(last_task.take()); - } + sched.enqueue_task(last_task.take()); } } fn schedule_task(~self, task: ~Coroutine) { assert!(self.in_task_context()); - do self.switch_running_tasks_and_then(task) |last_task| { + do self.switch_running_tasks_and_then(task) |sched, last_task| { let last_task = Cell(last_task); - do Local::borrow:: |sched| { - sched.enqueue_task(last_task.take()); - } + sched.enqueue_task(last_task.take()); } } @@ -352,7 +346,11 @@ pub impl Scheduler { /// The closure here is a *stack* closure that lives in the /// running task. It gets transmuted to the scheduler's lifetime /// and called while the task is blocked. - fn deschedule_running_task_and_then(~self, f: &fn(~Coroutine)) { + /// + /// This passes a Scheduler pointer to the fn after the context switch + /// in order to prevent that fn from performing further scheduling operations. + /// Doing further scheduling could easily result in infinite recursion. + fn deschedule_running_task_and_then(~self, f: &fn(&mut Scheduler, ~Coroutine)) { let mut this = self; assert!(this.in_task_context()); @@ -360,7 +358,8 @@ pub impl Scheduler { unsafe { let blocked_task = this.current_task.swap_unwrap(); - let f_fake_region = transmute::<&fn(~Coroutine), &fn(~Coroutine)>(f); + let f_fake_region = transmute::<&fn(&mut Scheduler, ~Coroutine), + &fn(&mut Scheduler, ~Coroutine)>(f); let f_opaque = ClosureConverter::from_fn(f_fake_region); this.enqueue_cleanup_job(GiveTask(blocked_task, f_opaque)); } @@ -382,14 +381,18 @@ pub impl Scheduler { /// Switch directly to another task, without going through the scheduler. /// You would want to think hard about doing this, e.g. if there are /// pending I/O events it would be a bad idea. 
- fn switch_running_tasks_and_then(~self, next_task: ~Coroutine, f: &fn(~Coroutine)) { + fn switch_running_tasks_and_then(~self, next_task: ~Coroutine, + f: &fn(&mut Scheduler, ~Coroutine)) { let mut this = self; assert!(this.in_task_context()); rtdebug!("switching tasks"); let old_running_task = this.current_task.swap_unwrap(); - let f_fake_region = unsafe { transmute::<&fn(~Coroutine), &fn(~Coroutine)>(f) }; + let f_fake_region = unsafe { + transmute::<&fn(&mut Scheduler, ~Coroutine), + &fn(&mut Scheduler, ~Coroutine)>(f) + }; let f_opaque = ClosureConverter::from_fn(f_fake_region); this.enqueue_cleanup_job(GiveTask(old_running_task, f_opaque)); this.current_task = Some(next_task); @@ -426,7 +429,7 @@ pub impl Scheduler { let cleanup_job = self.cleanup_job.swap_unwrap(); match cleanup_job { DoNothing => { } - GiveTask(task, f) => (f.to_fn())(task) + GiveTask(task, f) => (f.to_fn())(self, task) } } @@ -535,12 +538,12 @@ pub impl Coroutine { // complaining type UnsafeTaskReceiver = sys::Closure; trait ClosureConverter { - fn from_fn(&fn(~Coroutine)) -> Self; - fn to_fn(self) -> &fn(~Coroutine); + fn from_fn(&fn(&mut Scheduler, ~Coroutine)) -> Self; + fn to_fn(self) -> &fn(&mut Scheduler, ~Coroutine); } impl ClosureConverter for UnsafeTaskReceiver { - fn from_fn(f: &fn(~Coroutine)) -> UnsafeTaskReceiver { unsafe { transmute(f) } } - fn to_fn(self) -> &fn(~Coroutine) { unsafe { transmute(self) } } + fn from_fn(f: &fn(&mut Scheduler, ~Coroutine)) -> UnsafeTaskReceiver { unsafe { transmute(f) } } + fn to_fn(self) -> &fn(&mut Scheduler, ~Coroutine) { unsafe { transmute(self) } } } #[cfg(test)] @@ -604,11 +607,9 @@ mod test { unsafe { *count_ptr = *count_ptr + 1; } }; // Context switch directly to the new task - do sched.switch_running_tasks_and_then(task2) |task1| { + do sched.switch_running_tasks_and_then(task2) |sched, task1| { let task1 = Cell(task1); - do Local::borrow:: |sched| { - sched.enqueue_task(task1.take()); - } + sched.enqueue_task(task1.take()); } unsafe { *count_ptr = *count_ptr + 1; } }; @@ -658,12 +659,10 @@ mod test { let task = ~do Coroutine::new(&mut sched.stack_pool) { let sched = Local::take::(); assert!(sched.in_task_context()); - do sched.deschedule_running_task_and_then() |task| { + do sched.deschedule_running_task_and_then() |sched, task| { let task = Cell(task); - do Local::borrow:: |sched| { - assert!(!sched.in_task_context()); - sched.enqueue_task(task.take()); - } + assert!(!sched.in_task_context()); + sched.enqueue_task(task.take()); } }; sched.enqueue_task(task); @@ -680,8 +679,7 @@ mod test { do run_in_newsched_task { do spawn { let sched = Local::take::(); - do sched.deschedule_running_task_and_then |task| { - let mut sched = Local::take::(); + do sched.deschedule_running_task_and_then |sched, task| { let task = Cell(task); do sched.event_loop.callback_ms(10) { rtdebug!("in callback"); @@ -689,7 +687,6 @@ mod test { sched.enqueue_task(task.take()); Local::put(sched); } - Local::put(sched); } } } diff --git a/src/libcore/rt/test.rs b/src/libcore/rt/test.rs index 1bbfe8d473db3..16b0aef5e266b 100644 --- a/src/libcore/rt/test.rs +++ b/src/libcore/rt/test.rs @@ -122,11 +122,7 @@ pub fn spawntask(f: ~fn()) { let task = ~Coroutine::with_task(&mut sched.stack_pool, ~Task::without_unwinding(), f); - do sched.switch_running_tasks_and_then(task) |task| { - let task = Cell(task); - let sched = Local::take::(); - sched.schedule_new_task(task.take()); - } + sched.schedule_new_task(task); } /// Create a new task and run it right now. 
Aborts on failure @@ -137,11 +133,8 @@ pub fn spawntask_immediately(f: ~fn()) { let task = ~Coroutine::with_task(&mut sched.stack_pool, ~Task::without_unwinding(), f); - do sched.switch_running_tasks_and_then(task) |task| { - let task = Cell(task); - do Local::borrow:: |sched| { - sched.enqueue_task(task.take()); - } + do sched.switch_running_tasks_and_then(task) |sched, task| { + sched.enqueue_task(task); } } @@ -172,11 +165,8 @@ pub fn spawntask_random(f: ~fn()) { f); if run_now { - do sched.switch_running_tasks_and_then(task) |task| { - let task = Cell(task); - do Local::borrow:: |sched| { - sched.enqueue_task(task.take()); - } + do sched.switch_running_tasks_and_then(task) |sched, task| { + sched.enqueue_task(task); } } else { sched.enqueue_task(task); @@ -199,10 +189,9 @@ pub fn spawntask_try(f: ~fn()) -> Result<(), ()> { // Switch to the scheduler let f = Cell(Cell(f)); let sched = Local::take::(); - do sched.deschedule_running_task_and_then() |old_task| { + do sched.deschedule_running_task_and_then() |sched, old_task| { let old_task = Cell(old_task); let f = f.take(); - let mut sched = Local::take::(); let new_task = ~do Coroutine::new(&mut sched.stack_pool) { do (|| { (f.take())() @@ -210,16 +199,13 @@ pub fn spawntask_try(f: ~fn()) -> Result<(), ()> { // Check for failure then resume the parent task unsafe { *failed_ptr = task::failing(); } let sched = Local::take::(); - do sched.switch_running_tasks_and_then(old_task.take()) |new_task| { - let new_task = Cell(new_task); - do Local::borrow:: |sched| { - sched.enqueue_task(new_task.take()); - } + do sched.switch_running_tasks_and_then(old_task.take()) |sched, new_task| { + sched.enqueue_task(new_task); } } }; - sched.resume_task_immediately(new_task); + sched.enqueue_task(new_task); } if !failed { Ok(()) } else { Err(()) } diff --git a/src/libcore/rt/tube.rs b/src/libcore/rt/tube.rs index b2f475a696605..4482a92d916aa 100644 --- a/src/libcore/rt/tube.rs +++ b/src/libcore/rt/tube.rs @@ -72,7 +72,7 @@ impl Tube { assert!(self.p.refcount() > 1); // There better be somebody to wake us up assert!((*state).blocked_task.is_none()); let sched = Local::take::(); - do sched.deschedule_running_task_and_then |task| { + do sched.deschedule_running_task_and_then |_, task| { (*state).blocked_task = Some(task); } rtdebug!("waking after tube recv"); @@ -107,11 +107,10 @@ mod test { let tube_clone = tube.clone(); let tube_clone_cell = Cell(tube_clone); let sched = Local::take::(); - do sched.deschedule_running_task_and_then |task| { + do sched.deschedule_running_task_and_then |sched, task| { let mut tube_clone = tube_clone_cell.take(); tube_clone.send(1); - let sched = Local::take::(); - sched.resume_task_immediately(task); + sched.enqueue_task(task); } assert!(tube.recv() == 1); @@ -123,21 +122,17 @@ mod test { do run_in_newsched_task { let mut tube: Tube = Tube::new(); let tube_clone = tube.clone(); - let tube_clone = Cell(Cell(Cell(tube_clone))); + let tube_clone = Cell(tube_clone); let sched = Local::take::(); - do sched.deschedule_running_task_and_then |task| { - let tube_clone = tube_clone.take(); - do Local::borrow:: |sched| { - let tube_clone = tube_clone.take(); - do sched.event_loop.callback { - let mut tube_clone = tube_clone.take(); - // The task should be blocked on this now and - // sending will wake it up. 
- tube_clone.send(1); - } + do sched.deschedule_running_task_and_then |sched, task| { + let tube_clone = Cell(tube_clone.take()); + do sched.event_loop.callback { + let mut tube_clone = tube_clone.take(); + // The task should be blocked on this now and + // sending will wake it up. + tube_clone.send(1); } - let sched = Local::take::(); - sched.resume_task_immediately(task); + sched.enqueue_task(task); } assert!(tube.recv() == 1); @@ -153,7 +148,7 @@ mod test { let tube_clone = tube.clone(); let tube_clone = Cell(tube_clone); let sched = Local::take::(); - do sched.deschedule_running_task_and_then |task| { + do sched.deschedule_running_task_and_then |sched, task| { callback_send(tube_clone.take(), 0); fn callback_send(tube: Tube, i: int) { @@ -172,8 +167,7 @@ mod test { } } - let sched = Local::take::(); - sched.resume_task_immediately(task); + sched.enqueue_task(task); } for int::range(0, MAX) |i| { diff --git a/src/libcore/rt/uv/uvio.rs b/src/libcore/rt/uv/uvio.rs index e25b6140abbfd..1ee6504d11fc5 100644 --- a/src/libcore/rt/uv/uvio.rs +++ b/src/libcore/rt/uv/uvio.rs @@ -205,12 +205,10 @@ impl IoFactory for UvIoFactory { assert!(scheduler.in_task_context()); // Block this task and take ownership, switch to scheduler context - do scheduler.deschedule_running_task_and_then |task| { + do scheduler.deschedule_running_task_and_then |sched, task| { rtdebug!("connect: entered scheduler context"); - do Local::borrow:: |scheduler| { - assert!(!scheduler.in_task_context()); - } + assert!(!sched.in_task_context()); let mut tcp_watcher = TcpWatcher::new(self.uv_loop()); let task_cell = Cell(task); @@ -250,7 +248,7 @@ impl IoFactory for UvIoFactory { Ok(_) => Ok(~UvTcpListener::new(watcher)), Err(uverr) => { let scheduler = Local::take::(); - do scheduler.deschedule_running_task_and_then |task| { + do scheduler.deschedule_running_task_and_then |_, task| { let task_cell = Cell(task); do watcher.as_stream().close { let scheduler = Local::take::(); @@ -286,7 +284,7 @@ impl Drop for UvTcpListener { fn finalize(&self) { let watcher = self.watcher(); let scheduler = Local::take::(); - do scheduler.deschedule_running_task_and_then |task| { + do scheduler.deschedule_running_task_and_then |_, task| { let task_cell = Cell(task); do watcher.as_stream().close { let scheduler = Local::take::(); @@ -348,7 +346,7 @@ impl Drop for UvTcpStream { rtdebug!("closing tcp stream"); let watcher = self.watcher(); let scheduler = Local::take::(); - do scheduler.deschedule_running_task_and_then |task| { + do scheduler.deschedule_running_task_and_then |_, task| { let task_cell = Cell(task); do watcher.close { let scheduler = Local::take::(); @@ -367,11 +365,9 @@ impl RtioTcpStream for UvTcpStream { assert!(scheduler.in_task_context()); let watcher = self.watcher(); let buf_ptr: *&mut [u8] = &buf; - do scheduler.deschedule_running_task_and_then |task| { + do scheduler.deschedule_running_task_and_then |sched, task| { rtdebug!("read: entered scheduler context"); - do Local::borrow:: |scheduler| { - assert!(!scheduler.in_task_context()); - } + assert!(!sched.in_task_context()); let mut watcher = watcher; let task_cell = Cell(task); // XXX: We shouldn't reallocate these callbacks every @@ -413,7 +409,7 @@ impl RtioTcpStream for UvTcpStream { assert!(scheduler.in_task_context()); let watcher = self.watcher(); let buf_ptr: *&[u8] = &buf; - do scheduler.deschedule_running_task_and_then |task| { + do scheduler.deschedule_running_task_and_then |_, task| { let mut watcher = watcher; let task_cell = Cell(task); let buf = unsafe { 
slice_to_uv_buf(*buf_ptr) }; @@ -507,11 +503,9 @@ fn test_read_and_block() { // Yield to the other task in hopes that it // will trigger a read callback while we are // not ready for it - do scheduler.deschedule_running_task_and_then |task| { + do scheduler.deschedule_running_task_and_then |sched, task| { let task = Cell(task); - do Local::borrow:: |scheduler| { - scheduler.enqueue_task(task.take()); - } + sched.enqueue_task(task.take()); } } From ca2eebd5dd8ceea1da77b6a6f4fb8c68462a400b Mon Sep 17 00:00:00 2001 From: Brian Anderson Date: Wed, 29 May 2013 21:03:21 -0700 Subject: [PATCH 016/111] core::rt: Add some notes about optimizations --- src/libstd/rt/sched.rs | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/src/libstd/rt/sched.rs b/src/libstd/rt/sched.rs index 089c95cd7cd53..75b5306644116 100644 --- a/src/libstd/rt/sched.rs +++ b/src/libstd/rt/sched.rs @@ -29,6 +29,9 @@ use rt::rtio::{IoFactoryObject, RemoteCallback}; /// on a single thread. When the scheduler is running it is owned by /// thread local storage and the running task is owned by the /// scheduler. +/// +/// XXX: This creates too many callbacks to run_sched_once, resulting +/// in too much allocation and too many events. pub struct Scheduler { /// A queue of available work. Under a work-stealing policy there /// is one per Scheduler. @@ -143,6 +146,10 @@ pub impl Scheduler { fn run_sched_once() { + // First, check the message queue for instructions. + // XXX: perf. Check for messages without atomics. + // It's ok if we miss messages occasionally, as long as + // we sync and check again before sleeping. let sched = Local::take::(); if sched.interpret_message_queue() { // We performed a scheduling action. There may be other work @@ -153,6 +160,7 @@ pub impl Scheduler { return; } + // Now, look in the work queue for tasks to run let sched = Local::take::(); if sched.resume_task_from_queue() { // We performed a scheduling action. There may be other work @@ -198,6 +206,12 @@ pub impl Scheduler { self.event_loop.callback(Scheduler::run_sched_once); // We've made work available. Notify a sleeping scheduler. + // XXX: perf. Check for a sleeper without synchronizing memory. + // It's not critical that we always find it. + // XXX: perf. If there's a sleeper then we might as well just send + // it the task directly instead of pushing it to the + // queue. That is essentially the intent here and it is less + // work. match self.sleeper_list.pop() { Some(handle) => { let mut handle = handle; From 8eb358bb00f161f9e289de6cad8cfecc4c6eb681 Mon Sep 17 00:00:00 2001 From: Brian Anderson Date: Wed, 29 May 2013 22:38:15 -0700 Subject: [PATCH 017/111] core::rt: Begin recording scheduler metrics --- src/libstd/rt/comm.rs | 23 ++++++++--- src/libstd/rt/metrics.rs | 88 ++++++++++++++++++++++++++++++++++++++++ src/libstd/rt/mod.rs | 2 + src/libstd/rt/sched.rs | 18 +++++++- 4 files changed, 123 insertions(+), 8 deletions(-) create mode 100644 src/libstd/rt/metrics.rs diff --git a/src/libstd/rt/comm.rs b/src/libstd/rt/comm.rs index ebfa9e263ef80..19fb809d4378e 100644 --- a/src/libstd/rt/comm.rs +++ b/src/libstd/rt/comm.rs @@ -119,8 +119,16 @@ impl ChanOne { match oldstate { STATE_BOTH => { // Port is not waiting yet. Nothing to do + do Local::borrow:: |sched| { + rtdebug!("non-rendezvous send"); + sched.metrics.non_rendezvous_sends += 1; + } } STATE_ONE => { + do Local::borrow:: |sched| { + rtdebug!("rendezvous send"); + sched.metrics.rendezvous_sends += 1; + } // Port has closed. Need to clean up. 
let _packet: ~Packet = cast::transmute(this.inner.void_packet); recvr_active = false; @@ -128,7 +136,9 @@ impl ChanOne { task_as_state => { // Port is blocked. Wake it up. let recvr: ~Coroutine = cast::transmute(task_as_state); - let sched = Local::take::(); + let mut sched = Local::take::(); + rtdebug!("rendezvous send"); + sched.metrics.rendezvous_sends += 1; sched.schedule_task(recvr); } } @@ -170,18 +180,19 @@ impl PortOne { match oldstate { STATE_BOTH => { // Data has not been sent. Now we're blocked. + rtdebug!("non-rendezvous recv"); + sched.metrics.non_rendezvous_recvs += 1; } STATE_ONE => { + rtdebug!("rendezvous recv"); + sched.metrics.rendezvous_recvs += 1; + // Channel is closed. Switch back and check the data. // NB: We have to drop back into the scheduler event loop here // instead of switching immediately back or we could end up // triggering infinite recursion on the scheduler's stack. let task: ~Coroutine = cast::transmute(task_as_state); - let task = Cell(task); - do sched.event_loop.callback { - let sched = Local::take::(); - sched.resume_task_immediately(task.take()); - } + sched.enqueue_task(task); } _ => util::unreachable() } diff --git a/src/libstd/rt/metrics.rs b/src/libstd/rt/metrics.rs new file mode 100644 index 0000000000000..70e347fdfb6ac --- /dev/null +++ b/src/libstd/rt/metrics.rs @@ -0,0 +1,88 @@ +// Copyright 2013 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use to_str::ToStr; + +pub struct SchedMetrics { + // The number of times executing `run_sched_once`. + turns: uint, + // The number of turns that received a message. + messages_received: uint, + // The number of turns that ran a task from the queue. + tasks_resumed_from_queue: uint, + // The number of turns that found no work to perform. + wasted_turns: uint, + // The number of times the scheduler went to sleep. + sleepy_times: uint, + // Context switches from the scheduler into a task. + context_switches_sched_to_task: uint, + // Context switches from a task into the scheduler. + context_switches_task_to_sched: uint, + // Context switches from a task to a task. 
+ context_switches_task_to_task: uint, + // Message sends that unblock the receiver + rendezvous_sends: uint, + // Message sends that do not unblock the receiver + non_rendezvous_sends: uint, + // Message receives that do not block the receiver + rendezvous_recvs: uint, + // Message receives that block the receiver + non_rendezvous_recvs: uint +} + +impl SchedMetrics { + pub fn new() -> SchedMetrics { + SchedMetrics { + turns: 0, + messages_received: 0, + tasks_resumed_from_queue: 0, + wasted_turns: 0, + sleepy_times: 0, + context_switches_sched_to_task: 0, + context_switches_task_to_sched: 0, + context_switches_task_to_task: 0, + rendezvous_sends: 0, + non_rendezvous_sends: 0, + rendezvous_recvs: 0, + non_rendezvous_recvs: 0 + } + } +} + +impl ToStr for SchedMetrics { + fn to_str(&self) -> ~str { + fmt!("turns: %u\n\ + messages_received: %u\n\ + tasks_resumed_from_queue: %u\n\ + wasted_turns: %u\n\ + sleepy_times: %u\n\ + context_switches_sched_to_task: %u\n\ + context_switches_task_to_sched: %u\n\ + context_switches_task_to_task: %u\n\ + rendezvous_sends: %u\n\ + non_rendezvous_sends: %u\n\ + rendezvous_recvs: %u\n\ + non_rendezvous_recvs: %u\n\ + ", + self.turns, + self.messages_received, + self.tasks_resumed_from_queue, + self.wasted_turns, + self.sleepy_times, + self.context_switches_sched_to_task, + self.context_switches_task_to_sched, + self.context_switches_task_to_task, + self.rendezvous_sends, + self.non_rendezvous_sends, + self.rendezvous_recvs, + self.non_rendezvous_recvs + ) + } +} \ No newline at end of file diff --git a/src/libstd/rt/mod.rs b/src/libstd/rt/mod.rs index 1113d7abe7dcb..23dc757800222 100644 --- a/src/libstd/rt/mod.rs +++ b/src/libstd/rt/mod.rs @@ -130,6 +130,8 @@ pub mod local_ptr; /// Bindings to pthread/windows thread-local storage. pub mod thread_local_storage; +pub mod metrics; + /// Set up a default runtime configuration, given compiler-supplied arguments. /// diff --git a/src/libstd/rt/sched.rs b/src/libstd/rt/sched.rs index 75b5306644116..b5b8bb732e7fd 100644 --- a/src/libstd/rt/sched.rs +++ b/src/libstd/rt/sched.rs @@ -13,6 +13,7 @@ use sys; use cast::transmute; use cell::Cell; use clone::Clone; +use to_str::ToStr; use super::sleeper_list::SleeperList; use super::work_queue::WorkQueue; @@ -24,6 +25,7 @@ use super::message_queue::MessageQueue; use rt::local_ptr; use rt::local::Local; use rt::rtio::{IoFactoryObject, RemoteCallback}; +use rt::metrics::SchedMetrics; /// The Scheduler is responsible for coordinating execution of Coroutines /// on a single thread. When the scheduler is running it is owned by @@ -63,7 +65,8 @@ pub struct Scheduler { current_task: Option<~Coroutine>, /// An action performed after a context switch on behalf of the /// code running before the context switch - priv cleanup_job: Option + priv cleanup_job: Option, + metrics: SchedMetrics } pub struct SchedHandle { @@ -115,6 +118,7 @@ pub impl Scheduler { saved_context: Context::empty(), current_task: None, cleanup_job: None, + metrics: SchedMetrics::new() } } @@ -141,20 +145,24 @@ pub impl Scheduler { let sched = Local::take::(); assert!(sched.work_queue.is_empty()); + rtdebug!("scheduler metrics: %s\n", sched.metrics.to_str()); return sched; } fn run_sched_once() { + let mut sched = Local::take::(); + sched.metrics.turns += 1; + // First, check the message queue for instructions. // XXX: perf. Check for messages without atomics. // It's ok if we miss messages occasionally, as long as // we sync and check again before sleeping. 
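        // A turn makes at most one scheduling decision: handle a pending
        // message, resume a task from the work queue, or, finding neither,
        // register on the sleeper list and stay idle until another scheduler
        // sends Wake.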
- let sched = Local::take::(); if sched.interpret_message_queue() { // We performed a scheduling action. There may be other work // to do yet, so let's try again later. let mut sched = Local::take::(); + sched.metrics.messages_received += 1; sched.event_loop.callback(Scheduler::run_sched_once); Local::put(sched); return; @@ -166,6 +174,7 @@ pub impl Scheduler { // We performed a scheduling action. There may be other work // to do yet, so let's try again later. let mut sched = Local::take::(); + sched.metrics.tasks_resumed_from_queue += 1; sched.event_loop.callback(Scheduler::run_sched_once); Local::put(sched); return; @@ -176,8 +185,10 @@ pub impl Scheduler { // somebody can wake us up later. rtdebug!("no work to do"); let mut sched = Local::take::(); + sched.metrics.wasted_turns += 1; if !sched.sleepy && !sched.no_sleep { rtdebug!("sleeping"); + sched.metrics.sleepy_times += 1; sched.sleepy = true; let handle = sched.make_handle(); sched.sleeper_list.push(handle); @@ -327,6 +338,7 @@ pub impl Scheduler { assert!(!this.in_task_context()); rtdebug!("scheduling a task"); + this.metrics.context_switches_sched_to_task += 1; // Store the task in the scheduler so it can be grabbed later this.current_task = Some(task); @@ -369,6 +381,7 @@ pub impl Scheduler { assert!(this.in_task_context()); rtdebug!("blocking task"); + this.metrics.context_switches_task_to_sched += 1; unsafe { let blocked_task = this.current_task.swap_unwrap(); @@ -401,6 +414,7 @@ pub impl Scheduler { assert!(this.in_task_context()); rtdebug!("switching tasks"); + this.metrics.context_switches_task_to_task += 1; let old_running_task = this.current_task.swap_unwrap(); let f_fake_region = unsafe { From 053b38e7e1cba8f7bb649a5fc8d82b0448d33c55 Mon Sep 17 00:00:00 2001 From: Brian Anderson Date: Thu, 30 May 2013 00:18:49 -0700 Subject: [PATCH 018/111] core::rt: Fix two multithreading bugs and add a threadring test This properly distributes the load now --- src/libstd/rt/sched.rs | 66 +++++++++++++++++++++++++++++++++++++++++- src/libstd/rt/test.rs | 2 +- 2 files changed, 66 insertions(+), 2 deletions(-) diff --git a/src/libstd/rt/sched.rs b/src/libstd/rt/sched.rs index b5b8bb732e7fd..a57a87ffba774 100644 --- a/src/libstd/rt/sched.rs +++ b/src/libstd/rt/sched.rs @@ -131,6 +131,11 @@ pub impl Scheduler { let mut self_sched = self; + // Always run through the scheduler loop at least once so that + // we enter the sleep state and can then be woken up by other + // schedulers. 
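+        // (With an empty work queue there would otherwise be no pending
+        // callbacks, so the event loop could return before this scheduler
+        // ever reaches the sleeper list.)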
+ self_sched.event_loop.callback(Scheduler::run_sched_once); + unsafe { let event_loop: *mut ~EventLoopObject = { let event_loop: *mut ~EventLoopObject = &mut self_sched.event_loop; @@ -258,7 +263,7 @@ pub impl Scheduler { let mut handle = handle; handle.send(Wake); } - None => (/* pass */) + None => break } } } @@ -781,4 +786,63 @@ mod test { } } } + + #[test] + fn thread_ring() { + use rt::comm::*; + use iter::Times; + use vec::OwnedVector; + use container::Container; + use comm::{GenericPort, GenericChan}; + + do run_in_mt_newsched_task { + let (end_port, end_chan) = oneshot(); + + let n_tasks = 10; + let token = 2000; + + let mut (p, ch1) = stream(); + ch1.send((token, end_chan)); + let mut i = 2; + while i <= n_tasks { + let (next_p, ch) = stream(); + let imm_i = i; + let imm_p = p; + do spawntask_random { + roundtrip(imm_i, n_tasks, &imm_p, &ch); + }; + p = next_p; + i += 1; + } + let imm_p = p; + let imm_ch = ch1; + do spawntask_random { + roundtrip(1, n_tasks, &imm_p, &imm_ch); + } + + end_port.recv(); + } + + fn roundtrip(id: int, n_tasks: int, + p: &Port<(int, ChanOne<()>)>, ch: &Chan<(int, ChanOne<()>)>) { + while (true) { + match p.recv() { + (1, end_chan) => { + debug!("%d\n", id); + end_chan.send(()); + return; + } + (token, end_chan) => { + debug!("thread: %d got token: %d", id, token); + ch.send((token - 1, end_chan)); + if token <= n_tasks { + return; + } + } + } + } + } + + } + } diff --git a/src/libstd/rt/test.rs b/src/libstd/rt/test.rs index 16b0aef5e266b..e05e2004e0b21 100644 --- a/src/libstd/rt/test.rs +++ b/src/libstd/rt/test.rs @@ -66,7 +66,7 @@ pub fn run_in_mt_newsched_task(f: ~fn()) { let f_cell = Cell(f); do run_in_bare_thread { - static N: uint = 2; + static N: uint = 4; let sleepers = SleeperList::new(); let work_queue = WorkQueue::new(); From ea633b42aeadf807a10036a87bf2903123250152 Mon Sep 17 00:00:00 2001 From: Brian Anderson Date: Thu, 30 May 2013 13:20:17 -0700 Subject: [PATCH 019/111] core::rt: deny(unused_imports, unused_mut, unused_variable) --- src/libstd/rt/comm.rs | 1 - src/libstd/rt/local.rs | 1 - src/libstd/rt/mod.rs | 5 +++-- src/libstd/rt/sched.rs | 11 +++-------- src/libstd/rt/sleeper_list.rs | 2 +- src/libstd/rt/test.rs | 8 +++----- src/libstd/rt/uv/uvio.rs | 5 +---- 7 files changed, 11 insertions(+), 22 deletions(-) diff --git a/src/libstd/rt/comm.rs b/src/libstd/rt/comm.rs index 19fb809d4378e..26d02fb6640ac 100644 --- a/src/libstd/rt/comm.rs +++ b/src/libstd/rt/comm.rs @@ -22,7 +22,6 @@ use ops::Drop; use kinds::Owned; use rt::sched::{Scheduler, Coroutine}; use rt::local::Local; -use rt::rtio::EventLoop; use unstable::intrinsics::{atomic_xchg, atomic_load}; use util::Void; use comm::{GenericChan, GenericSmartChan, GenericPort, Peekable}; diff --git a/src/libstd/rt/local.rs b/src/libstd/rt/local.rs index ffff54f00bbe7..e6988c538881a 100644 --- a/src/libstd/rt/local.rs +++ b/src/libstd/rt/local.rs @@ -87,7 +87,6 @@ impl Local for IoFactoryObject { mod test { use rt::test::*; use rt::sched::Scheduler; - use rt::uv::uvio::UvEventLoop; use super::*; #[test] diff --git a/src/libstd/rt/mod.rs b/src/libstd/rt/mod.rs index 23dc757800222..caf3e15e535af 100644 --- a/src/libstd/rt/mod.rs +++ b/src/libstd/rt/mod.rs @@ -55,6 +55,9 @@ Several modules in `core` are clients of `rt`: */ #[doc(hidden)]; +#[deny(unused_imports)]; +#[deny(unused_mut)]; +#[deny(unused_variable)]; use ptr::Ptr; @@ -228,8 +231,6 @@ pub fn context() -> RuntimeContext { fn test_context() { use unstable::run_in_bare_thread; use self::sched::{Scheduler, Coroutine}; - use 
rt::uv::uvio::UvEventLoop; - use cell::Cell; use rt::local::Local; use rt::test::new_test_uv_sched; diff --git a/src/libstd/rt/sched.rs b/src/libstd/rt/sched.rs index a57a87ffba774..b0080a010140d 100644 --- a/src/libstd/rt/sched.rs +++ b/src/libstd/rt/sched.rs @@ -13,7 +13,6 @@ use sys; use cast::transmute; use cell::Cell; use clone::Clone; -use to_str::ToStr; use super::sleeper_list::SleeperList; use super::work_queue::WorkQueue; @@ -24,7 +23,7 @@ use super::task::Task; use super::message_queue::MessageQueue; use rt::local_ptr; use rt::local::Local; -use rt::rtio::{IoFactoryObject, RemoteCallback}; +use rt::rtio::RemoteCallback; use rt::metrics::SchedMetrics; /// The Scheduler is responsible for coordinating execution of Coroutines @@ -583,7 +582,6 @@ impl ClosureConverter for UnsafeTaskReceiver { mod test { use int; use cell::Cell; - use rt::uv::uvio::UvEventLoop; use unstable::run_in_bare_thread; use task::spawn; use rt::local::Local; @@ -751,13 +749,13 @@ mod test { let sched1_cell = Cell(sched1); let _thread1 = do Thread::start { - let mut sched1 = sched1_cell.take(); + let sched1 = sched1_cell.take(); sched1.run(); }; let sched2_cell = Cell(sched2); let _thread2 = do Thread::start { - let mut sched2 = sched2_cell.take(); + let sched2 = sched2_cell.take(); sched2.run(); }; } @@ -790,9 +788,6 @@ mod test { #[test] fn thread_ring() { use rt::comm::*; - use iter::Times; - use vec::OwnedVector; - use container::Container; use comm::{GenericPort, GenericChan}; do run_in_mt_newsched_task { diff --git a/src/libstd/rt/sleeper_list.rs b/src/libstd/rt/sleeper_list.rs index dfcac8eb088f7..e2873e78d805f 100644 --- a/src/libstd/rt/sleeper_list.rs +++ b/src/libstd/rt/sleeper_list.rs @@ -16,7 +16,7 @@ use vec::OwnedVector; use option::{Option, Some, None}; use cell::Cell; use unstable::sync::{Exclusive, exclusive}; -use rt::sched::{Scheduler, SchedHandle}; +use rt::sched::SchedHandle; use clone::Clone; pub struct SleeperList { diff --git a/src/libstd/rt/test.rs b/src/libstd/rt/test.rs index e05e2004e0b21..907d289fb0748 100644 --- a/src/libstd/rt/test.rs +++ b/src/libstd/rt/test.rs @@ -9,7 +9,7 @@ // except according to those terms. use uint; -use option::{Option, Some, None}; +use option::{Some, None}; use cell::Cell; use clone::Clone; use container::Container; @@ -42,7 +42,6 @@ pub fn new_test_uv_sched() -> Scheduler { pub fn run_in_newsched_task(f: ~fn()) { use super::sched::*; use unstable::run_in_bare_thread; - use rt::uv::uvio::UvEventLoop; let f = Cell(f); @@ -74,7 +73,7 @@ pub fn run_in_mt_newsched_task(f: ~fn()) { let mut handles = ~[]; let mut scheds = ~[]; - for uint::range(0, N) |i| { + for uint::range(0, N) |_| { let loop_ = ~UvEventLoop::new(); let mut sched = ~Scheduler::new(loop_, work_queue.clone(), sleepers.clone()); let handle = sched.make_handle(); @@ -102,7 +101,7 @@ pub fn run_in_mt_newsched_task(f: ~fn()) { let sched = scheds.pop(); let sched_cell = Cell(sched); let thread = do Thread::start { - let mut sched = sched_cell.take(); + let sched = sched_cell.take(); sched.run(); }; @@ -214,7 +213,6 @@ pub fn spawntask_try(f: ~fn()) -> Result<(), ()> { // Spawn a new task in a new scheduler and return a thread handle. 
pub fn spawntask_thread(f: ~fn()) -> Thread { use rt::sched::*; - use rt::uv::uvio::UvEventLoop; let f = Cell(f); let thread = do Thread::start { diff --git a/src/libstd/rt/uv/uvio.rs b/src/libstd/rt/uv/uvio.rs index 1ee6504d11fc5..0d9530239a3d9 100644 --- a/src/libstd/rt/uv/uvio.rs +++ b/src/libstd/rt/uv/uvio.rs @@ -24,9 +24,7 @@ use rt::sched::Scheduler; use rt::io::{standard_error, OtherIoError}; use rt::tube::Tube; use rt::local::Local; -use rt::work_queue::WorkQueue; use unstable::sync::{UnsafeAtomicRcBox, AtomicInt}; -use unstable::intrinsics; #[cfg(test)] use container::Container; #[cfg(test)] use uint; @@ -140,7 +138,7 @@ impl RemoteCallback for UvRemoteCallback { impl Drop for UvRemoteCallback { fn finalize(&self) { unsafe { - let mut this: &mut UvRemoteCallback = cast::transmute_mut(self); + let this: &mut UvRemoteCallback = cast::transmute_mut(self); let exit_flag_ptr = this.exit_flag.get(); (*exit_flag_ptr).store(1); this.async.send(); @@ -150,7 +148,6 @@ impl Drop for UvRemoteCallback { #[cfg(test)] mod test_remote { - use super::*; use cell; use cell::Cell; use rt::test::*; From e2bedb1b868a634885df9f8a277bec1915c98fc2 Mon Sep 17 00:00:00 2001 From: Brian Anderson Date: Sat, 1 Jun 2013 02:11:38 -0700 Subject: [PATCH 020/111] core: Make atomic methods public --- src/libstd/unstable/atomics.rs | 62 +++++++++++++++++----------------- 1 file changed, 31 insertions(+), 31 deletions(-) diff --git a/src/libstd/unstable/atomics.rs b/src/libstd/unstable/atomics.rs index ab2b5d8ea2b3b..58d0c01f990d0 100644 --- a/src/libstd/unstable/atomics.rs +++ b/src/libstd/unstable/atomics.rs @@ -75,7 +75,7 @@ pub enum Ordering { impl AtomicFlag { - fn new() -> AtomicFlag { + pub fn new() -> AtomicFlag { AtomicFlag { v: 0 } } @@ -83,7 +83,7 @@ impl AtomicFlag { * Clears the atomic flag */ #[inline(always)] - fn clear(&mut self, order: Ordering) { + pub fn clear(&mut self, order: Ordering) { unsafe {atomic_store(&mut self.v, 0, order)} } @@ -92,37 +92,37 @@ impl AtomicFlag { * flag. 
*/ #[inline(always)] - fn test_and_set(&mut self, order: Ordering) -> bool { + pub fn test_and_set(&mut self, order: Ordering) -> bool { unsafe {atomic_compare_and_swap(&mut self.v, 0, 1, order) > 0} } } impl AtomicBool { - fn new(v: bool) -> AtomicBool { + pub fn new(v: bool) -> AtomicBool { AtomicBool { v: if v { 1 } else { 0 } } } #[inline(always)] - fn load(&self, order: Ordering) -> bool { + pub fn load(&self, order: Ordering) -> bool { unsafe { atomic_load(&self.v, order) > 0 } } #[inline(always)] - fn store(&mut self, val: bool, order: Ordering) { + pub fn store(&mut self, val: bool, order: Ordering) { let val = if val { 1 } else { 0 }; unsafe { atomic_store(&mut self.v, val, order); } } #[inline(always)] - fn swap(&mut self, val: bool, order: Ordering) -> bool { + pub fn swap(&mut self, val: bool, order: Ordering) -> bool { let val = if val { 1 } else { 0 }; unsafe { atomic_swap(&mut self.v, val, order) > 0} } #[inline(always)] - fn compare_and_swap(&mut self, old: bool, new: bool, order: Ordering) -> bool { + pub fn compare_and_swap(&mut self, old: bool, new: bool, order: Ordering) -> bool { let old = if old { 1 } else { 0 }; let new = if new { 1 } else { 0 }; @@ -131,105 +131,105 @@ impl AtomicBool { } impl AtomicInt { - fn new(v: int) -> AtomicInt { + pub fn new(v: int) -> AtomicInt { AtomicInt { v:v } } #[inline(always)] - fn load(&self, order: Ordering) -> int { + pub fn load(&self, order: Ordering) -> int { unsafe { atomic_load(&self.v, order) } } #[inline(always)] - fn store(&mut self, val: int, order: Ordering) { + pub fn store(&mut self, val: int, order: Ordering) { unsafe { atomic_store(&mut self.v, val, order); } } #[inline(always)] - fn swap(&mut self, val: int, order: Ordering) -> int { + pub fn swap(&mut self, val: int, order: Ordering) -> int { unsafe { atomic_swap(&mut self.v, val, order) } } #[inline(always)] - fn compare_and_swap(&mut self, old: int, new: int, order: Ordering) -> int { + pub fn compare_and_swap(&mut self, old: int, new: int, order: Ordering) -> int { unsafe { atomic_compare_and_swap(&mut self.v, old, new, order) } } #[inline(always)] - fn fetch_add(&mut self, val: int, order: Ordering) -> int { + pub fn fetch_add(&mut self, val: int, order: Ordering) -> int { unsafe { atomic_add(&mut self.v, val, order) } } #[inline(always)] - fn fetch_sub(&mut self, val: int, order: Ordering) -> int { + pub fn fetch_sub(&mut self, val: int, order: Ordering) -> int { unsafe { atomic_sub(&mut self.v, val, order) } } } impl AtomicUint { - fn new(v: uint) -> AtomicUint { + pub fn new(v: uint) -> AtomicUint { AtomicUint { v:v } } #[inline(always)] - fn load(&self, order: Ordering) -> uint { + pub fn load(&self, order: Ordering) -> uint { unsafe { atomic_load(&self.v, order) } } #[inline(always)] - fn store(&mut self, val: uint, order: Ordering) { + pub fn store(&mut self, val: uint, order: Ordering) { unsafe { atomic_store(&mut self.v, val, order); } } #[inline(always)] - fn swap(&mut self, val: uint, order: Ordering) -> uint { + pub fn swap(&mut self, val: uint, order: Ordering) -> uint { unsafe { atomic_swap(&mut self.v, val, order) } } #[inline(always)] - fn compare_and_swap(&mut self, old: uint, new: uint, order: Ordering) -> uint { + pub fn compare_and_swap(&mut self, old: uint, new: uint, order: Ordering) -> uint { unsafe { atomic_compare_and_swap(&mut self.v, old, new, order) } } #[inline(always)] - fn fetch_add(&mut self, val: uint, order: Ordering) -> uint { + pub fn fetch_add(&mut self, val: uint, order: Ordering) -> uint { unsafe { atomic_add(&mut self.v, 
val, order) } } #[inline(always)] - fn fetch_sub(&mut self, val: uint, order: Ordering) -> uint { + pub fn fetch_sub(&mut self, val: uint, order: Ordering) -> uint { unsafe { atomic_sub(&mut self.v, val, order) } } } impl AtomicPtr { - fn new(p: *mut T) -> AtomicPtr { + pub fn new(p: *mut T) -> AtomicPtr { AtomicPtr { p:p } } #[inline(always)] - fn load(&self, order: Ordering) -> *mut T { + pub fn load(&self, order: Ordering) -> *mut T { unsafe { atomic_load(&self.p, order) } } #[inline(always)] - fn store(&mut self, ptr: *mut T, order: Ordering) { + pub fn store(&mut self, ptr: *mut T, order: Ordering) { unsafe { atomic_store(&mut self.p, ptr, order); } } #[inline(always)] - fn swap(&mut self, ptr: *mut T, order: Ordering) -> *mut T { + pub fn swap(&mut self, ptr: *mut T, order: Ordering) -> *mut T { unsafe { atomic_swap(&mut self.p, ptr, order) } } #[inline(always)] - fn compare_and_swap(&mut self, old: *mut T, new: *mut T, order: Ordering) -> *mut T { + pub fn compare_and_swap(&mut self, old: *mut T, new: *mut T, order: Ordering) -> *mut T { unsafe { atomic_compare_and_swap(&mut self.p, old, new, order) } } } impl AtomicOption { - fn new(p: ~T) -> AtomicOption { + pub fn new(p: ~T) -> AtomicOption { unsafe { AtomicOption { p: cast::transmute(p) @@ -237,7 +237,7 @@ impl AtomicOption { } } - fn empty() -> AtomicOption { + pub fn empty() -> AtomicOption { unsafe { AtomicOption { p: cast::transmute(0) @@ -246,7 +246,7 @@ impl AtomicOption { } #[inline(always)] - fn swap(&mut self, val: ~T, order: Ordering) -> Option<~T> { + pub fn swap(&mut self, val: ~T, order: Ordering) -> Option<~T> { unsafe { let val = cast::transmute(val); @@ -262,7 +262,7 @@ impl AtomicOption { } #[inline(always)] - fn take(&mut self, order: Ordering) -> Option<~T> { + pub fn take(&mut self, order: Ordering) -> Option<~T> { unsafe { self.swap(cast::transmute(0), order) } From 2e6d51f9cea14ff271223855454034b27ced4ce9 Mon Sep 17 00:00:00 2001 From: Brian Anderson Date: Sun, 2 Jun 2013 02:13:24 -0700 Subject: [PATCH 021/111] std::rt: Use AtomicUint instead of intrinsics in comm --- src/libstd/rt/comm.rs | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/src/libstd/rt/comm.rs b/src/libstd/rt/comm.rs index 26d02fb6640ac..7f93dae00b7c6 100644 --- a/src/libstd/rt/comm.rs +++ b/src/libstd/rt/comm.rs @@ -22,7 +22,7 @@ use ops::Drop; use kinds::Owned; use rt::sched::{Scheduler, Coroutine}; use rt::local::Local; -use unstable::intrinsics::{atomic_xchg, atomic_load}; +use unstable::atomics::{AtomicUint, SeqCst}; use util::Void; use comm::{GenericChan, GenericSmartChan, GenericPort, Peekable}; use cell::Cell; @@ -34,14 +34,14 @@ use cell::Cell; /// * 2 - both endpoints are alive /// * 1 - either the sender or the receiver is dead, determined by context /// * - A pointer to a blocked Task that can be transmuted to ~Task -type State = int; +type State = uint; static STATE_BOTH: State = 2; static STATE_ONE: State = 1; /// The heap-allocated structure shared between two endpoints. struct Packet { - state: State, + state: AtomicUint, payload: Option, } @@ -70,7 +70,7 @@ pub struct PortOneHack { pub fn oneshot() -> (PortOne, ChanOne) { let packet: ~Packet = ~Packet { - state: STATE_BOTH, + state: AtomicUint::new(STATE_BOTH), payload: None }; @@ -114,7 +114,7 @@ impl ChanOne { // reordering of the payload write. This also issues an // acquire barrier that keeps the subsequent access of the // ~Task pointer from being reordered. 
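            // (The SeqCst swap used below is at least as strong as the
            // sequentially consistent atomic_xchg intrinsic it replaces, so
            // the ordering argument above still holds.)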
- let oldstate = atomic_xchg(&mut (*packet).state, STATE_ONE); + let oldstate = (*packet).state.swap(STATE_ONE, SeqCst); match oldstate { STATE_BOTH => { // Port is not waiting yet. Nothing to do @@ -175,7 +175,7 @@ impl PortOne { // of the payload. Also issues a release barrier to prevent reordering // of any previous writes to the task structure. let task_as_state: State = cast::transmute(task); - let oldstate = atomic_xchg(&mut (*packet).state, task_as_state); + let oldstate = (*packet).state.swap(task_as_state, SeqCst); match oldstate { STATE_BOTH => { // Data has not been sent. Now we're blocked. @@ -227,7 +227,7 @@ impl Peekable for PortOne { fn peek(&self) -> bool { unsafe { let packet: *mut Packet = self.inner.packet(); - let oldstate = atomic_load(&mut (*packet).state); + let oldstate = (*packet).state.load(SeqCst); match oldstate { STATE_BOTH => false, STATE_ONE => (*packet).payload.is_some(), @@ -244,7 +244,7 @@ impl Drop for ChanOneHack { unsafe { let this = cast::transmute_mut(self); - let oldstate = atomic_xchg(&mut (*this.packet()).state, STATE_ONE); + let oldstate = (*this.packet()).state.swap(STATE_ONE, SeqCst); match oldstate { STATE_BOTH => { // Port still active. It will destroy the Packet. @@ -271,7 +271,7 @@ impl Drop for PortOneHack { unsafe { let this = cast::transmute_mut(self); - let oldstate = atomic_xchg(&mut (*this.packet()).state, STATE_ONE); + let oldstate = (*this.packet()).state.swap(STATE_ONE, SeqCst); match oldstate { STATE_BOTH => { // Chan still active. It will destroy the packet. From f7e242ab8a4ceffd87ec339086b7f8510e94aef1 Mon Sep 17 00:00:00 2001 From: Brian Anderson Date: Mon, 3 Jun 2013 18:58:26 -0700 Subject: [PATCH 022/111] std::rt: Destroy the task start closure while in task context --- src/libstd/rt/sched.rs | 37 ++++++++++++++++++++++++++++++++++++- 1 file changed, 36 insertions(+), 1 deletion(-) diff --git a/src/libstd/rt/sched.rs b/src/libstd/rt/sched.rs index b0080a010140d..1d1c3aae1f1a3 100644 --- a/src/libstd/rt/sched.rs +++ b/src/libstd/rt/sched.rs @@ -536,6 +536,7 @@ pub impl Coroutine { priv fn build_start_wrapper(start: ~fn()) -> ~fn() { // XXX: The old code didn't have this extra allocation + let start_cell = Cell(start); let wrapper: ~fn() = || { // This is the first code to execute after the initial // context switch to the task. The previous context may @@ -547,7 +548,19 @@ pub impl Coroutine { let sched = Local::unsafe_borrow::(); let task = (*sched).current_task.get_mut_ref(); // FIXME #6141: shouldn't neet to put `start()` in another closure - task.task.run(||start()); + let start_cell = Cell(start_cell.take()); + do task.task.run { + // N.B. Removing `start` from the start wrapper closure + // by emptying a cell is critical for correctness. The ~Task + // pointer, and in turn the closure used to initialize the first + // call frame, is destroyed in scheduler context, not task context. + // So any captured closures must not contain user-definable dtors + // that expect to be in task context. By moving `start` out of + // the closure, all the user code goes out of scope while + // the task is still running. 
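+            // (For example, a `start` that captures a value whose dtor
+            // touches task-local resources - like the @-box allocated in the
+            // start_closure_dtor test below - must not be dropped from
+            // scheduler context.)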
+ let start = start_cell.take(); + start(); + }; } let sched = Local::take::(); @@ -840,4 +853,26 @@ mod test { } + #[test] + fn start_closure_dtor() { + use ops::Drop; + + // Regression test that the `start` task entrypoint can contain dtors + // that use task resources + do run_in_newsched_task { + struct S { field: () } + + impl Drop for S { + fn finalize(&self) { + let _foo = @0; + } + } + + let s = S { field: () }; + + do spawntask { + let _ss = &s; + } + } + } } From 1507df87ccc93091ef5d918dc2c660f2e6f5a928 Mon Sep 17 00:00:00 2001 From: Brian Anderson Date: Mon, 3 Jun 2013 19:15:45 -0700 Subject: [PATCH 023/111] std::rt: Remove in incorrect assert --- src/libstd/rt/sched.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/libstd/rt/sched.rs b/src/libstd/rt/sched.rs index 1d1c3aae1f1a3..df231f6d88aec 100644 --- a/src/libstd/rt/sched.rs +++ b/src/libstd/rt/sched.rs @@ -148,7 +148,9 @@ pub impl Scheduler { } let sched = Local::take::(); - assert!(sched.work_queue.is_empty()); + // XXX: Reenable this once we're using a per-task queue. With a shared + // queue this is not true + //assert!(sched.work_queue.is_empty()); rtdebug!("scheduler metrics: %s\n", sched.metrics.to_str()); return sched; } From 422f663a988370a93a6ae21b92215e49750c2e87 Mon Sep 17 00:00:00 2001 From: Brian Anderson Date: Sat, 1 Jun 2013 02:11:59 -0700 Subject: [PATCH 024/111] core::rt: Implement SharedChan --- src/libstd/rt/comm.rs | 67 +++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 64 insertions(+), 3 deletions(-) diff --git a/src/libstd/rt/comm.rs b/src/libstd/rt/comm.rs index 7f93dae00b7c6..b97a4df224576 100644 --- a/src/libstd/rt/comm.rs +++ b/src/libstd/rt/comm.rs @@ -22,10 +22,12 @@ use ops::Drop; use kinds::Owned; use rt::sched::{Scheduler, Coroutine}; use rt::local::Local; -use unstable::atomics::{AtomicUint, SeqCst}; +use unstable::atomics::{AtomicUint, AtomicOption, SeqCst}; +use unstable::sync::UnsafeAtomicRcBox; use util::Void; use comm::{GenericChan, GenericSmartChan, GenericPort, Peekable}; use cell::Cell; +use clone::Clone; /// A combined refcount / ~Task pointer. /// @@ -312,16 +314,19 @@ struct StreamPayload { next: PortOne> } +type StreamChanOne = ChanOne>; +type StreamPortOne = PortOne>; + /// A channel with unbounded size. pub struct Chan { // FIXME #5372. Using Cell because we don't take &mut self - next: Cell>> + next: Cell> } /// An port with unbounded size. pub struct Port { // FIXME #5372. 
Using Cell because we don't take &mut self - next: Cell>> + next: Cell> } pub fn stream() -> (Port, Chan) { @@ -374,6 +379,43 @@ impl Peekable for Port { } } +pub struct SharedChan { + // Just like Chan, but a shared AtomicOption instead of Cell + priv next: UnsafeAtomicRcBox>> +} + +impl SharedChan { + pub fn new(chan: Chan) -> SharedChan { + let next = chan.next.take(); + let next = AtomicOption::new(~next); + SharedChan { next: UnsafeAtomicRcBox::new(next) } + } +} + +impl GenericChan for SharedChan { + fn send(&self, val: T) { + self.try_send(val); + } +} + +impl GenericSmartChan for SharedChan { + fn try_send(&self, val: T) -> bool { + unsafe { + let (next_pone, next_cone) = oneshot(); + let cone = (*self.next.get()).swap(~next_cone, SeqCst); + cone.unwrap().try_send(StreamPayload { val: val, next: next_pone }) + } + } +} + +impl Clone for SharedChan { + fn clone(&self) -> SharedChan { + SharedChan { + next: self.next.clone() + } + } +} + #[cfg(test)] mod test { use super::*; @@ -641,5 +683,24 @@ mod test { for 10000.times { port.recv() } } } + + #[test] + fn shared_chan_stress() { + do run_in_mt_newsched_task { + let (port, chan) = stream(); + let chan = SharedChan::new(chan); + let total = stress_factor() + 100; + for total.times { + let chan_clone = chan.clone(); + do spawntask_random { + chan_clone.send(()); + } + } + + for total.times { + port.recv(); + } + } + } } From 51d257fd9a6c3ce9bd02f9e30d15d91d39a5aee9 Mon Sep 17 00:00:00 2001 From: Brian Anderson Date: Sat, 1 Jun 2013 13:34:05 -0700 Subject: [PATCH 025/111] core::rt: Add SharedPort --- src/libstd/rt/comm.rs | 132 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 132 insertions(+) diff --git a/src/libstd/rt/comm.rs b/src/libstd/rt/comm.rs index b97a4df224576..4772a8596bfb6 100644 --- a/src/libstd/rt/comm.rs +++ b/src/libstd/rt/comm.rs @@ -416,6 +416,61 @@ impl Clone for SharedChan { } } +pub struct SharedPort { + // The next port on which we will receive the next port on which we will receive T + priv next_link: UnsafeAtomicRcBox>>> +} + +impl SharedPort { + pub fn new(port: Port) -> SharedPort { + // Put the data port into a new link pipe + let next_data_port = port.next.take(); + let (next_link_port, next_link_chan) = oneshot(); + next_link_chan.send(next_data_port); + let next_link = AtomicOption::new(~next_link_port); + SharedPort { next_link: UnsafeAtomicRcBox::new(next_link) } + } +} + +impl GenericPort for SharedPort { + fn recv(&self) -> T { + match self.try_recv() { + Some(val) => val, + None => { + fail!("receiving on a closed channel"); + } + } + } + + fn try_recv(&self) -> Option { + unsafe { + let (next_link_port, next_link_chan) = oneshot(); + let link_port = (*self.next_link.get()).swap(~next_link_port, SeqCst); + let link_port = link_port.unwrap(); + let data_port = link_port.recv(); + let (next_data_port, res) = match data_port.try_recv() { + Some(StreamPayload { val, next }) => { + (next, Some(val)) + } + None => { + let (next_data_port, _) = oneshot(); + (next_data_port, None) + } + }; + next_link_chan.send(next_data_port); + return res; + } + } +} + +impl Clone for SharedPort { + fn clone(&self) -> SharedPort { + SharedPort { + next_link: self.next_link.clone() + } + } +} + #[cfg(test)] mod test { use super::*; @@ -702,5 +757,82 @@ mod test { } } } + + #[test] + fn shared_port_stress() { + do run_in_mt_newsched_task { + // XXX: Removing these type annotations causes an ICE + let (end_port, end_chan) = stream::<()>(); + let (port, chan) = stream::<()>(); + let end_chan = 
SharedChan::new(end_chan); + let port = SharedPort::new(port); + let total = stress_factor() + 100; + for total.times { + let end_chan_clone = end_chan.clone(); + let port_clone = port.clone(); + do spawntask_random { + port_clone.recv(); + end_chan_clone.send(()); + } + } + + for total.times { + chan.send(()); + } + + for total.times { + end_port.recv(); + } + } + } + + #[test] + fn shared_port_close_simple() { + do run_in_mt_newsched_task { + let (port, chan) = stream::<()>(); + let port = SharedPort::new(port); + { let _chan = chan; } + assert!(port.try_recv().is_none()); + } + } + + #[test] + fn shared_port_close() { + do run_in_mt_newsched_task { + let (end_port, end_chan) = stream::(); + let (port, chan) = stream::<()>(); + let end_chan = SharedChan::new(end_chan); + let port = SharedPort::new(port); + let chan = SharedChan::new(chan); + let send_total = 10; + let recv_total = 20; + do spawntask_random { + for send_total.times { + let chan_clone = chan.clone(); + do spawntask_random { + chan_clone.send(()); + } + } + } + let end_chan_clone = end_chan.clone(); + do spawntask_random { + for recv_total.times { + let port_clone = port.clone(); + let end_chan_clone = end_chan_clone.clone(); + do spawntask_random { + let recvd = port_clone.try_recv().is_some(); + end_chan_clone.send(recvd); + } + } + } + + let mut recvd = 0; + for recv_total.times { + recvd += if end_port.recv() { 1 } else { 0 }; + } + + assert!(recvd == send_total); + } + } } From ece38b3c7e16be1bedb45e552a127fe75bdb726a Mon Sep 17 00:00:00 2001 From: Brian Anderson Date: Sat, 1 Jun 2013 14:03:38 -0700 Subject: [PATCH 026/111] core::rt: Add `MegaPipe`, an unbounded, multiple producer/consumer, lock-free queue --- src/libstd/rt/comm.rs | 71 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 71 insertions(+) diff --git a/src/libstd/rt/comm.rs b/src/libstd/rt/comm.rs index 4772a8596bfb6..ef2091f789c08 100644 --- a/src/libstd/rt/comm.rs +++ b/src/libstd/rt/comm.rs @@ -471,6 +471,44 @@ impl Clone for SharedPort { } } +// XXX: Need better name +type MegaPipe = (SharedPort, SharedChan); + +pub fn megapipe() -> MegaPipe { + let (port, chan) = stream(); + (SharedPort::new(port), SharedChan::new(chan)) +} + +impl GenericChan for MegaPipe { + fn send(&self, val: T) { + match *self { + (_, ref c) => c.send(val) + } + } +} + +impl GenericSmartChan for MegaPipe { + fn try_send(&self, val: T) -> bool { + match *self { + (_, ref c) => c.try_send(val) + } + } +} + +impl GenericPort for MegaPipe { + fn recv(&self) -> T { + match *self { + (ref p, _) => p.recv() + } + } + + fn try_recv(&self) -> Option { + match *self { + (ref p, _) => p.try_recv() + } + } +} + #[cfg(test)] mod test { use super::*; @@ -834,5 +872,38 @@ mod test { assert!(recvd == send_total); } } + + #[test] + fn megapipe_stress() { + use rand; + use rand::RngUtil; + + do run_in_mt_newsched_task { + let (end_port, end_chan) = stream::<()>(); + let end_chan = SharedChan::new(end_chan); + let pipe = megapipe(); + let total = stress_factor() + 10; + let mut rng = rand::rng(); + for total.times { + let msgs = rng.gen_uint_range(0, 10); + let pipe_clone = pipe.clone(); + let end_chan_clone = end_chan.clone(); + do spawntask_random { + for msgs.times { + pipe_clone.send(()); + } + for msgs.times { + pipe_clone.recv(); + } + } + + end_chan_clone.send(()); + } + + for total.times { + end_port.recv(); + } + } + } } From 80849e78a847f7834f71b36a66251ba0ea37a982 Mon Sep 17 00:00:00 2001 From: Brian Anderson Date: Sat, 1 Jun 2013 20:31:33 -0700 Subject: [PATCH 027/111] std: 
Fix stage0 build Conflicts: src/libstd/rt/comm.rs --- src/libstd/rt/comm.rs | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/src/libstd/rt/comm.rs b/src/libstd/rt/comm.rs index ef2091f789c08..449ac9e14a41b 100644 --- a/src/libstd/rt/comm.rs +++ b/src/libstd/rt/comm.rs @@ -399,6 +399,12 @@ impl GenericChan for SharedChan { } impl GenericSmartChan for SharedChan { + #[cfg(stage0)] // odd type checking errors + fn try_send(&self, _val: T) -> bool { + fail!() + } + + #[cfg(not(stage0))] fn try_send(&self, val: T) -> bool { unsafe { let (next_pone, next_cone) = oneshot(); @@ -442,6 +448,12 @@ impl GenericPort for SharedPort { } } + #[cfg(stage0)] // odd type checking errors + fn try_recv(&self) -> Option { + fail!() + } + + #[cfg(not(stage0))] fn try_recv(&self) -> Option { unsafe { let (next_link_port, next_link_chan) = oneshot(); From f9a5005f52d528797d6b98a3bee73ab2d71b9aa3 Mon Sep 17 00:00:00 2001 From: Brian Anderson Date: Wed, 5 Jun 2013 22:34:35 -0700 Subject: [PATCH 028/111] rt: Add rust_get_num_cpus --- src/rt/rust_builtin.cpp | 7 +++++++ src/rt/rust_env.cpp | 6 +++--- src/rt/rustrt.def.in | 1 + 3 files changed, 11 insertions(+), 3 deletions(-) diff --git a/src/rt/rust_builtin.cpp b/src/rt/rust_builtin.cpp index 5e7357c9b7b25..fe4e75fb8d21f 100644 --- a/src/rt/rust_builtin.cpp +++ b/src/rt/rust_builtin.cpp @@ -930,6 +930,13 @@ rust_begin_unwind(uintptr_t token) { #endif } +extern int get_num_cpus(); + +extern "C" CDECL uintptr_t +rust_get_num_cpus() { + return get_num_cpus(); +} + // // Local Variables: // mode: C++ diff --git a/src/rt/rust_env.cpp b/src/rt/rust_env.cpp index ed38be3550f74..c3d38851e7bb2 100644 --- a/src/rt/rust_env.cpp +++ b/src/rt/rust_env.cpp @@ -40,7 +40,7 @@ rust_drop_env_lock() { } #if defined(__WIN32__) -static int +int get_num_cpus() { SYSTEM_INFO sysinfo; GetSystemInfo(&sysinfo); @@ -48,7 +48,7 @@ get_num_cpus() { return (int) sysinfo.dwNumberOfProcessors; } #elif defined(__BSD__) -static int +int get_num_cpus() { /* swiped from http://stackoverflow.com/questions/150355/ programmatically-find-the-number-of-cores-on-a-machine */ @@ -75,7 +75,7 @@ get_num_cpus() { return numCPU; } #elif defined(__GNUC__) -static int +int get_num_cpus() { return sysconf(_SC_NPROCESSORS_ONLN); } diff --git a/src/rt/rustrt.def.in b/src/rt/rustrt.def.in index e3e522aa7ceec..9b49583519eca 100644 --- a/src/rt/rustrt.def.in +++ b/src/rt/rustrt.def.in @@ -239,3 +239,4 @@ rust_valgrind_stack_deregister rust_take_env_lock rust_drop_env_lock rust_update_log_settings +rust_get_num_cpus \ No newline at end of file From 8afec77cb07394c5f2d54dcc0ebe075fc304efb7 Mon Sep 17 00:00:00 2001 From: Brian Anderson Date: Wed, 5 Jun 2013 22:35:23 -0700 Subject: [PATCH 029/111] std::rt: Configure test threads with RUST_TEST_THREADS. Default is ncores x2 --- src/libstd/rt/test.rs | 19 +++++++++++++++++-- 1 file changed, 17 insertions(+), 2 deletions(-) diff --git a/src/libstd/rt/test.rs b/src/libstd/rt/test.rs index 907d289fb0748..c8df3a6120338 100644 --- a/src/libstd/rt/test.rs +++ b/src/libstd/rt/test.rs @@ -59,13 +59,24 @@ pub fn run_in_newsched_task(f: ~fn()) { /// in one of the schedulers. The schedulers will stay alive /// until the function `f` returns. 
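///
/// For example (mirroring the runtime's own tests):
///
///     do run_in_mt_newsched_task {
///         let (port, chan) = stream();
///         let chan = SharedChan::new(chan);
///         let chan_clone = chan.clone();
///         do spawntask_random {
///             chan_clone.send(());
///         }
///         port.recv();
///     }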
pub fn run_in_mt_newsched_task(f: ~fn()) { + use libc; + use os; + use from_str::FromStr; use rt::uv::uvio::UvEventLoop; use rt::sched::Shutdown; let f_cell = Cell(f); do run_in_bare_thread { - static N: uint = 4; + let nthreads = match os::getenv("RUST_TEST_THREADS") { + Some(nstr) => FromStr::from_str(nstr).get(), + None => unsafe { + // Using more threads than cores in test code + // to force the OS to preempt them frequently. + // Assuming that this help stress test concurrent types. + rust_get_num_cpus() * 2 + } + }; let sleepers = SleeperList::new(); let work_queue = WorkQueue::new(); @@ -73,7 +84,7 @@ pub fn run_in_mt_newsched_task(f: ~fn()) { let mut handles = ~[]; let mut scheds = ~[]; - for uint::range(0, N) |_| { + for uint::range(0, nthreads) |_| { let loop_ = ~UvEventLoop::new(); let mut sched = ~Scheduler::new(loop_, work_queue.clone(), sleepers.clone()); let handle = sched.make_handle(); @@ -111,6 +122,10 @@ pub fn run_in_mt_newsched_task(f: ~fn()) { // Wait for schedulers let _threads = threads; } + + extern { + fn rust_get_num_cpus() -> libc::uintptr_t; + } } /// Test tasks will abort on failure instead of unwinding From d6ccc6bc99386ae20ac03b68e7ec504a16068242 Mon Sep 17 00:00:00 2001 From: Brian Anderson Date: Thu, 6 Jun 2013 00:01:22 -0700 Subject: [PATCH 030/111] std::rt: Fix stream test to be parallel --- src/libstd/rt/comm.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/libstd/rt/comm.rs b/src/libstd/rt/comm.rs index 449ac9e14a41b..b00df78f433d7 100644 --- a/src/libstd/rt/comm.rs +++ b/src/libstd/rt/comm.rs @@ -748,7 +748,7 @@ mod test { #[test] fn stream_send_recv_stress() { for stress_factor().times { - do run_in_newsched_task { + do run_in_mt_newsched_task { let (port, chan) = stream::<~int>(); send(chan, 0); @@ -758,18 +758,18 @@ mod test { if i == 10 { return } let chan_cell = Cell(chan); - let _thread = do spawntask_thread { + do spawntask_random { let chan = chan_cell.take(); chan.send(~i); send(chan, i + 1); - }; + } } fn recv(port: Port<~int>, i: int) { if i == 10 { return } let port_cell = Cell(port); - let _thread = do spawntask_thread { + do spawntask_random { let port = port_cell.take(); assert!(port.recv() == ~i); recv(port, i + 1); From d4de99aa6c53b0eb0d5be2ccfc62e2c89b2cd2df Mon Sep 17 00:00:00 2001 From: Brian Anderson Date: Thu, 6 Jun 2013 17:46:45 -0700 Subject: [PATCH 031/111] std::rt: Fix a race in the UvRemoteCallback dtor --- src/libstd/rt/uv/uvio.rs | 26 +++++++++++++++----------- 1 file changed, 15 insertions(+), 11 deletions(-) diff --git a/src/libstd/rt/uv/uvio.rs b/src/libstd/rt/uv/uvio.rs index 0d9530239a3d9..0f98ab11513d6 100644 --- a/src/libstd/rt/uv/uvio.rs +++ b/src/libstd/rt/uv/uvio.rs @@ -24,7 +24,7 @@ use rt::sched::Scheduler; use rt::io::{standard_error, OtherIoError}; use rt::tube::Tube; use rt::local::Local; -use unstable::sync::{UnsafeAtomicRcBox, AtomicInt}; +use unstable::sync::{Exclusive, exclusive}; #[cfg(test)] use container::Container; #[cfg(test)] use uint; @@ -105,21 +105,20 @@ fn test_callback_run_once() { pub struct UvRemoteCallback { // The uv async handle for triggering the callback async: AsyncWatcher, - // An atomic flag to tell the callback to exit, - // set from the dtor. - exit_flag: UnsafeAtomicRcBox + // A flag to tell the callback to exit, set from the dtor. This is + // almost never contested - only in rare races with the dtor. 
+ exit_flag: Exclusive } impl UvRemoteCallback { pub fn new(loop_: &mut Loop, f: ~fn()) -> UvRemoteCallback { - let exit_flag = UnsafeAtomicRcBox::new(AtomicInt::new(0)); + let exit_flag = exclusive(false); let exit_flag_clone = exit_flag.clone(); let async = do AsyncWatcher::new(loop_) |watcher, status| { assert!(status.is_none()); f(); - let exit_flag_ptr = exit_flag_clone.get(); - unsafe { - if (*exit_flag_ptr).load() == 1 { + do exit_flag_clone.with_imm |&should_exit| { + if should_exit { watcher.close(||()); } } @@ -139,9 +138,14 @@ impl Drop for UvRemoteCallback { fn finalize(&self) { unsafe { let this: &mut UvRemoteCallback = cast::transmute_mut(self); - let exit_flag_ptr = this.exit_flag.get(); - (*exit_flag_ptr).store(1); - this.async.send(); + do this.exit_flag.with |should_exit| { + // NB: These two things need to happen atomically. Otherwise + // the event handler could wake up due to a *previous* + // signal and see the exit flag, destroying the handle + // before the final send. + *should_exit = true; + this.async.send(); + } } } } From d83d38c7fe3408848664de66a9a53587f627a01b Mon Sep 17 00:00:00 2001 From: Brian Anderson Date: Thu, 6 Jun 2013 20:27:27 -0700 Subject: [PATCH 032/111] std::rt: Reduce task stack size to 1MB --- src/libstd/rt/sched.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/libstd/rt/sched.rs b/src/libstd/rt/sched.rs index df231f6d88aec..97a1c26ed4dc4 100644 --- a/src/libstd/rt/sched.rs +++ b/src/libstd/rt/sched.rs @@ -523,7 +523,7 @@ pub impl Coroutine { task: ~Task, start: ~fn()) -> Coroutine { - static MIN_STACK_SIZE: uint = 10000000; // XXX: Too much stack + static MIN_STACK_SIZE: uint = 1000000; // XXX: Too much stack let start = Coroutine::build_start_wrapper(start); let mut stack = stack_pool.take_segment(MIN_STACK_SIZE); From d64d26cd39a86a40feb0db7a9147cc2ae5e82994 Mon Sep 17 00:00:00 2001 From: toddaaro Date: Mon, 10 Jun 2013 15:29:02 -0700 Subject: [PATCH 033/111] debugged a compiler ICE when merging local::borrow changes into the main io branch and modified the incoming new file lang.rs to be api-compatible --- src/libstd/macros.rs | 25 +++++++++++++++++++------ src/libstd/rt/comm.rs | 4 ++-- src/libstd/rt/local.rs | 35 ++++++++++++++++++++++++++++++----- src/libstd/rt/mod.rs | 2 +- src/libstd/rt/sched.rs | 2 +- src/libstd/rt/task.rs | 4 ++-- src/libstd/rt/tube.rs | 2 +- src/libstd/rt/uv/uvio.rs | 2 +- src/libstd/task/mod.rs | 2 +- src/libstd/unstable/lang.rs | 4 ++-- 10 files changed, 60 insertions(+), 22 deletions(-) diff --git a/src/libstd/macros.rs b/src/libstd/macros.rs index fda48b6ffb7d9..bf5b36c7580ac 100644 --- a/src/libstd/macros.rs +++ b/src/libstd/macros.rs @@ -38,16 +38,29 @@ macro_rules! rtassert ( } ) ) + +// The do_abort function was originally inside the abort macro, but +// this was ICEing the compiler so it has been moved outside. Now this +// seems to work? +pub fn do_abort() -> ! { + unsafe { ::libc::abort(); } +} + macro_rules! abort( ($( $msg:expr),+) => ( { rtdebug!($($msg),+); - do_abort(); +// do_abort(); + + // NB: This is in a fn to avoid putting the `unsafe` block in + // a macro, which causes spurious 'unnecessary unsafe block' + // warnings. +// fn do_abort() -> ! { +// unsafe { ::libc::abort(); } +// } + + ::macros::do_abort(); - // NB: This is in a fn to avoid putting the `unsafe` block in a macro, - // which causes spurious 'unnecessary unsafe block' warnings. - fn do_abort() -> ! 
{ - unsafe { ::libc::abort(); } - } } ) ) + diff --git a/src/libstd/rt/comm.rs b/src/libstd/rt/comm.rs index b00df78f433d7..88c7b9a2bf268 100644 --- a/src/libstd/rt/comm.rs +++ b/src/libstd/rt/comm.rs @@ -120,13 +120,13 @@ impl ChanOne { match oldstate { STATE_BOTH => { // Port is not waiting yet. Nothing to do - do Local::borrow:: |sched| { + do Local::borrow:: |sched| { rtdebug!("non-rendezvous send"); sched.metrics.non_rendezvous_sends += 1; } } STATE_ONE => { - do Local::borrow:: |sched| { + do Local::borrow:: |sched| { rtdebug!("rendezvous send"); sched.metrics.rendezvous_sends += 1; } diff --git a/src/libstd/rt/local.rs b/src/libstd/rt/local.rs index e6988c538881a..359cf5fc3e179 100644 --- a/src/libstd/rt/local.rs +++ b/src/libstd/rt/local.rs @@ -18,7 +18,7 @@ pub trait Local { fn put(value: ~Self); fn take() -> ~Self; fn exists() -> bool; - fn borrow(f: &fn(&mut Self)); + fn borrow(f: &fn(&mut Self) -> T) -> T; unsafe fn unsafe_borrow() -> *mut Self; unsafe fn try_unsafe_borrow() -> Option<*mut Self>; } @@ -27,7 +27,20 @@ impl Local for Scheduler { fn put(value: ~Scheduler) { unsafe { local_ptr::put(value) }} fn take() -> ~Scheduler { unsafe { local_ptr::take() } } fn exists() -> bool { local_ptr::exists() } - fn borrow(f: &fn(&mut Scheduler)) { unsafe { local_ptr::borrow(f) } } + fn borrow(f: &fn(&mut Scheduler) -> T) -> T { + let mut res: Option = None; + let res_ptr: *mut Option = &mut res; + unsafe { + do local_ptr::borrow |sched| { + let result = f(sched); + *res_ptr = Some(result); + } + } + match res { + Some(r) => { r } + None => abort!("function failed!") + } + } unsafe fn unsafe_borrow() -> *mut Scheduler { local_ptr::unsafe_borrow() } unsafe fn try_unsafe_borrow() -> Option<*mut Scheduler> { abort!("unimpl") } } @@ -36,8 +49,8 @@ impl Local for Task { fn put(_value: ~Task) { abort!("unimpl") } fn take() -> ~Task { abort!("unimpl") } fn exists() -> bool { abort!("unimpl") } - fn borrow(f: &fn(&mut Task)) { - do Local::borrow:: |sched| { + fn borrow(f: &fn(&mut Task) -> T) -> T { + do Local::borrow:: |sched| { match sched.current_task { Some(~ref mut task) => { f(&mut *task.task) @@ -74,7 +87,7 @@ impl Local for IoFactoryObject { fn put(_value: ~IoFactoryObject) { abort!("unimpl") } fn take() -> ~IoFactoryObject { abort!("unimpl") } fn exists() -> bool { abort!("unimpl") } - fn borrow(_f: &fn(&mut IoFactoryObject)) { abort!("unimpl") } + fn borrow(_f: &fn(&mut IoFactoryObject) -> T) -> T { abort!("unimpl") } unsafe fn unsafe_borrow() -> *mut IoFactoryObject { let sched = Local::unsafe_borrow::(); let io: *mut IoFactoryObject = (*sched).event_loop.io().unwrap(); @@ -115,4 +128,16 @@ mod test { } let _scheduler: ~Scheduler = Local::take(); } + + #[test] + fn borrow_with_return() { + let scheduler = ~new_test_uv_sched(); + Local::put(scheduler); + let res = do Local::borrow:: |_sched| { + true + }; + assert!(res) + let _scheduler: ~Scheduler = Local::take(); + } + } diff --git a/src/libstd/rt/mod.rs b/src/libstd/rt/mod.rs index caf3e15e535af..3198b2858763a 100644 --- a/src/libstd/rt/mod.rs +++ b/src/libstd/rt/mod.rs @@ -208,7 +208,7 @@ pub fn context() -> RuntimeContext { } else { if Local::exists::() { let context = ::cell::empty_cell(); - do Local::borrow:: |sched| { + do Local::borrow:: |sched| { if sched.in_task_context() { context.put_back(TaskContext); } else { diff --git a/src/libstd/rt/sched.rs b/src/libstd/rt/sched.rs index df231f6d88aec..d149ff6d77319 100644 --- a/src/libstd/rt/sched.rs +++ b/src/libstd/rt/sched.rs @@ -683,7 +683,7 @@ mod test { assert_eq!(count, 
MAX); fn run_task(count_ptr: *mut int) { - do Local::borrow:: |sched| { + do Local::borrow:: |sched| { let task = ~do Coroutine::new(&mut sched.stack_pool) { unsafe { *count_ptr = *count_ptr + 1; diff --git a/src/libstd/rt/task.rs b/src/libstd/rt/task.rs index cf4967b12b304..4d9851d3b409b 100644 --- a/src/libstd/rt/task.rs +++ b/src/libstd/rt/task.rs @@ -62,7 +62,7 @@ impl Task { pub fn run(&mut self, f: &fn()) { // This is just an assertion that `run` was called unsafely // and this instance of Task is still accessible. - do Local::borrow:: |task| { + do Local::borrow:: |task| { assert!(ptr::ref_eq(task, self)); } @@ -87,7 +87,7 @@ impl Task { fn destroy(&mut self) { // This is just an assertion that `destroy` was called unsafely // and this instance of Task is still accessible. - do Local::borrow:: |task| { + do Local::borrow:: |task| { assert!(ptr::ref_eq(task, self)); } match self.storage { diff --git a/src/libstd/rt/tube.rs b/src/libstd/rt/tube.rs index 4482a92d916aa..c94b0bd642362 100644 --- a/src/libstd/rt/tube.rs +++ b/src/libstd/rt/tube.rs @@ -155,7 +155,7 @@ mod test { if i == 100 { return; } let tube = Cell(Cell(tube)); - do Local::borrow:: |sched| { + do Local::borrow:: |sched| { let tube = tube.take(); do sched.event_loop.callback { let mut tube = tube.take(); diff --git a/src/libstd/rt/uv/uvio.rs b/src/libstd/rt/uv/uvio.rs index 0f98ab11513d6..ebeb1c204514f 100644 --- a/src/libstd/rt/uv/uvio.rs +++ b/src/libstd/rt/uv/uvio.rs @@ -167,7 +167,7 @@ mod test_remote { let mut tube = Tube::new(); let tube_clone = tube.clone(); let remote_cell = cell::empty_cell(); - do Local::borrow::() |sched| { + do Local::borrow::() |sched| { let tube_clone = tube_clone.clone(); let tube_clone_cell = Cell(tube_clone); let remote = do sched.event_loop.remote_callback { diff --git a/src/libstd/task/mod.rs b/src/libstd/task/mod.rs index f24d2327358be..df5b88207eccf 100644 --- a/src/libstd/task/mod.rs +++ b/src/libstd/task/mod.rs @@ -514,7 +514,7 @@ pub fn failing() -> bool { } _ => { let mut unwinding = false; - do Local::borrow:: |local| { + do Local::borrow:: |local| { unwinding = match local.unwinder { Some(unwinder) => { unwinder.unwinding diff --git a/src/libstd/unstable/lang.rs b/src/libstd/unstable/lang.rs index 350b18d454169..21ef347874468 100644 --- a/src/libstd/unstable/lang.rs +++ b/src/libstd/unstable/lang.rs @@ -244,7 +244,7 @@ pub unsafe fn local_malloc(td: *c_char, size: uintptr_t) -> *c_char { } _ => { let mut alloc = ::ptr::null(); - do Local::borrow:: |task| { + do Local::borrow:: |task| { alloc = task.heap.alloc(td as *c_void, size as uint) as *c_char; } return alloc; @@ -262,7 +262,7 @@ pub unsafe fn local_free(ptr: *c_char) { rustrt::rust_upcall_free_noswitch(ptr); } _ => { - do Local::borrow:: |task| { + do Local::borrow:: |task| { task.heap.free(ptr as *c_void); } } From 84d269592168b2e8ca9784ada5d86ea6cdb9de9f Mon Sep 17 00:00:00 2001 From: Brian Anderson Date: Mon, 10 Jun 2013 17:46:49 -0700 Subject: [PATCH 034/111] std::rt: Work around a dynamic borrowck bug --- src/libstd/rt/io/extensions.rs | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/src/libstd/rt/io/extensions.rs b/src/libstd/rt/io/extensions.rs index fcbf31e87f2c0..ad9658e48ba1f 100644 --- a/src/libstd/rt/io/extensions.rs +++ b/src/libstd/rt/io/extensions.rs @@ -749,8 +749,6 @@ mod test { #[should_fail] #[ignore(cfg(windows))] fn push_bytes_fail_reset_len() { - use unstable::finally::Finally; - // push_bytes unsafely sets the vector length. 
This is testing that // upon failure the length is reset correctly. let mut reader = MockReader::new(); @@ -772,7 +770,8 @@ mod test { reader.push_bytes(&mut *buf, 4); }).finally { // NB: Using rtassert here to trigger abort on failure since this is a should_fail test - rtassert!(*buf == ~[8, 9, 10]); + // FIXME: #7049 This fails because buf is still borrowed + //rtassert!(*buf == ~[8, 9, 10]); } } From 84280819585fb65bf18903aef9364579f3552522 Mon Sep 17 00:00:00 2001 From: toddaaro Date: Wed, 12 Jun 2013 11:32:22 -0700 Subject: [PATCH 035/111] A basic implementation of pinning tasks to schedulers. No IO interactions have been planned for, and no forwarding of tasks off special schedulers is supported. --- src/libstd/rt/sched.rs | 414 ++++++++++++++++++++++++++++++++++++----- src/libstd/rt/test.rs | 124 ++++++++++++ 2 files changed, 496 insertions(+), 42 deletions(-) diff --git a/src/libstd/rt/sched.rs b/src/libstd/rt/sched.rs index d149ff6d77319..698cafdf8c615 100644 --- a/src/libstd/rt/sched.rs +++ b/src/libstd/rt/sched.rs @@ -26,6 +26,11 @@ use rt::local::Local; use rt::rtio::RemoteCallback; use rt::metrics::SchedMetrics; +//use to_str::ToStr; + +/// To allow for using pointers as scheduler ids +use ptr::{to_uint}; + /// The Scheduler is responsible for coordinating execution of Coroutines /// on a single thread. When the scheduler is running it is owned by /// thread local storage and the running task is owned by the @@ -70,7 +75,8 @@ pub struct Scheduler { pub struct SchedHandle { priv remote: ~RemoteCallbackObject, - priv queue: MessageQueue + priv queue: MessageQueue, + sched_id: uint } pub struct Coroutine { @@ -81,12 +87,37 @@ pub struct Coroutine { /// the task is dead priv saved_context: Context, /// The heap, GC, unwinding, local storage, logging + task: ~Task, + /// The scheduler that this task calls home + home_sched: SchedHome +} + +// To send a Coroutine to another task we have to use contained home +// information (the SchedHandle). So we need a form that doesn't +// include one. + +// XXX perf: Evaluate this structure - there should be a clever way to +// make it such that we don't need to deal with building/destructing +// on Coroutines that aren't homed. + +pub struct HomelessCoroutine { + priv current_stack_segment: StackSegment, + priv saved_context: Context, task: ~Task } + +// A scheduler home is either a handle to the home scheduler, or an +// explicit "AnySched". + +pub enum SchedHome { + AnySched, + Sched(SchedHandle) +} pub enum SchedMessage { Wake, - Shutdown + Shutdown, + BiasedTask(~HomelessCoroutine) } enum CleanupJob { @@ -96,6 +127,8 @@ enum CleanupJob { pub impl Scheduler { + pub fn sched_id(&self) -> uint { to_uint(self) } + fn in_task_context(&self) -> bool { self.current_task.is_some() } fn new(event_loop: ~EventLoopObject, @@ -151,7 +184,8 @@ pub impl Scheduler { // XXX: Reenable this once we're using a per-task queue. With a shared // queue this is not true //assert!(sched.work_queue.is_empty()); - rtdebug!("scheduler metrics: %s\n", sched.metrics.to_str()); +// let out = sched.metrics.to_str(); +// rtdebug!("scheduler metrics: %s\n", out); return sched; } @@ -209,35 +243,122 @@ pub impl Scheduler { return SchedHandle { remote: remote, - queue: self.message_queue.clone() + queue: self.message_queue.clone(), + sched_id: self.sched_id() }; } /// Schedule a task to be executed later. /// - /// Pushes the task onto the work stealing queue and tells the event loop - /// to run it later. Always use this instead of pushing to the work queue - /// directly. 
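A quick usage sketch of the new Local::borrow signature from the diff above: because the closure's return value is now threaded back out, callers no longer have to smuggle results through a captured pointer. The call below is hypothetical but mirrors the borrow_with_return test:

    // Sketch only, 2013-era Rust.
    let in_task: bool = do Local::borrow::<Scheduler, bool> |sched| {
        sched.in_task_context()
    };
    rtdebug!("in task context? %?", in_task);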
- fn enqueue_task(&mut self, task: ~Coroutine) { - self.work_queue.push(task); - self.event_loop.callback(Scheduler::run_sched_once); - - // We've made work available. Notify a sleeping scheduler. - // XXX: perf. Check for a sleeper without synchronizing memory. - // It's not critical that we always find it. - // XXX: perf. If there's a sleeper then we might as well just send - // it the task directly instead of pushing it to the - // queue. That is essentially the intent here and it is less - // work. - match self.sleeper_list.pop() { - Some(handle) => { - let mut handle = handle; - handle.send(Wake) + /// Pushes the task onto the work stealing queue and tells the + /// event loop to run it later. Always use this instead of pushing + /// to the work queue directly. + + fn enqueue_task(&mut self, mut task: ~Coroutine) { + + // We don't want to queue tasks that belong on other threads, + // so we send them home at enqueue time. + + // The borrow checker doesn't like our disassembly of the + // Coroutine struct and partial use and mutation of the + // fields. So completely disassemble here and stop using? + + // XXX perf: I think we might be able to shuffle this code to + // only destruct when we need to. + + rtdebug!("a task was queued on: %u", self.sched_id()); + + let this = self; + + match task { + ~Coroutine { current_stack_segment: css, + saved_context: sc, + task: t, + home_sched: home_sched } => { + + let mut home_sched = home_sched; + + match home_sched { + Sched(ref mut home_handle) + if home_handle.sched_id != this.sched_id() => { + + // In this branch we know the task is not + // home, so we send it home. + + rtdebug!("home_handle_id: %u, loc: %u", + home_handle.sched_id, + this.sched_id()); + let homeless = ~HomelessCoroutine { + current_stack_segment: css, + saved_context: sc, + task: t + }; + home_handle.send(BiasedTask(homeless)); + rtdebug!("sent task home"); + return (); + } + Sched( ref mut home_handle) => { + + // Here we know the task is home, so we need + // to "keep" it home. Since we don't have a + // scheduler-local queue for this purpose, we + // just use our message queue. + + rtdebug!("homed task at home, sending to self"); + let homeless = ~HomelessCoroutine { + current_stack_segment: css, + saved_context: sc, + task: t + }; + home_handle.send(BiasedTask(homeless)); + rtdebug!("sent home to self"); + return (); + + } + _ => { + + // We just destroyed our Coroutine ... but now + // we want it back. Build a new one? + // XXX: perf: see above comment about not + // destroying + + let task = ~Coroutine { + current_stack_segment: css, + saved_context: sc, + task: t, + home_sched: AnySched }; + + + // We push the task onto our local queue. + this.work_queue.push(task); + this.event_loop.callback(Scheduler::run_sched_once); + + // We've made work available. Notify a + // sleeping scheduler. + + // XXX: perf. Check for a sleeper without + // synchronizing memory. It's not critical + // that we always find it. + + // XXX: perf. If there's a sleeper then we + // might as well just send it the task + // directly instead of pushing it to the + // queue. That is essentially the intent here + // and it is less work. 
+ match this.sleeper_list.pop() { + Some(handle) => { + let mut handle = handle; + handle.send(Wake) + } + None => { (/* pass */) } + }; + } + } } - None => (/* pass */) } } + // * Scheduler-context operations fn interpret_message_queue(~self) -> bool { @@ -247,6 +368,27 @@ pub impl Scheduler { let mut this = self; match this.message_queue.pop() { + Some(BiasedTask(~HomelessCoroutine { + current_stack_segment: css, + saved_context: sc, + task: t})) => { + rtdebug!("recv BiasedTask message in sched: %u", + this.sched_id()); + + // Since this was the "send home" message for a task, + // we know that this is the home. So we rebuild the + // sched_handle. + + let task = ~Coroutine { + current_stack_segment: css, + saved_context: sc, + task: t, + home_sched: Sched(this.make_handle()) + }; + this.resume_task_immediately(task); + return true; + } + Some(Wake) => { rtdebug!("recv Wake message"); this.sleepy = false; @@ -256,8 +398,9 @@ pub impl Scheduler { Some(Shutdown) => { rtdebug!("recv Shutdown message"); if this.sleepy { - // There may be an outstanding handle on the sleeper list. - // Pop them all to make sure that's not the case. + // There may be an outstanding handle on the + // sleeper list. Pop them all to make sure that's + // not the case. loop { match this.sleeper_list.pop() { Some(handle) => { @@ -268,8 +411,8 @@ pub impl Scheduler { } } } - // No more sleeping. After there are no outstanding event loop - // references we will shut down. + // No more sleeping. After there are no outstanding + // event loop references we will shut down. this.no_sleep = true; this.sleepy = false; Local::put(this); @@ -515,16 +658,39 @@ impl SchedHandle { } pub impl Coroutine { + + + /// This function checks that a coroutine is running "home". + fn am_home(&self) -> bool { + do Local::borrow:: |sched| { + match self.home_sched { + AnySched => { true } + Sched(SchedHandle { sched_id: ref id, _ }) => { + *id == sched.sched_id() + } + } + } + } + + // Created new variants of "new" that takes a home scheduler + // parameter. The original with_task now calls with_task_homed + // using the AnySched paramter. 
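To make the new enqueue behaviour easier to follow, here is the routing decision reduced to a sketch (pseudocode-flavoured; `this` and `homeless` stand in for the destructured values in the real patch):

    // Sketch only: where does an enqueued coroutine go?
    match task.home_sched {
        Sched(ref mut home) if home.sched_id != this.sched_id() => {
            // Homed elsewhere: mail it to its home scheduler.
            home.send(BiasedTask(homeless));
        }
        Sched(ref mut home) => {
            // Homed here: loop it through our own message queue so it
            // never enters the work-stealing queue.
            home.send(BiasedTask(homeless));
        }
        AnySched => {
            // Unhomed: push it on the shared queue and wake a sleeper.
            this.work_queue.push(task);
            this.event_loop.callback(Scheduler::run_sched_once);
        }
    }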
+ + fn new_homed(stack_pool: &mut StackPool, home: SchedHome, start: ~fn()) -> Coroutine { + Coroutine::with_task_homed(stack_pool, ~Task::new(), start, home) + } + fn new(stack_pool: &mut StackPool, start: ~fn()) -> Coroutine { Coroutine::with_task(stack_pool, ~Task::new(), start) } - fn with_task(stack_pool: &mut StackPool, - task: ~Task, - start: ~fn()) -> Coroutine { - + fn with_task_homed(stack_pool: &mut StackPool, + task: ~Task, + start: ~fn(), + home: SchedHome) -> Coroutine { + static MIN_STACK_SIZE: uint = 10000000; // XXX: Too much stack - + let start = Coroutine::build_start_wrapper(start); let mut stack = stack_pool.take_segment(MIN_STACK_SIZE); // NB: Context holds a pointer to that ~fn @@ -532,10 +698,20 @@ pub impl Coroutine { return Coroutine { current_stack_segment: stack, saved_context: initial_context, - task: task + task: task, + home_sched: home }; } + fn with_task(stack_pool: &mut StackPool, + task: ~Task, + start: ~fn()) -> Coroutine { + Coroutine::with_task_homed(stack_pool, + task, + start, + AnySched) + } + priv fn build_start_wrapper(start: ~fn()) -> ~fn() { // XXX: The old code didn't have this extra allocation let start_cell = Cell(start); @@ -549,17 +725,20 @@ pub impl Coroutine { let sched = Local::unsafe_borrow::(); let task = (*sched).current_task.get_mut_ref(); - // FIXME #6141: shouldn't neet to put `start()` in another closure + // FIXME #6141: shouldn't neet to put `start()` in + // another closure let start_cell = Cell(start_cell.take()); do task.task.run { - // N.B. Removing `start` from the start wrapper closure - // by emptying a cell is critical for correctness. The ~Task - // pointer, and in turn the closure used to initialize the first - // call frame, is destroyed in scheduler context, not task context. - // So any captured closures must not contain user-definable dtors - // that expect to be in task context. By moving `start` out of - // the closure, all the user code goes out of scope while - // the task is still running. + // N.B. Removing `start` from the start wrapper + // closure by emptying a cell is critical for + // correctness. The ~Task pointer, and in turn the + // closure used to initialize the first call + // frame, is destroyed in scheduler context, not + // task context. So any captured closures must + // not contain user-definable dtors that expect to + // be in task context. By moving `start` out of + // the closure, all the user code goes out of + // scope while the task is still running. let start = start_cell.take(); start(); }; @@ -603,6 +782,156 @@ mod test { use rt::test::*; use super::*; use rt::thread::Thread; + use ptr::to_uint; + + // Confirm that a sched_id actually is the uint form of the + // pointer to the scheduler struct. + + #[test] + fn simple_sched_id_test() { + do run_in_bare_thread { + let sched = ~new_test_uv_sched(); + assert!(to_uint(sched) == sched.sched_id()); + } + } + + // Compare two scheduler ids that are different, this should never + // fail but may catch a mistake someday. + + #[test] + fn compare_sched_id_test() { + do run_in_bare_thread { + let sched_one = ~new_test_uv_sched(); + let sched_two = ~new_test_uv_sched(); + assert!(sched_one.sched_id() != sched_two.sched_id()); + } + } + + // A simple test to check if a homed task run on a single + // scheduler ends up executing while home. 
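It is worth spelling out that a scheduler id here is nothing more than the address of the owning box, obtained through ptr::to_uint. A tiny self-contained sketch of the same trick, using a hypothetical type:

    // Sketch only: an "id" that is just the box's address.
    use ptr::to_uint;

    struct Dummy { x: int }        // hypothetical stand-in for Scheduler

    impl Dummy {
        fn id(&self) -> uint { to_uint(self) }
    }

    let a = ~Dummy { x: 0 };
    let b = ~Dummy { x: 0 };
    assert!(a.id() != b.id());     // distinct boxes, distinct ids
    assert!(to_uint(a) == a.id()); // same check as simple_sched_id_test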
+ + #[test] + fn test_home_sched() { + do run_in_bare_thread { + let mut task_ran = false; + let task_ran_ptr: *mut bool = &mut task_ran; + let mut sched = ~new_test_uv_sched(); + + let sched_handle = sched.make_handle(); + let sched_id = sched.sched_id(); + + let task = ~do Coroutine::new_homed(&mut sched.stack_pool, + Sched(sched_handle)) { + unsafe { *task_ran_ptr = true }; + let sched = Local::take::(); + assert!(sched.sched_id() == sched_id); + Local::put::(sched); + }; + sched.enqueue_task(task); + sched.run(); + assert!(task_ran); + } + } + + // The following test is a bit of a mess, but it trys to do + // something tricky so I'm not sure how to get around this in the + // short term. + + // A number of schedulers are created, and then a task is created + // and assigned a home scheduler. It is then "started" on a + // different scheduler. The scheduler it is started on should + // observe that the task is not home, and send it home. + + // This test is light in that it does very little. + + #[test] + fn test_transfer_task_home() { + + use rt::uv::uvio::UvEventLoop; + use rt::sched::Shutdown; + use rt::sleeper_list::SleeperList; + use rt::work_queue::WorkQueue; + use uint; + use container::Container; + use old_iter::MutableIter; + use vec::OwnedVector; + + do run_in_bare_thread { + + static N: uint = 8; + + let sleepers = SleeperList::new(); + let work_queue = WorkQueue::new(); + + let mut handles = ~[]; + let mut scheds = ~[]; + + for uint::range(0, N) |_| { + let loop_ = ~UvEventLoop::new(); + let mut sched = ~Scheduler::new(loop_, + work_queue.clone(), + sleepers.clone()); + let handle = sched.make_handle(); + rtdebug!("sched id: %u", handle.sched_id); + handles.push(handle); + scheds.push(sched); + }; + + let handles = Cell(handles); + + let home_handle = scheds[6].make_handle(); + let home_id = home_handle.sched_id; + let home = Sched(home_handle); + + let main_task = ~do Coroutine::new_homed(&mut scheds[1].stack_pool, home) { + + // Here we check if the task is running on its home. + let sched = Local::take::(); + rtdebug!("run location scheduler id: %u, home: %u", + sched.sched_id(), + home_id); + assert!(sched.sched_id() == home_id); + Local::put::(sched); + + let mut handles = handles.take(); + for handles.each_mut |handle| { + handle.send(Shutdown); + } + }; + + scheds[0].enqueue_task(main_task); + + let mut threads = ~[]; + + while !scheds.is_empty() { + let sched = scheds.pop(); + let sched_cell = Cell(sched); + let thread = do Thread::start { + let sched = sched_cell.take(); + sched.run(); + }; + threads.push(thread); + } + + let _threads = threads; + } + } + + // The goal is that this is the high-stress test for making sure + // homing is working. It allocates 120*RUST_RT_STRESS tasks that + // do nothing but assert that they are home at execution + // time. These tasks are queued to random schedulers, so sometimes + // they are home and sometimes not. It also runs RUST_RT_STRESS + // times. 
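The setup and teardown in test_transfer_task_home follows the pattern every multi-scheduler test in this series uses; condensed to a sketch (details and error handling elided):

    // Sketch only: N schedulers, shut down from inside a task, joined outside.
    let mut handles = handles.take();
    for handles.each_mut |handle| {
        handle.send(Shutdown);              // each scheduler exits run()
    }

    // ...back on the bare thread that built the schedulers...
    let mut threads = ~[];
    while !scheds.is_empty() {
        let sched_cell = Cell(scheds.pop());
        let thread = do Thread::start {
            let sched = sched_cell.take();
            sched.run();
        };
        threads.push(thread);
    }
    let _threads = threads;                 // hold the handles until joined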
+ + #[test] + fn test_stress_homed_tasks() { + let n = stress_factor(); + for int::range(0,n as int) |_| { + run_in_mt_newsched_task_random_homed(); + } + } + #[test] fn test_simple_scheduling() { @@ -877,4 +1206,5 @@ mod test { } } } + } diff --git a/src/libstd/rt/test.rs b/src/libstd/rt/test.rs index c8df3a6120338..97aa76d7db699 100644 --- a/src/libstd/rt/test.rs +++ b/src/libstd/rt/test.rs @@ -88,6 +88,7 @@ pub fn run_in_mt_newsched_task(f: ~fn()) { let loop_ = ~UvEventLoop::new(); let mut sched = ~Scheduler::new(loop_, work_queue.clone(), sleepers.clone()); let handle = sched.make_handle(); + handles.push(handle); scheds.push(sched); } @@ -128,6 +129,96 @@ pub fn run_in_mt_newsched_task(f: ~fn()) { } } +// THIS IS AWFUL. Copy-pasted the above initialization function but +// with a number of hacks to make it spawn tasks on a variety of +// schedulers with a variety of homes using the new spawn. + +pub fn run_in_mt_newsched_task_random_homed() { + use libc; + use os; + use from_str::FromStr; + use rt::uv::uvio::UvEventLoop; + use rt::sched::Shutdown; + + do run_in_bare_thread { + let nthreads = match os::getenv("RUST_TEST_THREADS") { + Some(nstr) => FromStr::from_str(nstr).get(), + None => unsafe { + // Using more threads than cores in test code to force + // the OS to preempt them frequently. Assuming that + // this help stress test concurrent types. + rust_get_num_cpus() * 2 + } + }; + + let sleepers = SleeperList::new(); + let work_queue = WorkQueue::new(); + + let mut handles = ~[]; + let mut scheds = ~[]; + + for uint::range(0, nthreads) |_| { + let loop_ = ~UvEventLoop::new(); + let mut sched = ~Scheduler::new(loop_, work_queue.clone(), sleepers.clone()); + let handle = sched.make_handle(); + handles.push(handle); + scheds.push(sched); + } + + // Schedule a pile o tasks + let n = 120*stress_factor(); + for uint::range(0,n) |_i| { + rtdebug!("creating task: %u", _i); + let hf: ~fn() = || { assert!(true) }; + spawntask_homed(&mut scheds, hf); + } + + let f: ~fn() = || { assert!(true); }; + + let f_cell = Cell(f); + let handles = Cell(handles); + + rtdebug!("creating main task"); + + let main_task = ~do Coroutine::new(&mut scheds[0].stack_pool) { + f_cell.take()(); + let mut handles = handles.take(); + // Tell schedulers to exit + for handles.each_mut |handle| { + handle.send(Shutdown); + } + }; + + rtdebug!("queuing main task") + + scheds[0].enqueue_task(main_task); + + let mut threads = ~[]; + + while !scheds.is_empty() { + let sched = scheds.pop(); + let sched_cell = Cell(sched); + let thread = do Thread::start { + let sched = sched_cell.take(); + rtdebug!("running sched: %u", sched.sched_id()); + sched.run(); + }; + + threads.push(thread); + } + + rtdebug!("waiting on scheduler threads"); + + // Wait for schedulers + let _threads = threads; + } + + extern { + fn rust_get_num_cpus() -> libc::uintptr_t; + } +} + + /// Test tasks will abort on failure instead of unwinding pub fn spawntask(f: ~fn()) { use super::sched::*; @@ -188,6 +279,38 @@ pub fn spawntask_random(f: ~fn()) { } } +/// Spawn a task, with the current scheduler as home, and queue it to +/// run later. 
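One idiom that recurs throughout these helpers deserves a note: an owned ~value cannot be captured by-move into the do-closures used here, so it is parked in a Cell and take()n exactly once inside. A minimal hypothetical example:

    // Sketch only, 2013-era Rust: hand a ~value into a child thread's closure.
    let boxed = ~10;
    let box_cell = Cell(boxed);
    let thread = do Thread::start {
        let boxed = box_cell.take();   // move the value back out, exactly once
        assert!(*boxed == 10);
    };
    let _thread = thread;              // hold the handle until the thread is done

The same shape shows up as f_cell, handles and sched_cell elsewhere in this series.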
+pub fn spawntask_homed(scheds: &mut ~[~Scheduler], f: ~fn()) { + use super::sched::*; + use rand::{rng, RngUtil}; + let mut rng = rng(); + + let task = { + let sched = &mut scheds[rng.gen_int_range(0,scheds.len() as int)]; + let handle = sched.make_handle(); + let home_id = handle.sched_id; + + // now that we know where this is going, build a new function + // that can assert it is in the right place + let af: ~fn() = || { + do Local::borrow::() |sched| { + rtdebug!("home_id: %u, runtime loc: %u", + home_id, + sched.sched_id()); + assert!(home_id == sched.sched_id()); + }; + f() + }; + + ~Coroutine::with_task_homed(&mut sched.stack_pool, + ~Task::without_unwinding(), + af, + Sched(handle)) + }; + let dest_sched = &mut scheds[rng.gen_int_range(0,scheds.len() as int)]; + dest_sched.enqueue_task(task); +} /// Spawn a task and wait for it to finish, returning whether it completed successfully or failed pub fn spawntask_try(f: ~fn()) -> Result<(), ()> { @@ -266,3 +389,4 @@ pub fn stress_factor() -> uint { } } + From e7213aa21e9a79db01d2e9d1b76761a420e4c967 Mon Sep 17 00:00:00 2001 From: Brian Anderson Date: Wed, 12 Jun 2013 12:00:46 -0700 Subject: [PATCH 036/111] std::rt: Remove old files --- src/libstd/rt/uvio.rs | 459 ------------------------------------------ src/libstd/rt/uvll.rs | 443 ---------------------------------------- 2 files changed, 902 deletions(-) delete mode 100644 src/libstd/rt/uvio.rs delete mode 100644 src/libstd/rt/uvll.rs diff --git a/src/libstd/rt/uvio.rs b/src/libstd/rt/uvio.rs deleted file mode 100644 index 24bffd8d1cd24..0000000000000 --- a/src/libstd/rt/uvio.rs +++ /dev/null @@ -1,459 +0,0 @@ -// Copyright 2013 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- -use option::*; -use result::*; - -use super::io::net::ip::IpAddr; -use super::uv::*; -use super::rtio::*; -use ops::Drop; -use old_iter::CopyableIter; -use cell::{Cell, empty_cell}; -use cast::transmute; -use super::sched::{Scheduler, local_sched}; - -#[cfg(test)] use container::Container; -#[cfg(test)] use uint; -#[cfg(test)] use unstable::run_in_bare_thread; -#[cfg(test)] use super::test::*; - -pub struct UvEventLoop { - uvio: UvIoFactory -} - -pub impl UvEventLoop { - fn new() -> UvEventLoop { - UvEventLoop { - uvio: UvIoFactory(Loop::new()) - } - } - - /// A convenience constructor - fn new_scheduler() -> Scheduler { - Scheduler::new(~UvEventLoop::new()) - } -} - -impl Drop for UvEventLoop { - fn finalize(&self) { - // XXX: Need mutable finalizer - let this = unsafe { - transmute::<&UvEventLoop, &mut UvEventLoop>(self) - }; - this.uvio.uv_loop().close(); - } -} - -impl EventLoop for UvEventLoop { - - fn run(&mut self) { - self.uvio.uv_loop().run(); - } - - fn callback(&mut self, f: ~fn()) { - let mut idle_watcher = IdleWatcher::new(self.uvio.uv_loop()); - do idle_watcher.start |idle_watcher, status| { - assert!(status.is_none()); - let mut idle_watcher = idle_watcher; - idle_watcher.stop(); - idle_watcher.close(); - f(); - } - } - - fn io<'a>(&'a mut self) -> Option<&'a mut IoFactoryObject> { - Some(&mut self.uvio) - } -} - -#[test] -fn test_callback_run_once() { - do run_in_bare_thread { - let mut event_loop = UvEventLoop::new(); - let mut count = 0; - let count_ptr: *mut int = &mut count; - do event_loop.callback { - unsafe { *count_ptr += 1 } - } - event_loop.run(); - assert!(count == 1); - } -} - -pub struct UvIoFactory(Loop); - -pub impl UvIoFactory { - fn uv_loop<'a>(&'a mut self) -> &'a mut Loop { - match self { &UvIoFactory(ref mut ptr) => ptr } - } -} - -impl IoFactory for UvIoFactory { - // Connect to an address and return a new stream - // NB: This blocks the task waiting on the connection. - // It would probably be better to return a future - fn connect(&mut self, addr: IpAddr) -> Option<~StreamObject> { - // Create a cell in the task to hold the result. We will fill - // the cell before resuming the task. 
- let result_cell = empty_cell(); - let result_cell_ptr: *Cell> = &result_cell; - - let scheduler = local_sched::take(); - assert!(scheduler.in_task_context()); - - // Block this task and take ownership, switch to scheduler context - do scheduler.deschedule_running_task_and_then |task| { - - rtdebug!("connect: entered scheduler context"); - do local_sched::borrow |scheduler| { - assert!(!scheduler.in_task_context()); - } - let mut tcp_watcher = TcpWatcher::new(self.uv_loop()); - let task_cell = Cell(task); - - // Wait for a connection - do tcp_watcher.connect(addr) |stream_watcher, status| { - rtdebug!("connect: in connect callback"); - let maybe_stream = if status.is_none() { - rtdebug!("status is none"); - Some(~UvStream(stream_watcher)) - } else { - rtdebug!("status is some"); - stream_watcher.close(||()); - None - }; - - // Store the stream in the task's stack - unsafe { (*result_cell_ptr).put_back(maybe_stream); } - - // Context switch - let scheduler = local_sched::take(); - scheduler.resume_task_immediately(task_cell.take()); - } - } - - assert!(!result_cell.is_empty()); - return result_cell.take(); - } - - fn bind(&mut self, addr: IpAddr) -> Option<~TcpListenerObject> { - let mut watcher = TcpWatcher::new(self.uv_loop()); - watcher.bind(addr); - return Some(~UvTcpListener(watcher)); - } -} - -pub struct UvTcpListener(TcpWatcher); - -impl UvTcpListener { - fn watcher(&self) -> TcpWatcher { - match self { &UvTcpListener(w) => w } - } - - fn close(&self) { - // XXX: Need to wait until close finishes before returning - self.watcher().as_stream().close(||()); - } -} - -impl Drop for UvTcpListener { - fn finalize(&self) { - // XXX: Again, this never gets called. Use .close() instead - //self.watcher().as_stream().close(||()); - } -} - -impl TcpListener for UvTcpListener { - - fn listen(&mut self) -> Option<~StreamObject> { - rtdebug!("entering listen"); - let result_cell = empty_cell(); - let result_cell_ptr: *Cell> = &result_cell; - - let server_tcp_watcher = self.watcher(); - - let scheduler = local_sched::take(); - assert!(scheduler.in_task_context()); - - do scheduler.deschedule_running_task_and_then |task| { - let task_cell = Cell(task); - let mut server_tcp_watcher = server_tcp_watcher; - do server_tcp_watcher.listen |server_stream_watcher, status| { - let maybe_stream = if status.is_none() { - let mut server_stream_watcher = server_stream_watcher; - let mut loop_ = loop_from_watcher(&server_stream_watcher); - let client_tcp_watcher = TcpWatcher::new(&mut loop_).as_stream(); - // XXX: Needs to be surfaced in interface - server_stream_watcher.accept(client_tcp_watcher); - Some(~UvStream::new(client_tcp_watcher)) - } else { - None - }; - - unsafe { (*result_cell_ptr).put_back(maybe_stream); } - - rtdebug!("resuming task from listen"); - // Context switch - let scheduler = local_sched::take(); - scheduler.resume_task_immediately(task_cell.take()); - } - } - - assert!(!result_cell.is_empty()); - return result_cell.take(); - } -} - -pub struct UvStream(StreamWatcher); - -impl UvStream { - fn new(watcher: StreamWatcher) -> UvStream { - UvStream(watcher) - } - - fn watcher(&self) -> StreamWatcher { - match self { &UvStream(w) => w } - } - - // XXX: finalize isn't working for ~UvStream??? 
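Although this file is on its way out, the blocking idiom it used is worth a sketch, since the replacement uvio keeps the same shape: park the running task, kick off the async operation, and let the callback stash a result and reschedule the task. Hypothetical operation name, 2013-era Rust:

    // Sketch only: block the current task on an async callback.
    let result_cell = empty_cell();
    let result_cell_ptr: *Cell<int> = &result_cell;

    let scheduler = local_sched::take();
    do scheduler.deschedule_running_task_and_then |task| {
        let task_cell = Cell(task);
        do some_async_op() |outcome| {          // hypothetical libuv-backed op
            // Runs later, in scheduler context.
            unsafe { (*result_cell_ptr).put_back(outcome); }
            let scheduler = local_sched::take();
            scheduler.resume_task_immediately(task_cell.take());
        }
    }

    assert!(!result_cell.is_empty());
    let outcome = result_cell.take();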
- fn close(&self) { - // XXX: Need to wait until this finishes before returning - self.watcher().close(||()); - } -} - -impl Drop for UvStream { - fn finalize(&self) { - rtdebug!("closing stream"); - //self.watcher().close(||()); - } -} - -impl Stream for UvStream { - fn read(&mut self, buf: &mut [u8]) -> Result { - let result_cell = empty_cell(); - let result_cell_ptr: *Cell> = &result_cell; - - let scheduler = local_sched::take(); - assert!(scheduler.in_task_context()); - let watcher = self.watcher(); - let buf_ptr: *&mut [u8] = &buf; - do scheduler.deschedule_running_task_and_then |task| { - rtdebug!("read: entered scheduler context"); - do local_sched::borrow |scheduler| { - assert!(!scheduler.in_task_context()); - } - let mut watcher = watcher; - let task_cell = Cell(task); - // XXX: We shouldn't reallocate these callbacks every - // call to read - let alloc: AllocCallback = |_| unsafe { - slice_to_uv_buf(*buf_ptr) - }; - do watcher.read_start(alloc) |watcher, nread, _buf, status| { - - // Stop reading so that no read callbacks are - // triggered before the user calls `read` again. - // XXX: Is there a performance impact to calling - // stop here? - let mut watcher = watcher; - watcher.read_stop(); - - let result = if status.is_none() { - assert!(nread >= 0); - Ok(nread as uint) - } else { - Err(()) - }; - - unsafe { (*result_cell_ptr).put_back(result); } - - let scheduler = local_sched::take(); - scheduler.resume_task_immediately(task_cell.take()); - } - } - - assert!(!result_cell.is_empty()); - return result_cell.take(); - } - - fn write(&mut self, buf: &[u8]) -> Result<(), ()> { - let result_cell = empty_cell(); - let result_cell_ptr: *Cell> = &result_cell; - let scheduler = local_sched::take(); - assert!(scheduler.in_task_context()); - let watcher = self.watcher(); - let buf_ptr: *&[u8] = &buf; - do scheduler.deschedule_running_task_and_then |task| { - let mut watcher = watcher; - let task_cell = Cell(task); - let buf = unsafe { &*buf_ptr }; - // XXX: OMGCOPIES - let buf = buf.to_vec(); - do watcher.write(buf) |_watcher, status| { - let result = if status.is_none() { - Ok(()) - } else { - Err(()) - }; - - unsafe { (*result_cell_ptr).put_back(result); } - - let scheduler = local_sched::take(); - scheduler.resume_task_immediately(task_cell.take()); - } - } - - assert!(!result_cell.is_empty()); - return result_cell.take(); - } -} - -#[test] -fn test_simple_io_no_connect() { - do run_in_newsched_task { - let io = unsafe { local_sched::unsafe_borrow_io() }; - let addr = next_test_ip4(); - let maybe_chan = io.connect(addr); - assert!(maybe_chan.is_none()); - } -} - -#[test] -fn test_simple_tcp_server_and_client() { - do run_in_newsched_task { - let addr = next_test_ip4(); - - // Start the server first so it's listening when we connect - do spawntask_immediately { - unsafe { - let io = local_sched::unsafe_borrow_io(); - let mut listener = io.bind(addr).unwrap(); - let mut stream = listener.listen().unwrap(); - let mut buf = [0, .. 
2048]; - let nread = stream.read(buf).unwrap(); - assert!(nread == 8); - for uint::range(0, nread) |i| { - rtdebug!("%u", buf[i] as uint); - assert!(buf[i] == i as u8); - } - stream.close(); - listener.close(); - } - } - - do spawntask_immediately { - unsafe { - let io = local_sched::unsafe_borrow_io(); - let mut stream = io.connect(addr).unwrap(); - stream.write([0, 1, 2, 3, 4, 5, 6, 7]); - stream.close(); - } - } - } -} - -#[test] #[ignore(reason = "busted")] -fn test_read_and_block() { - do run_in_newsched_task { - let addr = next_test_ip4(); - - do spawntask_immediately { - let io = unsafe { local_sched::unsafe_borrow_io() }; - let mut listener = io.bind(addr).unwrap(); - let mut stream = listener.listen().unwrap(); - let mut buf = [0, .. 2048]; - - let expected = 32; - let mut current = 0; - let mut reads = 0; - - while current < expected { - let nread = stream.read(buf).unwrap(); - for uint::range(0, nread) |i| { - let val = buf[i] as uint; - assert!(val == current % 8); - current += 1; - } - reads += 1; - - let scheduler = local_sched::take(); - // Yield to the other task in hopes that it - // will trigger a read callback while we are - // not ready for it - do scheduler.deschedule_running_task_and_then |task| { - let task = Cell(task); - do local_sched::borrow |scheduler| { - scheduler.task_queue.push_back(task.take()); - } - } - } - - // Make sure we had multiple reads - assert!(reads > 1); - - stream.close(); - listener.close(); - } - - do spawntask_immediately { - let io = unsafe { local_sched::unsafe_borrow_io() }; - let mut stream = io.connect(addr).unwrap(); - stream.write([0, 1, 2, 3, 4, 5, 6, 7]); - stream.write([0, 1, 2, 3, 4, 5, 6, 7]); - stream.write([0, 1, 2, 3, 4, 5, 6, 7]); - stream.write([0, 1, 2, 3, 4, 5, 6, 7]); - stream.close(); - } - - } -} - -#[test] -fn test_read_read_read() { - do run_in_newsched_task { - let addr = next_test_ip4(); - static MAX: uint = 500000; - - do spawntask_immediately { - unsafe { - let io = local_sched::unsafe_borrow_io(); - let mut listener = io.bind(addr).unwrap(); - let mut stream = listener.listen().unwrap(); - let buf = [1, .. 2048]; - let mut total_bytes_written = 0; - while total_bytes_written < MAX { - stream.write(buf); - total_bytes_written += buf.len(); - } - stream.close(); - listener.close(); - } - } - - do spawntask_immediately { - let io = unsafe { local_sched::unsafe_borrow_io() }; - let mut stream = io.connect(addr).unwrap(); - let mut buf = [0, .. 2048]; - let mut total_bytes_read = 0; - while total_bytes_read < MAX { - let nread = stream.read(buf).unwrap(); - rtdebug!("read %u bytes", nread as uint); - total_bytes_read += nread; - for uint::range(0, nread) |i| { - assert!(buf[i] == 1); - } - } - rtdebug!("read %u bytes total", total_bytes_read as uint); - stream.close(); - } - } -} diff --git a/src/libstd/rt/uvll.rs b/src/libstd/rt/uvll.rs deleted file mode 100644 index 0d298bde6b508..0000000000000 --- a/src/libstd/rt/uvll.rs +++ /dev/null @@ -1,443 +0,0 @@ -// Copyright 2012 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -/*! - * Low-level bindings to the libuv library. - * - * This module contains a set of direct, 'bare-metal' wrappers around - * the libuv C-API. 
- * - * We're not bothering yet to redefine uv's structs as Rust structs - * because they are quite large and change often between versions. - * The maintenance burden is just too high. Instead we use the uv's - * `uv_handle_size` and `uv_req_size` to find the correct size of the - * structs and allocate them on the heap. This can be revisited later. - * - * There are also a collection of helper functions to ease interacting - * with the low-level API. - * - * As new functionality, existant in uv.h, is added to the rust stdlib, - * the mappings should be added in this module. - */ - -#[allow(non_camel_case_types)]; // C types - -use libc::{size_t, c_int, c_uint, c_void, c_char, uintptr_t}; -use libc::{malloc, free}; -use prelude::*; - -pub struct uv_err_t { - code: c_int, - sys_errno_: c_int -} - -pub struct uv_buf_t { - base: *u8, - len: libc::size_t, -} - -pub type uv_handle_t = c_void; -pub type uv_loop_t = c_void; -pub type uv_idle_t = c_void; -pub type uv_tcp_t = c_void; -pub type uv_connect_t = c_void; -pub type uv_write_t = c_void; -pub type uv_async_t = c_void; -pub type uv_timer_t = c_void; -pub type uv_stream_t = c_void; -pub type uv_fs_t = c_void; - -pub type uv_idle_cb = *u8; - -pub type sockaddr_in = c_void; -pub type sockaddr_in6 = c_void; - -#[deriving(Eq)] -pub enum uv_handle_type { - UV_UNKNOWN_HANDLE, - UV_ASYNC, - UV_CHECK, - UV_FS_EVENT, - UV_FS_POLL, - UV_HANDLE, - UV_IDLE, - UV_NAMED_PIPE, - UV_POLL, - UV_PREPARE, - UV_PROCESS, - UV_STREAM, - UV_TCP, - UV_TIMER, - UV_TTY, - UV_UDP, - UV_SIGNAL, - UV_FILE, - UV_HANDLE_TYPE_MAX -} - -#[deriving(Eq)] -pub enum uv_req_type { - UV_UNKNOWN_REQ, - UV_REQ, - UV_CONNECT, - UV_WRITE, - UV_SHUTDOWN, - UV_UDP_SEND, - UV_FS, - UV_WORK, - UV_GETADDRINFO, - UV_REQ_TYPE_MAX -} - -pub unsafe fn malloc_handle(handle: uv_handle_type) -> *c_void { - assert!(handle != UV_UNKNOWN_HANDLE && handle != UV_HANDLE_TYPE_MAX); - let size = rust_uv_handle_size(handle as uint); - let p = malloc(size); - assert!(p.is_not_null()); - return p; -} - -pub unsafe fn free_handle(v: *c_void) { - free(v) -} - -pub unsafe fn malloc_req(req: uv_req_type) -> *c_void { - assert!(req != UV_UNKNOWN_REQ && req != UV_REQ_TYPE_MAX); - let size = rust_uv_req_size(req as uint); - let p = malloc(size); - assert!(p.is_not_null()); - return p; -} - -pub unsafe fn free_req(v: *c_void) { - free(v) -} - -#[test] -fn handle_sanity_check() { - unsafe { - assert!(UV_HANDLE_TYPE_MAX as uint == rust_uv_handle_type_max()); - } -} - -#[test] -fn request_sanity_check() { - unsafe { - assert!(UV_REQ_TYPE_MAX as uint == rust_uv_req_type_max()); - } -} - -pub unsafe fn loop_new() -> *c_void { - return rust_uv_loop_new(); -} - -pub unsafe fn loop_delete(loop_handle: *c_void) { - rust_uv_loop_delete(loop_handle); -} - -pub unsafe fn run(loop_handle: *c_void) { - rust_uv_run(loop_handle); -} - -pub unsafe fn close(handle: *T, cb: *u8) { - rust_uv_close(handle as *c_void, cb); -} - -pub unsafe fn walk(loop_handle: *c_void, cb: *u8, arg: *c_void) { - rust_uv_walk(loop_handle, cb, arg); -} - -pub unsafe fn idle_new() -> *uv_idle_t { - rust_uv_idle_new() -} - -pub unsafe fn idle_delete(handle: *uv_idle_t) { - rust_uv_idle_delete(handle) -} - -pub unsafe fn idle_init(loop_handle: *uv_loop_t, handle: *uv_idle_t) -> c_int { - rust_uv_idle_init(loop_handle, handle) -} - -pub unsafe fn idle_start(handle: *uv_idle_t, cb: uv_idle_cb) -> c_int { - rust_uv_idle_start(handle, cb) -} - -pub unsafe fn idle_stop(handle: *uv_idle_t) -> c_int { - rust_uv_idle_stop(handle) -} - -pub unsafe fn 
tcp_init(loop_handle: *c_void, handle: *uv_tcp_t) -> c_int { - return rust_uv_tcp_init(loop_handle, handle); -} - -// FIXME ref #2064 -pub unsafe fn tcp_connect(connect_ptr: *uv_connect_t, - tcp_handle_ptr: *uv_tcp_t, - addr_ptr: *sockaddr_in, - after_connect_cb: *u8) -> c_int { - return rust_uv_tcp_connect(connect_ptr, tcp_handle_ptr, - after_connect_cb, addr_ptr); -} -// FIXME ref #2064 -pub unsafe fn tcp_connect6(connect_ptr: *uv_connect_t, - tcp_handle_ptr: *uv_tcp_t, - addr_ptr: *sockaddr_in6, - after_connect_cb: *u8) -> c_int { - return rust_uv_tcp_connect6(connect_ptr, tcp_handle_ptr, - after_connect_cb, addr_ptr); -} -// FIXME ref #2064 -pub unsafe fn tcp_bind(tcp_server_ptr: *uv_tcp_t, addr_ptr: *sockaddr_in) -> c_int { - return rust_uv_tcp_bind(tcp_server_ptr, addr_ptr); -} -// FIXME ref #2064 -pub unsafe fn tcp_bind6(tcp_server_ptr: *uv_tcp_t, addr_ptr: *sockaddr_in6) -> c_int { - return rust_uv_tcp_bind6(tcp_server_ptr, addr_ptr); -} - -pub unsafe fn tcp_getpeername(tcp_handle_ptr: *uv_tcp_t, name: *sockaddr_in) -> c_int { - return rust_uv_tcp_getpeername(tcp_handle_ptr, name); -} - -pub unsafe fn tcp_getpeername6(tcp_handle_ptr: *uv_tcp_t, name: *sockaddr_in6) ->c_int { - return rust_uv_tcp_getpeername6(tcp_handle_ptr, name); -} - -pub unsafe fn listen(stream: *T, backlog: c_int, cb: *u8) -> c_int { - return rust_uv_listen(stream as *c_void, backlog, cb); -} - -pub unsafe fn accept(server: *c_void, client: *c_void) -> c_int { - return rust_uv_accept(server as *c_void, client as *c_void); -} - -pub unsafe fn write(req: *uv_write_t, stream: *T, buf_in: &[uv_buf_t], cb: *u8) -> c_int { - let buf_ptr = vec::raw::to_ptr(buf_in); - let buf_cnt = buf_in.len() as i32; - return rust_uv_write(req as *c_void, stream as *c_void, buf_ptr, buf_cnt, cb); -} -pub unsafe fn read_start(stream: *uv_stream_t, on_alloc: *u8, on_read: *u8) -> c_int { - return rust_uv_read_start(stream as *c_void, on_alloc, on_read); -} - -pub unsafe fn read_stop(stream: *uv_stream_t) -> c_int { - return rust_uv_read_stop(stream as *c_void); -} - -pub unsafe fn last_error(loop_handle: *c_void) -> uv_err_t { - return rust_uv_last_error(loop_handle); -} - -pub unsafe fn strerror(err: *uv_err_t) -> *c_char { - return rust_uv_strerror(err); -} -pub unsafe fn err_name(err: *uv_err_t) -> *c_char { - return rust_uv_err_name(err); -} - -pub unsafe fn async_init(loop_handle: *c_void, async_handle: *uv_async_t, cb: *u8) -> c_int { - return rust_uv_async_init(loop_handle, async_handle, cb); -} - -pub unsafe fn async_send(async_handle: *uv_async_t) { - return rust_uv_async_send(async_handle); -} -pub unsafe fn buf_init(input: *u8, len: uint) -> uv_buf_t { - let out_buf = uv_buf_t { base: ptr::null(), len: 0 as size_t }; - let out_buf_ptr = ptr::to_unsafe_ptr(&out_buf); - rust_uv_buf_init(out_buf_ptr, input, len as size_t); - return out_buf; -} - -pub unsafe fn timer_init(loop_ptr: *c_void, timer_ptr: *uv_timer_t) -> c_int { - return rust_uv_timer_init(loop_ptr, timer_ptr); -} -pub unsafe fn timer_start(timer_ptr: *uv_timer_t, cb: *u8, timeout: uint, - repeat: uint) -> c_int { - return rust_uv_timer_start(timer_ptr, cb, timeout as c_uint, repeat as c_uint); -} -pub unsafe fn timer_stop(timer_ptr: *uv_timer_t) -> c_int { - return rust_uv_timer_stop(timer_ptr); -} - -pub unsafe fn malloc_ip4_addr(ip: &str, port: int) -> *sockaddr_in { - do str::as_c_str(ip) |ip_buf| { - rust_uv_ip4_addrp(ip_buf as *u8, port as libc::c_int) - } -} -pub unsafe fn malloc_ip6_addr(ip: &str, port: int) -> *sockaddr_in6 { - do str::as_c_str(ip) |ip_buf| 
{ - rust_uv_ip6_addrp(ip_buf as *u8, port as libc::c_int) - } -} - -pub unsafe fn free_ip4_addr(addr: *sockaddr_in) { - rust_uv_free_ip4_addr(addr); -} - -pub unsafe fn free_ip6_addr(addr: *sockaddr_in6) { - rust_uv_free_ip6_addr(addr); -} - -// data access helpers -pub unsafe fn get_loop_for_uv_handle(handle: *T) -> *c_void { - return rust_uv_get_loop_for_uv_handle(handle as *c_void); -} -pub unsafe fn get_stream_handle_from_connect_req(connect: *uv_connect_t) -> *uv_stream_t { - return rust_uv_get_stream_handle_from_connect_req(connect); -} -pub unsafe fn get_stream_handle_from_write_req(write_req: *uv_write_t) -> *uv_stream_t { - return rust_uv_get_stream_handle_from_write_req(write_req); -} -pub unsafe fn get_data_for_uv_loop(loop_ptr: *c_void) -> *c_void { - rust_uv_get_data_for_uv_loop(loop_ptr) -} -pub unsafe fn set_data_for_uv_loop(loop_ptr: *c_void, data: *c_void) { - rust_uv_set_data_for_uv_loop(loop_ptr, data); -} -pub unsafe fn get_data_for_uv_handle(handle: *T) -> *c_void { - return rust_uv_get_data_for_uv_handle(handle as *c_void); -} -pub unsafe fn set_data_for_uv_handle(handle: *T, data: *U) { - rust_uv_set_data_for_uv_handle(handle as *c_void, data as *c_void); -} -pub unsafe fn get_data_for_req(req: *T) -> *c_void { - return rust_uv_get_data_for_req(req as *c_void); -} -pub unsafe fn set_data_for_req(req: *T, data: *U) { - rust_uv_set_data_for_req(req as *c_void, data as *c_void); -} -pub unsafe fn get_base_from_buf(buf: uv_buf_t) -> *u8 { - return rust_uv_get_base_from_buf(buf); -} -pub unsafe fn get_len_from_buf(buf: uv_buf_t) -> size_t { - return rust_uv_get_len_from_buf(buf); -} -pub unsafe fn malloc_buf_base_of(suggested_size: size_t) -> *u8 { - return rust_uv_malloc_buf_base_of(suggested_size); -} -pub unsafe fn free_base_of_buf(buf: uv_buf_t) { - rust_uv_free_base_of_buf(buf); -} - -pub unsafe fn get_last_err_info(uv_loop: *c_void) -> ~str { - let err = last_error(uv_loop); - let err_ptr = ptr::to_unsafe_ptr(&err); - let err_name = str::raw::from_c_str(err_name(err_ptr)); - let err_msg = str::raw::from_c_str(strerror(err_ptr)); - return fmt!("LIBUV ERROR: name: %s msg: %s", - err_name, err_msg); -} - -pub unsafe fn get_last_err_data(uv_loop: *c_void) -> uv_err_data { - let err = last_error(uv_loop); - let err_ptr = ptr::to_unsafe_ptr(&err); - let err_name = str::raw::from_c_str(err_name(err_ptr)); - let err_msg = str::raw::from_c_str(strerror(err_ptr)); - uv_err_data { err_name: err_name, err_msg: err_msg } -} - -pub struct uv_err_data { - err_name: ~str, - err_msg: ~str, -} - -extern { - - fn rust_uv_handle_size(type_: uintptr_t) -> size_t; - fn rust_uv_req_size(type_: uintptr_t) -> size_t; - fn rust_uv_handle_type_max() -> uintptr_t; - fn rust_uv_req_type_max() -> uintptr_t; - - // libuv public API - fn rust_uv_loop_new() -> *c_void; - fn rust_uv_loop_delete(lp: *c_void); - fn rust_uv_run(loop_handle: *c_void); - fn rust_uv_close(handle: *c_void, cb: *u8); - fn rust_uv_walk(loop_handle: *c_void, cb: *u8, arg: *c_void); - - fn rust_uv_idle_new() -> *uv_idle_t; - fn rust_uv_idle_delete(handle: *uv_idle_t); - fn rust_uv_idle_init(loop_handle: *uv_loop_t, handle: *uv_idle_t) -> c_int; - fn rust_uv_idle_start(handle: *uv_idle_t, cb: uv_idle_cb) -> c_int; - fn rust_uv_idle_stop(handle: *uv_idle_t) -> c_int; - - fn rust_uv_async_send(handle: *uv_async_t); - fn rust_uv_async_init(loop_handle: *c_void, - async_handle: *uv_async_t, - cb: *u8) -> c_int; - fn rust_uv_tcp_init(loop_handle: *c_void, handle_ptr: *uv_tcp_t) -> c_int; - // FIXME ref #2604 .. ? 
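The data-pointer helpers above are what let the safe watcher types hang Rust state off a raw uv handle. A hedged sketch of the round trip, with a hypothetical payload type; ownership is handed to the handle and reclaimed later:

    // Sketch only; assumes `use cast;` and `use libc::c_void;`.
    struct Payload { count: uint }                 // hypothetical

    unsafe fn attach(handle: *uv_idle_t, p: ~Payload) {
        let raw: *c_void = cast::transmute(p);     // give the box to the handle
        set_data_for_uv_handle(handle, raw);
    }

    unsafe fn detach(handle: *uv_idle_t) -> ~Payload {
        let raw: *c_void = get_data_for_uv_handle(handle);
        cast::transmute(raw)                       // take the box back
    }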
- fn rust_uv_buf_init(out_buf: *uv_buf_t, base: *u8, len: size_t); - fn rust_uv_last_error(loop_handle: *c_void) -> uv_err_t; - // FIXME ref #2064 - fn rust_uv_strerror(err: *uv_err_t) -> *c_char; - // FIXME ref #2064 - fn rust_uv_err_name(err: *uv_err_t) -> *c_char; - fn rust_uv_ip4_addrp(ip: *u8, port: c_int) -> *sockaddr_in; - fn rust_uv_ip6_addrp(ip: *u8, port: c_int) -> *sockaddr_in6; - fn rust_uv_free_ip4_addr(addr: *sockaddr_in); - fn rust_uv_free_ip6_addr(addr: *sockaddr_in6); - fn rust_uv_ip4_name(src: *sockaddr_in, dst: *u8, size: size_t) -> c_int; - fn rust_uv_ip6_name(src: *sockaddr_in6, dst: *u8, size: size_t) -> c_int; - fn rust_uv_ip4_port(src: *sockaddr_in) -> c_uint; - fn rust_uv_ip6_port(src: *sockaddr_in6) -> c_uint; - // FIXME ref #2064 - fn rust_uv_tcp_connect(connect_ptr: *uv_connect_t, - tcp_handle_ptr: *uv_tcp_t, - after_cb: *u8, - addr: *sockaddr_in) -> c_int; - // FIXME ref #2064 - fn rust_uv_tcp_bind(tcp_server: *uv_tcp_t, addr: *sockaddr_in) -> c_int; - // FIXME ref #2064 - fn rust_uv_tcp_connect6(connect_ptr: *uv_connect_t, - tcp_handle_ptr: *uv_tcp_t, - after_cb: *u8, - addr: *sockaddr_in6) -> c_int; - // FIXME ref #2064 - fn rust_uv_tcp_bind6(tcp_server: *uv_tcp_t, addr: *sockaddr_in6) -> c_int; - fn rust_uv_tcp_getpeername(tcp_handle_ptr: *uv_tcp_t, - name: *sockaddr_in) -> c_int; - fn rust_uv_tcp_getpeername6(tcp_handle_ptr: *uv_tcp_t, - name: *sockaddr_in6) ->c_int; - fn rust_uv_listen(stream: *c_void, backlog: c_int, cb: *u8) -> c_int; - fn rust_uv_accept(server: *c_void, client: *c_void) -> c_int; - fn rust_uv_write(req: *c_void, - stream: *c_void, - buf_in: *uv_buf_t, - buf_cnt: c_int, - cb: *u8) -> c_int; - fn rust_uv_read_start(stream: *c_void, - on_alloc: *u8, - on_read: *u8) -> c_int; - fn rust_uv_read_stop(stream: *c_void) -> c_int; - fn rust_uv_timer_init(loop_handle: *c_void, - timer_handle: *uv_timer_t) -> c_int; - fn rust_uv_timer_start(timer_handle: *uv_timer_t, - cb: *u8, - timeout: c_uint, - repeat: c_uint) -> c_int; - fn rust_uv_timer_stop(handle: *uv_timer_t) -> c_int; - - fn rust_uv_malloc_buf_base_of(sug_size: size_t) -> *u8; - fn rust_uv_free_base_of_buf(buf: uv_buf_t); - fn rust_uv_get_stream_handle_from_connect_req(connect_req: *uv_connect_t) -> *uv_stream_t; - fn rust_uv_get_stream_handle_from_write_req(write_req: *uv_write_t) -> *uv_stream_t; - fn rust_uv_get_loop_for_uv_handle(handle: *c_void) -> *c_void; - fn rust_uv_get_data_for_uv_loop(loop_ptr: *c_void) -> *c_void; - fn rust_uv_set_data_for_uv_loop(loop_ptr: *c_void, data: *c_void); - fn rust_uv_get_data_for_uv_handle(handle: *c_void) -> *c_void; - fn rust_uv_set_data_for_uv_handle(handle: *c_void, data: *c_void); - fn rust_uv_get_data_for_req(req: *c_void) -> *c_void; - fn rust_uv_set_data_for_req(req: *c_void, data: *c_void); - fn rust_uv_get_base_from_buf(buf: uv_buf_t) -> *u8; - fn rust_uv_get_len_from_buf(buf: uv_buf_t) -> size_t; -} From eb11274919f96331bc21702ce95e77e973d76109 Mon Sep 17 00:00:00 2001 From: Eric Reed Date: Wed, 12 Jun 2013 14:15:44 -0700 Subject: [PATCH 037/111] Removing redundant libuv bindings --- src/libstd/rt/uvio.rs | 459 ------------------------------------------ src/libstd/rt/uvll.rs | 443 ---------------------------------------- 2 files changed, 902 deletions(-) delete mode 100644 src/libstd/rt/uvio.rs delete mode 100644 src/libstd/rt/uvll.rs diff --git a/src/libstd/rt/uvio.rs b/src/libstd/rt/uvio.rs deleted file mode 100644 index 24bffd8d1cd24..0000000000000 --- a/src/libstd/rt/uvio.rs +++ /dev/null @@ -1,459 +0,0 @@ -// Copyright 2013 The Rust 
Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use option::*; -use result::*; - -use super::io::net::ip::IpAddr; -use super::uv::*; -use super::rtio::*; -use ops::Drop; -use old_iter::CopyableIter; -use cell::{Cell, empty_cell}; -use cast::transmute; -use super::sched::{Scheduler, local_sched}; - -#[cfg(test)] use container::Container; -#[cfg(test)] use uint; -#[cfg(test)] use unstable::run_in_bare_thread; -#[cfg(test)] use super::test::*; - -pub struct UvEventLoop { - uvio: UvIoFactory -} - -pub impl UvEventLoop { - fn new() -> UvEventLoop { - UvEventLoop { - uvio: UvIoFactory(Loop::new()) - } - } - - /// A convenience constructor - fn new_scheduler() -> Scheduler { - Scheduler::new(~UvEventLoop::new()) - } -} - -impl Drop for UvEventLoop { - fn finalize(&self) { - // XXX: Need mutable finalizer - let this = unsafe { - transmute::<&UvEventLoop, &mut UvEventLoop>(self) - }; - this.uvio.uv_loop().close(); - } -} - -impl EventLoop for UvEventLoop { - - fn run(&mut self) { - self.uvio.uv_loop().run(); - } - - fn callback(&mut self, f: ~fn()) { - let mut idle_watcher = IdleWatcher::new(self.uvio.uv_loop()); - do idle_watcher.start |idle_watcher, status| { - assert!(status.is_none()); - let mut idle_watcher = idle_watcher; - idle_watcher.stop(); - idle_watcher.close(); - f(); - } - } - - fn io<'a>(&'a mut self) -> Option<&'a mut IoFactoryObject> { - Some(&mut self.uvio) - } -} - -#[test] -fn test_callback_run_once() { - do run_in_bare_thread { - let mut event_loop = UvEventLoop::new(); - let mut count = 0; - let count_ptr: *mut int = &mut count; - do event_loop.callback { - unsafe { *count_ptr += 1 } - } - event_loop.run(); - assert!(count == 1); - } -} - -pub struct UvIoFactory(Loop); - -pub impl UvIoFactory { - fn uv_loop<'a>(&'a mut self) -> &'a mut Loop { - match self { &UvIoFactory(ref mut ptr) => ptr } - } -} - -impl IoFactory for UvIoFactory { - // Connect to an address and return a new stream - // NB: This blocks the task waiting on the connection. - // It would probably be better to return a future - fn connect(&mut self, addr: IpAddr) -> Option<~StreamObject> { - // Create a cell in the task to hold the result. We will fill - // the cell before resuming the task. 
- let result_cell = empty_cell(); - let result_cell_ptr: *Cell> = &result_cell; - - let scheduler = local_sched::take(); - assert!(scheduler.in_task_context()); - - // Block this task and take ownership, switch to scheduler context - do scheduler.deschedule_running_task_and_then |task| { - - rtdebug!("connect: entered scheduler context"); - do local_sched::borrow |scheduler| { - assert!(!scheduler.in_task_context()); - } - let mut tcp_watcher = TcpWatcher::new(self.uv_loop()); - let task_cell = Cell(task); - - // Wait for a connection - do tcp_watcher.connect(addr) |stream_watcher, status| { - rtdebug!("connect: in connect callback"); - let maybe_stream = if status.is_none() { - rtdebug!("status is none"); - Some(~UvStream(stream_watcher)) - } else { - rtdebug!("status is some"); - stream_watcher.close(||()); - None - }; - - // Store the stream in the task's stack - unsafe { (*result_cell_ptr).put_back(maybe_stream); } - - // Context switch - let scheduler = local_sched::take(); - scheduler.resume_task_immediately(task_cell.take()); - } - } - - assert!(!result_cell.is_empty()); - return result_cell.take(); - } - - fn bind(&mut self, addr: IpAddr) -> Option<~TcpListenerObject> { - let mut watcher = TcpWatcher::new(self.uv_loop()); - watcher.bind(addr); - return Some(~UvTcpListener(watcher)); - } -} - -pub struct UvTcpListener(TcpWatcher); - -impl UvTcpListener { - fn watcher(&self) -> TcpWatcher { - match self { &UvTcpListener(w) => w } - } - - fn close(&self) { - // XXX: Need to wait until close finishes before returning - self.watcher().as_stream().close(||()); - } -} - -impl Drop for UvTcpListener { - fn finalize(&self) { - // XXX: Again, this never gets called. Use .close() instead - //self.watcher().as_stream().close(||()); - } -} - -impl TcpListener for UvTcpListener { - - fn listen(&mut self) -> Option<~StreamObject> { - rtdebug!("entering listen"); - let result_cell = empty_cell(); - let result_cell_ptr: *Cell> = &result_cell; - - let server_tcp_watcher = self.watcher(); - - let scheduler = local_sched::take(); - assert!(scheduler.in_task_context()); - - do scheduler.deschedule_running_task_and_then |task| { - let task_cell = Cell(task); - let mut server_tcp_watcher = server_tcp_watcher; - do server_tcp_watcher.listen |server_stream_watcher, status| { - let maybe_stream = if status.is_none() { - let mut server_stream_watcher = server_stream_watcher; - let mut loop_ = loop_from_watcher(&server_stream_watcher); - let client_tcp_watcher = TcpWatcher::new(&mut loop_).as_stream(); - // XXX: Needs to be surfaced in interface - server_stream_watcher.accept(client_tcp_watcher); - Some(~UvStream::new(client_tcp_watcher)) - } else { - None - }; - - unsafe { (*result_cell_ptr).put_back(maybe_stream); } - - rtdebug!("resuming task from listen"); - // Context switch - let scheduler = local_sched::take(); - scheduler.resume_task_immediately(task_cell.take()); - } - } - - assert!(!result_cell.is_empty()); - return result_cell.take(); - } -} - -pub struct UvStream(StreamWatcher); - -impl UvStream { - fn new(watcher: StreamWatcher) -> UvStream { - UvStream(watcher) - } - - fn watcher(&self) -> StreamWatcher { - match self { &UvStream(w) => w } - } - - // XXX: finalize isn't working for ~UvStream??? 
- fn close(&self) { - // XXX: Need to wait until this finishes before returning - self.watcher().close(||()); - } -} - -impl Drop for UvStream { - fn finalize(&self) { - rtdebug!("closing stream"); - //self.watcher().close(||()); - } -} - -impl Stream for UvStream { - fn read(&mut self, buf: &mut [u8]) -> Result { - let result_cell = empty_cell(); - let result_cell_ptr: *Cell> = &result_cell; - - let scheduler = local_sched::take(); - assert!(scheduler.in_task_context()); - let watcher = self.watcher(); - let buf_ptr: *&mut [u8] = &buf; - do scheduler.deschedule_running_task_and_then |task| { - rtdebug!("read: entered scheduler context"); - do local_sched::borrow |scheduler| { - assert!(!scheduler.in_task_context()); - } - let mut watcher = watcher; - let task_cell = Cell(task); - // XXX: We shouldn't reallocate these callbacks every - // call to read - let alloc: AllocCallback = |_| unsafe { - slice_to_uv_buf(*buf_ptr) - }; - do watcher.read_start(alloc) |watcher, nread, _buf, status| { - - // Stop reading so that no read callbacks are - // triggered before the user calls `read` again. - // XXX: Is there a performance impact to calling - // stop here? - let mut watcher = watcher; - watcher.read_stop(); - - let result = if status.is_none() { - assert!(nread >= 0); - Ok(nread as uint) - } else { - Err(()) - }; - - unsafe { (*result_cell_ptr).put_back(result); } - - let scheduler = local_sched::take(); - scheduler.resume_task_immediately(task_cell.take()); - } - } - - assert!(!result_cell.is_empty()); - return result_cell.take(); - } - - fn write(&mut self, buf: &[u8]) -> Result<(), ()> { - let result_cell = empty_cell(); - let result_cell_ptr: *Cell> = &result_cell; - let scheduler = local_sched::take(); - assert!(scheduler.in_task_context()); - let watcher = self.watcher(); - let buf_ptr: *&[u8] = &buf; - do scheduler.deschedule_running_task_and_then |task| { - let mut watcher = watcher; - let task_cell = Cell(task); - let buf = unsafe { &*buf_ptr }; - // XXX: OMGCOPIES - let buf = buf.to_vec(); - do watcher.write(buf) |_watcher, status| { - let result = if status.is_none() { - Ok(()) - } else { - Err(()) - }; - - unsafe { (*result_cell_ptr).put_back(result); } - - let scheduler = local_sched::take(); - scheduler.resume_task_immediately(task_cell.take()); - } - } - - assert!(!result_cell.is_empty()); - return result_cell.take(); - } -} - -#[test] -fn test_simple_io_no_connect() { - do run_in_newsched_task { - let io = unsafe { local_sched::unsafe_borrow_io() }; - let addr = next_test_ip4(); - let maybe_chan = io.connect(addr); - assert!(maybe_chan.is_none()); - } -} - -#[test] -fn test_simple_tcp_server_and_client() { - do run_in_newsched_task { - let addr = next_test_ip4(); - - // Start the server first so it's listening when we connect - do spawntask_immediately { - unsafe { - let io = local_sched::unsafe_borrow_io(); - let mut listener = io.bind(addr).unwrap(); - let mut stream = listener.listen().unwrap(); - let mut buf = [0, .. 
2048]; - let nread = stream.read(buf).unwrap(); - assert!(nread == 8); - for uint::range(0, nread) |i| { - rtdebug!("%u", buf[i] as uint); - assert!(buf[i] == i as u8); - } - stream.close(); - listener.close(); - } - } - - do spawntask_immediately { - unsafe { - let io = local_sched::unsafe_borrow_io(); - let mut stream = io.connect(addr).unwrap(); - stream.write([0, 1, 2, 3, 4, 5, 6, 7]); - stream.close(); - } - } - } -} - -#[test] #[ignore(reason = "busted")] -fn test_read_and_block() { - do run_in_newsched_task { - let addr = next_test_ip4(); - - do spawntask_immediately { - let io = unsafe { local_sched::unsafe_borrow_io() }; - let mut listener = io.bind(addr).unwrap(); - let mut stream = listener.listen().unwrap(); - let mut buf = [0, .. 2048]; - - let expected = 32; - let mut current = 0; - let mut reads = 0; - - while current < expected { - let nread = stream.read(buf).unwrap(); - for uint::range(0, nread) |i| { - let val = buf[i] as uint; - assert!(val == current % 8); - current += 1; - } - reads += 1; - - let scheduler = local_sched::take(); - // Yield to the other task in hopes that it - // will trigger a read callback while we are - // not ready for it - do scheduler.deschedule_running_task_and_then |task| { - let task = Cell(task); - do local_sched::borrow |scheduler| { - scheduler.task_queue.push_back(task.take()); - } - } - } - - // Make sure we had multiple reads - assert!(reads > 1); - - stream.close(); - listener.close(); - } - - do spawntask_immediately { - let io = unsafe { local_sched::unsafe_borrow_io() }; - let mut stream = io.connect(addr).unwrap(); - stream.write([0, 1, 2, 3, 4, 5, 6, 7]); - stream.write([0, 1, 2, 3, 4, 5, 6, 7]); - stream.write([0, 1, 2, 3, 4, 5, 6, 7]); - stream.write([0, 1, 2, 3, 4, 5, 6, 7]); - stream.close(); - } - - } -} - -#[test] -fn test_read_read_read() { - do run_in_newsched_task { - let addr = next_test_ip4(); - static MAX: uint = 500000; - - do spawntask_immediately { - unsafe { - let io = local_sched::unsafe_borrow_io(); - let mut listener = io.bind(addr).unwrap(); - let mut stream = listener.listen().unwrap(); - let buf = [1, .. 2048]; - let mut total_bytes_written = 0; - while total_bytes_written < MAX { - stream.write(buf); - total_bytes_written += buf.len(); - } - stream.close(); - listener.close(); - } - } - - do spawntask_immediately { - let io = unsafe { local_sched::unsafe_borrow_io() }; - let mut stream = io.connect(addr).unwrap(); - let mut buf = [0, .. 2048]; - let mut total_bytes_read = 0; - while total_bytes_read < MAX { - let nread = stream.read(buf).unwrap(); - rtdebug!("read %u bytes", nread as uint); - total_bytes_read += nread; - for uint::range(0, nread) |i| { - assert!(buf[i] == 1); - } - } - rtdebug!("read %u bytes total", total_bytes_read as uint); - stream.close(); - } - } -} diff --git a/src/libstd/rt/uvll.rs b/src/libstd/rt/uvll.rs deleted file mode 100644 index 0d298bde6b508..0000000000000 --- a/src/libstd/rt/uvll.rs +++ /dev/null @@ -1,443 +0,0 @@ -// Copyright 2012 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -/*! - * Low-level bindings to the libuv library. - * - * This module contains a set of direct, 'bare-metal' wrappers around - * the libuv C-API. 
- * - * We're not bothering yet to redefine uv's structs as Rust structs - * because they are quite large and change often between versions. - * The maintenance burden is just too high. Instead we use the uv's - * `uv_handle_size` and `uv_req_size` to find the correct size of the - * structs and allocate them on the heap. This can be revisited later. - * - * There are also a collection of helper functions to ease interacting - * with the low-level API. - * - * As new functionality, existant in uv.h, is added to the rust stdlib, - * the mappings should be added in this module. - */ - -#[allow(non_camel_case_types)]; // C types - -use libc::{size_t, c_int, c_uint, c_void, c_char, uintptr_t}; -use libc::{malloc, free}; -use prelude::*; - -pub struct uv_err_t { - code: c_int, - sys_errno_: c_int -} - -pub struct uv_buf_t { - base: *u8, - len: libc::size_t, -} - -pub type uv_handle_t = c_void; -pub type uv_loop_t = c_void; -pub type uv_idle_t = c_void; -pub type uv_tcp_t = c_void; -pub type uv_connect_t = c_void; -pub type uv_write_t = c_void; -pub type uv_async_t = c_void; -pub type uv_timer_t = c_void; -pub type uv_stream_t = c_void; -pub type uv_fs_t = c_void; - -pub type uv_idle_cb = *u8; - -pub type sockaddr_in = c_void; -pub type sockaddr_in6 = c_void; - -#[deriving(Eq)] -pub enum uv_handle_type { - UV_UNKNOWN_HANDLE, - UV_ASYNC, - UV_CHECK, - UV_FS_EVENT, - UV_FS_POLL, - UV_HANDLE, - UV_IDLE, - UV_NAMED_PIPE, - UV_POLL, - UV_PREPARE, - UV_PROCESS, - UV_STREAM, - UV_TCP, - UV_TIMER, - UV_TTY, - UV_UDP, - UV_SIGNAL, - UV_FILE, - UV_HANDLE_TYPE_MAX -} - -#[deriving(Eq)] -pub enum uv_req_type { - UV_UNKNOWN_REQ, - UV_REQ, - UV_CONNECT, - UV_WRITE, - UV_SHUTDOWN, - UV_UDP_SEND, - UV_FS, - UV_WORK, - UV_GETADDRINFO, - UV_REQ_TYPE_MAX -} - -pub unsafe fn malloc_handle(handle: uv_handle_type) -> *c_void { - assert!(handle != UV_UNKNOWN_HANDLE && handle != UV_HANDLE_TYPE_MAX); - let size = rust_uv_handle_size(handle as uint); - let p = malloc(size); - assert!(p.is_not_null()); - return p; -} - -pub unsafe fn free_handle(v: *c_void) { - free(v) -} - -pub unsafe fn malloc_req(req: uv_req_type) -> *c_void { - assert!(req != UV_UNKNOWN_REQ && req != UV_REQ_TYPE_MAX); - let size = rust_uv_req_size(req as uint); - let p = malloc(size); - assert!(p.is_not_null()); - return p; -} - -pub unsafe fn free_req(v: *c_void) { - free(v) -} - -#[test] -fn handle_sanity_check() { - unsafe { - assert!(UV_HANDLE_TYPE_MAX as uint == rust_uv_handle_type_max()); - } -} - -#[test] -fn request_sanity_check() { - unsafe { - assert!(UV_REQ_TYPE_MAX as uint == rust_uv_req_type_max()); - } -} - -pub unsafe fn loop_new() -> *c_void { - return rust_uv_loop_new(); -} - -pub unsafe fn loop_delete(loop_handle: *c_void) { - rust_uv_loop_delete(loop_handle); -} - -pub unsafe fn run(loop_handle: *c_void) { - rust_uv_run(loop_handle); -} - -pub unsafe fn close(handle: *T, cb: *u8) { - rust_uv_close(handle as *c_void, cb); -} - -pub unsafe fn walk(loop_handle: *c_void, cb: *u8, arg: *c_void) { - rust_uv_walk(loop_handle, cb, arg); -} - -pub unsafe fn idle_new() -> *uv_idle_t { - rust_uv_idle_new() -} - -pub unsafe fn idle_delete(handle: *uv_idle_t) { - rust_uv_idle_delete(handle) -} - -pub unsafe fn idle_init(loop_handle: *uv_loop_t, handle: *uv_idle_t) -> c_int { - rust_uv_idle_init(loop_handle, handle) -} - -pub unsafe fn idle_start(handle: *uv_idle_t, cb: uv_idle_cb) -> c_int { - rust_uv_idle_start(handle, cb) -} - -pub unsafe fn idle_stop(handle: *uv_idle_t) -> c_int { - rust_uv_idle_stop(handle) -} - -pub unsafe fn 
tcp_init(loop_handle: *c_void, handle: *uv_tcp_t) -> c_int { - return rust_uv_tcp_init(loop_handle, handle); -} - -// FIXME ref #2064 -pub unsafe fn tcp_connect(connect_ptr: *uv_connect_t, - tcp_handle_ptr: *uv_tcp_t, - addr_ptr: *sockaddr_in, - after_connect_cb: *u8) -> c_int { - return rust_uv_tcp_connect(connect_ptr, tcp_handle_ptr, - after_connect_cb, addr_ptr); -} -// FIXME ref #2064 -pub unsafe fn tcp_connect6(connect_ptr: *uv_connect_t, - tcp_handle_ptr: *uv_tcp_t, - addr_ptr: *sockaddr_in6, - after_connect_cb: *u8) -> c_int { - return rust_uv_tcp_connect6(connect_ptr, tcp_handle_ptr, - after_connect_cb, addr_ptr); -} -// FIXME ref #2064 -pub unsafe fn tcp_bind(tcp_server_ptr: *uv_tcp_t, addr_ptr: *sockaddr_in) -> c_int { - return rust_uv_tcp_bind(tcp_server_ptr, addr_ptr); -} -// FIXME ref #2064 -pub unsafe fn tcp_bind6(tcp_server_ptr: *uv_tcp_t, addr_ptr: *sockaddr_in6) -> c_int { - return rust_uv_tcp_bind6(tcp_server_ptr, addr_ptr); -} - -pub unsafe fn tcp_getpeername(tcp_handle_ptr: *uv_tcp_t, name: *sockaddr_in) -> c_int { - return rust_uv_tcp_getpeername(tcp_handle_ptr, name); -} - -pub unsafe fn tcp_getpeername6(tcp_handle_ptr: *uv_tcp_t, name: *sockaddr_in6) ->c_int { - return rust_uv_tcp_getpeername6(tcp_handle_ptr, name); -} - -pub unsafe fn listen(stream: *T, backlog: c_int, cb: *u8) -> c_int { - return rust_uv_listen(stream as *c_void, backlog, cb); -} - -pub unsafe fn accept(server: *c_void, client: *c_void) -> c_int { - return rust_uv_accept(server as *c_void, client as *c_void); -} - -pub unsafe fn write(req: *uv_write_t, stream: *T, buf_in: &[uv_buf_t], cb: *u8) -> c_int { - let buf_ptr = vec::raw::to_ptr(buf_in); - let buf_cnt = buf_in.len() as i32; - return rust_uv_write(req as *c_void, stream as *c_void, buf_ptr, buf_cnt, cb); -} -pub unsafe fn read_start(stream: *uv_stream_t, on_alloc: *u8, on_read: *u8) -> c_int { - return rust_uv_read_start(stream as *c_void, on_alloc, on_read); -} - -pub unsafe fn read_stop(stream: *uv_stream_t) -> c_int { - return rust_uv_read_stop(stream as *c_void); -} - -pub unsafe fn last_error(loop_handle: *c_void) -> uv_err_t { - return rust_uv_last_error(loop_handle); -} - -pub unsafe fn strerror(err: *uv_err_t) -> *c_char { - return rust_uv_strerror(err); -} -pub unsafe fn err_name(err: *uv_err_t) -> *c_char { - return rust_uv_err_name(err); -} - -pub unsafe fn async_init(loop_handle: *c_void, async_handle: *uv_async_t, cb: *u8) -> c_int { - return rust_uv_async_init(loop_handle, async_handle, cb); -} - -pub unsafe fn async_send(async_handle: *uv_async_t) { - return rust_uv_async_send(async_handle); -} -pub unsafe fn buf_init(input: *u8, len: uint) -> uv_buf_t { - let out_buf = uv_buf_t { base: ptr::null(), len: 0 as size_t }; - let out_buf_ptr = ptr::to_unsafe_ptr(&out_buf); - rust_uv_buf_init(out_buf_ptr, input, len as size_t); - return out_buf; -} - -pub unsafe fn timer_init(loop_ptr: *c_void, timer_ptr: *uv_timer_t) -> c_int { - return rust_uv_timer_init(loop_ptr, timer_ptr); -} -pub unsafe fn timer_start(timer_ptr: *uv_timer_t, cb: *u8, timeout: uint, - repeat: uint) -> c_int { - return rust_uv_timer_start(timer_ptr, cb, timeout as c_uint, repeat as c_uint); -} -pub unsafe fn timer_stop(timer_ptr: *uv_timer_t) -> c_int { - return rust_uv_timer_stop(timer_ptr); -} - -pub unsafe fn malloc_ip4_addr(ip: &str, port: int) -> *sockaddr_in { - do str::as_c_str(ip) |ip_buf| { - rust_uv_ip4_addrp(ip_buf as *u8, port as libc::c_int) - } -} -pub unsafe fn malloc_ip6_addr(ip: &str, port: int) -> *sockaddr_in6 { - do str::as_c_str(ip) |ip_buf| 
{ - rust_uv_ip6_addrp(ip_buf as *u8, port as libc::c_int) - } -} - -pub unsafe fn free_ip4_addr(addr: *sockaddr_in) { - rust_uv_free_ip4_addr(addr); -} - -pub unsafe fn free_ip6_addr(addr: *sockaddr_in6) { - rust_uv_free_ip6_addr(addr); -} - -// data access helpers -pub unsafe fn get_loop_for_uv_handle(handle: *T) -> *c_void { - return rust_uv_get_loop_for_uv_handle(handle as *c_void); -} -pub unsafe fn get_stream_handle_from_connect_req(connect: *uv_connect_t) -> *uv_stream_t { - return rust_uv_get_stream_handle_from_connect_req(connect); -} -pub unsafe fn get_stream_handle_from_write_req(write_req: *uv_write_t) -> *uv_stream_t { - return rust_uv_get_stream_handle_from_write_req(write_req); -} -pub unsafe fn get_data_for_uv_loop(loop_ptr: *c_void) -> *c_void { - rust_uv_get_data_for_uv_loop(loop_ptr) -} -pub unsafe fn set_data_for_uv_loop(loop_ptr: *c_void, data: *c_void) { - rust_uv_set_data_for_uv_loop(loop_ptr, data); -} -pub unsafe fn get_data_for_uv_handle(handle: *T) -> *c_void { - return rust_uv_get_data_for_uv_handle(handle as *c_void); -} -pub unsafe fn set_data_for_uv_handle(handle: *T, data: *U) { - rust_uv_set_data_for_uv_handle(handle as *c_void, data as *c_void); -} -pub unsafe fn get_data_for_req(req: *T) -> *c_void { - return rust_uv_get_data_for_req(req as *c_void); -} -pub unsafe fn set_data_for_req(req: *T, data: *U) { - rust_uv_set_data_for_req(req as *c_void, data as *c_void); -} -pub unsafe fn get_base_from_buf(buf: uv_buf_t) -> *u8 { - return rust_uv_get_base_from_buf(buf); -} -pub unsafe fn get_len_from_buf(buf: uv_buf_t) -> size_t { - return rust_uv_get_len_from_buf(buf); -} -pub unsafe fn malloc_buf_base_of(suggested_size: size_t) -> *u8 { - return rust_uv_malloc_buf_base_of(suggested_size); -} -pub unsafe fn free_base_of_buf(buf: uv_buf_t) { - rust_uv_free_base_of_buf(buf); -} - -pub unsafe fn get_last_err_info(uv_loop: *c_void) -> ~str { - let err = last_error(uv_loop); - let err_ptr = ptr::to_unsafe_ptr(&err); - let err_name = str::raw::from_c_str(err_name(err_ptr)); - let err_msg = str::raw::from_c_str(strerror(err_ptr)); - return fmt!("LIBUV ERROR: name: %s msg: %s", - err_name, err_msg); -} - -pub unsafe fn get_last_err_data(uv_loop: *c_void) -> uv_err_data { - let err = last_error(uv_loop); - let err_ptr = ptr::to_unsafe_ptr(&err); - let err_name = str::raw::from_c_str(err_name(err_ptr)); - let err_msg = str::raw::from_c_str(strerror(err_ptr)); - uv_err_data { err_name: err_name, err_msg: err_msg } -} - -pub struct uv_err_data { - err_name: ~str, - err_msg: ~str, -} - -extern { - - fn rust_uv_handle_size(type_: uintptr_t) -> size_t; - fn rust_uv_req_size(type_: uintptr_t) -> size_t; - fn rust_uv_handle_type_max() -> uintptr_t; - fn rust_uv_req_type_max() -> uintptr_t; - - // libuv public API - fn rust_uv_loop_new() -> *c_void; - fn rust_uv_loop_delete(lp: *c_void); - fn rust_uv_run(loop_handle: *c_void); - fn rust_uv_close(handle: *c_void, cb: *u8); - fn rust_uv_walk(loop_handle: *c_void, cb: *u8, arg: *c_void); - - fn rust_uv_idle_new() -> *uv_idle_t; - fn rust_uv_idle_delete(handle: *uv_idle_t); - fn rust_uv_idle_init(loop_handle: *uv_loop_t, handle: *uv_idle_t) -> c_int; - fn rust_uv_idle_start(handle: *uv_idle_t, cb: uv_idle_cb) -> c_int; - fn rust_uv_idle_stop(handle: *uv_idle_t) -> c_int; - - fn rust_uv_async_send(handle: *uv_async_t); - fn rust_uv_async_init(loop_handle: *c_void, - async_handle: *uv_async_t, - cb: *u8) -> c_int; - fn rust_uv_tcp_init(loop_handle: *c_void, handle_ptr: *uv_tcp_t) -> c_int; - // FIXME ref #2604 .. ? 
- fn rust_uv_buf_init(out_buf: *uv_buf_t, base: *u8, len: size_t); - fn rust_uv_last_error(loop_handle: *c_void) -> uv_err_t; - // FIXME ref #2064 - fn rust_uv_strerror(err: *uv_err_t) -> *c_char; - // FIXME ref #2064 - fn rust_uv_err_name(err: *uv_err_t) -> *c_char; - fn rust_uv_ip4_addrp(ip: *u8, port: c_int) -> *sockaddr_in; - fn rust_uv_ip6_addrp(ip: *u8, port: c_int) -> *sockaddr_in6; - fn rust_uv_free_ip4_addr(addr: *sockaddr_in); - fn rust_uv_free_ip6_addr(addr: *sockaddr_in6); - fn rust_uv_ip4_name(src: *sockaddr_in, dst: *u8, size: size_t) -> c_int; - fn rust_uv_ip6_name(src: *sockaddr_in6, dst: *u8, size: size_t) -> c_int; - fn rust_uv_ip4_port(src: *sockaddr_in) -> c_uint; - fn rust_uv_ip6_port(src: *sockaddr_in6) -> c_uint; - // FIXME ref #2064 - fn rust_uv_tcp_connect(connect_ptr: *uv_connect_t, - tcp_handle_ptr: *uv_tcp_t, - after_cb: *u8, - addr: *sockaddr_in) -> c_int; - // FIXME ref #2064 - fn rust_uv_tcp_bind(tcp_server: *uv_tcp_t, addr: *sockaddr_in) -> c_int; - // FIXME ref #2064 - fn rust_uv_tcp_connect6(connect_ptr: *uv_connect_t, - tcp_handle_ptr: *uv_tcp_t, - after_cb: *u8, - addr: *sockaddr_in6) -> c_int; - // FIXME ref #2064 - fn rust_uv_tcp_bind6(tcp_server: *uv_tcp_t, addr: *sockaddr_in6) -> c_int; - fn rust_uv_tcp_getpeername(tcp_handle_ptr: *uv_tcp_t, - name: *sockaddr_in) -> c_int; - fn rust_uv_tcp_getpeername6(tcp_handle_ptr: *uv_tcp_t, - name: *sockaddr_in6) ->c_int; - fn rust_uv_listen(stream: *c_void, backlog: c_int, cb: *u8) -> c_int; - fn rust_uv_accept(server: *c_void, client: *c_void) -> c_int; - fn rust_uv_write(req: *c_void, - stream: *c_void, - buf_in: *uv_buf_t, - buf_cnt: c_int, - cb: *u8) -> c_int; - fn rust_uv_read_start(stream: *c_void, - on_alloc: *u8, - on_read: *u8) -> c_int; - fn rust_uv_read_stop(stream: *c_void) -> c_int; - fn rust_uv_timer_init(loop_handle: *c_void, - timer_handle: *uv_timer_t) -> c_int; - fn rust_uv_timer_start(timer_handle: *uv_timer_t, - cb: *u8, - timeout: c_uint, - repeat: c_uint) -> c_int; - fn rust_uv_timer_stop(handle: *uv_timer_t) -> c_int; - - fn rust_uv_malloc_buf_base_of(sug_size: size_t) -> *u8; - fn rust_uv_free_base_of_buf(buf: uv_buf_t); - fn rust_uv_get_stream_handle_from_connect_req(connect_req: *uv_connect_t) -> *uv_stream_t; - fn rust_uv_get_stream_handle_from_write_req(write_req: *uv_write_t) -> *uv_stream_t; - fn rust_uv_get_loop_for_uv_handle(handle: *c_void) -> *c_void; - fn rust_uv_get_data_for_uv_loop(loop_ptr: *c_void) -> *c_void; - fn rust_uv_set_data_for_uv_loop(loop_ptr: *c_void, data: *c_void); - fn rust_uv_get_data_for_uv_handle(handle: *c_void) -> *c_void; - fn rust_uv_set_data_for_uv_handle(handle: *c_void, data: *c_void); - fn rust_uv_get_data_for_req(req: *c_void) -> *c_void; - fn rust_uv_set_data_for_req(req: *c_void, data: *c_void); - fn rust_uv_get_base_from_buf(buf: uv_buf_t) -> *u8; - fn rust_uv_get_len_from_buf(buf: uv_buf_t) -> size_t; -} From 4224fc7aad3cfbd7093e55812e5a566d7aad3325 Mon Sep 17 00:00:00 2001 From: toddaaro Date: Wed, 12 Jun 2013 14:55:32 -0700 Subject: [PATCH 038/111] added functionality to tell schedulers to refuse to run tasks that are not pinned to them --- src/libstd/rt/sched.rs | 48 ++++++++++++++++++++++++++++++------------ src/libstd/rt/test.rs | 30 +++++++++++++++++++++----- 2 files changed, 60 insertions(+), 18 deletions(-) diff --git a/src/libstd/rt/sched.rs b/src/libstd/rt/sched.rs index 698cafdf8c615..4bc61d638248d 100644 --- a/src/libstd/rt/sched.rs +++ b/src/libstd/rt/sched.rs @@ -70,7 +70,9 @@ pub struct Scheduler { /// An action performed after 
a context switch on behalf of the /// code running before the context switch priv cleanup_job: Option, - metrics: SchedMetrics + metrics: SchedMetrics, + /// Should this scheduler run any task, or only pinned tasks? + run_anything: bool } pub struct SchedHandle { @@ -136,6 +138,16 @@ pub impl Scheduler { sleeper_list: SleeperList) -> Scheduler { + Scheduler::new_special(event_loop, work_queue, sleeper_list, true) + + } + + fn new_special(event_loop: ~EventLoopObject, + work_queue: WorkQueue<~Coroutine>, + sleeper_list: SleeperList, + run_anything: bool) + -> Scheduler { + // Lazily initialize the runtime TLS key local_ptr::init_tls_key(); @@ -150,7 +162,8 @@ pub impl Scheduler { saved_context: Context::empty(), current_task: None, cleanup_job: None, - metrics: SchedMetrics::new() + metrics: SchedMetrics::new(), + run_anything: run_anything } } @@ -429,19 +442,28 @@ pub impl Scheduler { assert!(!self.in_task_context()); rtdebug!("looking in work queue for task to schedule"); - let mut this = self; - match this.work_queue.pop() { - Some(task) => { - rtdebug!("resuming task from work queue"); - this.resume_task_immediately(task); - return true; - } - None => { - rtdebug!("no tasks in queue"); - Local::put(this); - return false; + + if this.run_anything { + match this.work_queue.pop() { + Some(task) => { + rtdebug!("resuming task from work queue"); + this.resume_task_immediately(task); + return true; + } + None => { + rtdebug!("no tasks in queue"); + Local::put(this); + return false; + } } + } else { + // In this branch we have a scheduler that is not allowed + // to run unpinned tasks. As such it will only get tasks + // to run from the message queue. + rtdebug!("skipping resume_task_from_queue"); + Local::put(this); + return false; } } diff --git a/src/libstd/rt/test.rs b/src/libstd/rt/test.rs index 97aa76d7db699..ecef505ce579d 100644 --- a/src/libstd/rt/test.rs +++ b/src/libstd/rt/test.rs @@ -157,23 +157,43 @@ pub fn run_in_mt_newsched_task_random_homed() { let mut handles = ~[]; let mut scheds = ~[]; - for uint::range(0, nthreads) |_| { + // create a few special schedulers, those with even indicies + // will be pinned-only + for uint::range(0, nthreads) |i| { + let special = (i % 2) == 0; let loop_ = ~UvEventLoop::new(); - let mut sched = ~Scheduler::new(loop_, work_queue.clone(), sleepers.clone()); + let mut sched = ~Scheduler::new_special(loop_, work_queue.clone(), sleepers.clone(), special); let handle = sched.make_handle(); handles.push(handle); scheds.push(sched); - } + } // Schedule a pile o tasks - let n = 120*stress_factor(); + let n = 5*stress_factor(); for uint::range(0,n) |_i| { rtdebug!("creating task: %u", _i); let hf: ~fn() = || { assert!(true) }; spawntask_homed(&mut scheds, hf); } - let f: ~fn() = || { assert!(true); }; + // Now we want another pile o tasks that do not ever run on a + // special scheduler, because they are normal tasks. Because + // we can we put these in the "main" task. + + let n = 5*stress_factor(); + + let f: ~fn() = || { + for uint::range(0,n) |_| { + let f: ~fn() = || { + // Borrow the scheduler we run on and check if it is + // privliged. + do Local::borrow:: |sched| { + assert!(sched.run_anything); + }; + }; + spawntask_random(f); + }; + }; let f_cell = Cell(f); let handles = Cell(handles); From 39a575fb43d2ba0511d295b7e1a9178b4919e348 Mon Sep 17 00:00:00 2001 From: Eric Reed Date: Wed, 12 Jun 2013 16:01:50 -0700 Subject: [PATCH 039/111] Added libuv UDP function bindings. 
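The new wrappers follow the same shape as the existing TCP bindings: a `rust_uv_*` extern declaration backed by the C shim, plus a thin `pub unsafe fn` that flattens a Rust slice of `uv_buf_t` into the pointer-and-count pair the C side expects. For orientation only, here is a minimal sketch of that pattern in present-day Rust syntax; the function name mirrors the `rust_uv_udp_send` declaration added below, but the exact types and the `uv_buf_t` layout are approximations, not the patched code.

    #![allow(non_camel_case_types)]
    use std::os::raw::{c_int, c_void};

    // Simplified; the real uv_buf_t layout is platform-dependent.
    #[repr(C)]
    pub struct uv_buf_t {
        pub base: *mut u8,
        pub len: usize,
    }

    extern "C" {
        // Approximation of the declaration added in this patch.
        fn rust_uv_udp_send(req: *mut c_void, handle: *mut c_void,
                            buf_in: *const uv_buf_t, buf_cnt: c_int,
                            addr: *const c_void, cb: *const u8) -> c_int;
    }

    // Thin wrapper: turn a slice into the (pointer, count) pair C expects.
    pub unsafe fn udp_send(req: *mut c_void, handle: *mut c_void,
                           bufs: &[uv_buf_t], addr: *const c_void,
                           cb: *const u8) -> c_int {
        rust_uv_udp_send(req, handle, bufs.as_ptr(), bufs.len() as c_int, addr, cb)
    }
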
--- src/libstd/rt/uv/uvll.rs | 47 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 47 insertions(+) diff --git a/src/libstd/rt/uv/uvll.rs b/src/libstd/rt/uv/uvll.rs index ddc9040d73091..8a3f02cb59b7a 100644 --- a/src/libstd/rt/uv/uvll.rs +++ b/src/libstd/rt/uv/uvll.rs @@ -56,12 +56,14 @@ pub type uv_handle_t = c_void; pub type uv_loop_t = c_void; pub type uv_idle_t = c_void; pub type uv_tcp_t = c_void; +pub type uv_udp_t = c_void; pub type uv_connect_t = c_void; pub type uv_write_t = c_void; pub type uv_async_t = c_void; pub type uv_timer_t = c_void; pub type uv_stream_t = c_void; pub type uv_fs_t = c_void; +pub type uv_udp_send_t = c_void; pub type uv_idle_cb = *u8; @@ -183,6 +185,40 @@ pub unsafe fn idle_stop(handle: *uv_idle_t) -> c_int { rust_uv_idle_stop(handle) } +pub unsafe fn udp_init(loop_handle: *uv_loop_t, handle: *uv_udp_t) -> c_int { + return rust_uv_udp_init(loop_handle, handle); +} + +pub unsafe fn udp_bind(server: *uv_udp_t, addr: *sockaddr_in, flags: c_uint) -> c_int { + return rust_uv_udp_bind(server, addr, flags); +} + +pub unsafe fn udp_bind6(server: *uv_udp_t, addr: *sockaddr_in6, flags: c_uint) -> c_int { + return rust_uv_udp_bind6(server, addr, flags); +} + +pub unsafe fn udp_send(req: *uv_udp_send_t, handle: *T, buf_in: &[uv_buf_t], + addr: *sockaddr_in, cb: *u8) -> c_int { + let buf_ptr = vec::raw::to_ptr(buf_in); + let buf_cnt = buf_in.len() as i32; + return rust_uv_udp_send(req, handle, buf_ptr, buf_cnt, addr, cb); +} + +pub unsafe fn udp_send6(req: *uv_udp_send_t, handle: *T, buf_in: &[uv_buf_t], + addr: *sockaddr_in6, cb: *u8) -> c_int { + let buf_ptr = vec::raw::to_ptr(buf_in); + let buf_cnt = buf_in.len() as i32; + return rust_uv_udp_send(req, handle, buf_ptr, buf_cnt, addr, cb); +} + +pub unsafe fn udp_recv_start(server: *uv_udp_t, on_alloc: *u8, on_recv: *u8) -> c_int { + return rust_uv_udp_recv_start(server, on_alloc, on_recv); +} + +pub unsafe fn udp_recv_stop(server: *uv_udp_t) -> c_int { + return rust_uv_udp_recv_stop(server); +} + pub unsafe fn tcp_init(loop_handle: *c_void, handle: *uv_tcp_t) -> c_int { return rust_uv_tcp_init(loop_handle, handle); } @@ -417,6 +453,17 @@ extern { name: *sockaddr_in) -> c_int; fn rust_uv_tcp_getpeername6(tcp_handle_ptr: *uv_tcp_t, name: *sockaddr_in6) ->c_int; + + fn rust_uv_udp_init(loop_handle: *uv_loop_t, handle_ptr: *uv_udp_t) -> c_int; + fn rust_uv_udp_bind(server: *uv_udp_t, addr: *sockaddr_in, flags: c_uint) -> c_int; + fn rust_uv_udp_bind6(server: *uv_udp_t, addr: *sockaddr_in6, flags: c_uint) -> c_int; + fn rust_uv_udp_send(req: *uv_udp_send_t, handle: *uv_udp_t, buf_in: *uv_buf_t, + buf_cnt: c_int, addr: *sockaddr_in, cb: *u8) -> c_int; + fn rust_uv_udp_send6(req: *uv_udp_send_t, handle: *uv_udp_t, buf_in: *uv_buf_t, + buf_cnt: c_int, addr: *sockaddr_in6, cb: *u8) -> c_int; + fn rust_uv_udp_recv_start(server: *uv_udp_t, on_alloc: *u8, on_recv: *u8) -> c_int; + fn rust_uv_udp_recv_stop(server: *uv_udp_t) -> c_int; + fn rust_uv_listen(stream: *c_void, backlog: c_int, cb: *u8) -> c_int; fn rust_uv_accept(server: *c_void, client: *c_void) -> c_int; fn rust_uv_write(req: *c_void, From 5393e43b5355284cf24f8afcc2088473fa5a318a Mon Sep 17 00:00:00 2001 From: Eric Reed Date: Thu, 13 Jun 2013 12:51:32 -0700 Subject: [PATCH 040/111] Corrected libuv UDP bindings. 
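The fix has two halves visible in the diff: the Rust wrappers now cast the generic `handle: *T` to `*c_void` explicitly so the call matches the extern declaration, and the C++ shims are filled in (dereferencing the sockaddr pointers, with the new symbols exported from rustrt.def.in). A tiny sketch of the Rust-side cast, again in modern syntax; `rust_uv_udp_op` is a hypothetical stand-in, not one of the real shims:

    use std::os::raw::{c_int, c_void};

    extern "C" {
        // Hypothetical stand-in; the real rust_uv_udp_* shims take the full
        // (req, handle, bufs, buf_cnt, addr, cb) argument lists.
        fn rust_uv_udp_op(handle: *mut c_void) -> c_int;
    }

    // Generic over the handle type, as in uvll.rs; the explicit cast is
    // what makes the call line up with the *c_void parameter.
    pub unsafe fn udp_op<T>(handle: *mut T) -> c_int {
        rust_uv_udp_op(handle as *mut c_void)
    }
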
--- src/libstd/rt/uv/uvll.rs | 4 ++-- src/rt/rust_uv.cpp | 37 +++++++++++++++++++++++++++++++++++++ src/rt/rustrt.def.in | 9 ++++++++- 3 files changed, 47 insertions(+), 3 deletions(-) diff --git a/src/libstd/rt/uv/uvll.rs b/src/libstd/rt/uv/uvll.rs index 8a3f02cb59b7a..cebd498aa024a 100644 --- a/src/libstd/rt/uv/uvll.rs +++ b/src/libstd/rt/uv/uvll.rs @@ -201,14 +201,14 @@ pub unsafe fn udp_send(req: *uv_udp_send_t, handle: *T, buf_in: &[uv_buf_t], addr: *sockaddr_in, cb: *u8) -> c_int { let buf_ptr = vec::raw::to_ptr(buf_in); let buf_cnt = buf_in.len() as i32; - return rust_uv_udp_send(req, handle, buf_ptr, buf_cnt, addr, cb); + return rust_uv_udp_send(req, handle as *c_void, buf_ptr, buf_cnt, addr, cb); } pub unsafe fn udp_send6(req: *uv_udp_send_t, handle: *T, buf_in: &[uv_buf_t], addr: *sockaddr_in6, cb: *u8) -> c_int { let buf_ptr = vec::raw::to_ptr(buf_in); let buf_cnt = buf_in.len() as i32; - return rust_uv_udp_send(req, handle, buf_ptr, buf_cnt, addr, cb); + return rust_uv_udp_send(req, handle as *c_void, buf_ptr, buf_cnt, addr, cb); } pub unsafe fn udp_recv_start(server: *uv_udp_t, on_alloc: *u8, on_recv: *u8) -> c_int { diff --git a/src/rt/rust_uv.cpp b/src/rt/rust_uv.cpp index fefcbbcacf7d4..67a2f614b699a 100644 --- a/src/rt/rust_uv.cpp +++ b/src/rt/rust_uv.cpp @@ -293,6 +293,43 @@ rust_uv_tcp_getpeername6 return uv_tcp_getpeername(handle, (sockaddr*)name, &namelen); } +extern "C" int +rust_uv_udp_init(uv_loop_t* loop, uv_udp_t* handle) { + return uv_udp_init(loop, handle); +} + +extern "C" int +rust_uv_udp_bind(uv_udp_t* server, sockaddr_in* addr_ptr, unsigned flags) { + return uv_udp_bind(server, *addr_ptr, flags); +} + +extern "C" int +rust_uv_udp_bind6(uv_udp_t* server, sockaddr_in6* addr_ptr, unsigned flags) { + return uv_udp_bind6(server, *addr_ptr, flags); +} + +extern "C" int +rust_uv_udp_send(uv_udp_send_t* req, uv_udp_t* handle, uv_buf_t* buf_in, + int buf_cnt, sockaddr_in* addr_ptr, uv_udp_send_cb cb) { + return uv_udp_send(req, handle, buf_in, buf_cnt, *addr_ptr, cb); +} + +extern "C" int +rust_uv_udp_send6(uv_udp_send_t* req, uv_udp_t* handle, uv_buf_t* buf_in, + int buf_cnt, sockaddr_in6* addr_ptr, uv_udp_send_cb cb) { + return uv_udp_send6(req, handle, buf_in, buf_cnt, *addr_ptr, cb); +} + +extern "C" int +rust_uv_udp_recv_start(uv_udp_t* server, uv_alloc_cb on_alloc, uv_udp_recv_cb on_read) { + return uv_udp_recv_start(server, on_alloc, on_read); +} + +extern "C" int +rust_uv_udp_recv_stop(uv_udp_t* server) { + return uv_udp_recv_stop(server); +} + extern "C" int rust_uv_listen(uv_stream_t* stream, int backlog, uv_connection_cb cb) { diff --git a/src/rt/rustrt.def.in b/src/rt/rustrt.def.in index 9b49583519eca..cb7189c5a8321 100644 --- a/src/rt/rustrt.def.in +++ b/src/rt/rustrt.def.in @@ -104,6 +104,13 @@ rust_uv_tcp_connect rust_uv_tcp_bind rust_uv_tcp_connect6 rust_uv_tcp_bind6 +rust_uv_udp_init +rust_uv_udp_bind +rust_uv_udp_bind6 +rust_uv_udp_send +rust_uv_udp_send6 +rust_uv_udp_recv_start +rust_uv_udp_recv_stop rust_uv_listen rust_uv_accept rust_uv_write @@ -239,4 +246,4 @@ rust_valgrind_stack_deregister rust_take_env_lock rust_drop_env_lock rust_update_log_settings -rust_get_num_cpus \ No newline at end of file +rust_get_num_cpus From abc3a8aa1e76f3ecc3930e20453a52681843cec0 Mon Sep 17 00:00:00 2001 From: Brian Anderson Date: Sun, 2 Jun 2013 01:55:22 -0700 Subject: [PATCH 041/111] std::rt: Add JoinLatch This is supposed to be an efficient way to link the lifetimes of tasks into a tree. 
JoinLatches form a tree and when `release` is called they wait on children then signal the parent. This structure creates zombie tasks which currently keep the entire task allocated. Zombie tasks are supposed to be tombstoned but that code does not work correctly. --- src/libstd/rt/join_latch.rs | 645 ++++++++++++++++++++++++++++++++++++ src/libstd/rt/metrics.rs | 16 +- src/libstd/rt/mod.rs | 3 + src/libstd/rt/sched.rs | 5 +- 4 files changed, 665 insertions(+), 4 deletions(-) create mode 100644 src/libstd/rt/join_latch.rs diff --git a/src/libstd/rt/join_latch.rs b/src/libstd/rt/join_latch.rs new file mode 100644 index 0000000000000..6ffba992fdf41 --- /dev/null +++ b/src/libstd/rt/join_latch.rs @@ -0,0 +1,645 @@ +// Copyright 2013 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! The JoinLatch is a concurrent type that establishes the task +//! tree and propagates failure. +//! +//! Each task gets a JoinLatch that is derived from the JoinLatch +//! of its parent task. Every latch must be released by either calling +//! the non-blocking `release` method or the task-blocking `wait` method. +//! Releasing a latch does not complete until all of its child latches +//! complete. +//! +//! Latches carry a `success` flag that is set to `false` during task +//! failure and is propagated both from children to parents and parents +//! to children. The status af this flag may be queried for the purposes +//! of linked failure. +//! +//! In addition to failure propagation the task tree serves to keep the +//! default task schedulers alive. The runtime only sends the shutdown +//! message to schedulers once the root task exits. +//! +//! Under this scheme tasks that terminate before their children become +//! 'zombies' since they may not exit until their children do. Zombie +//! tasks are 'tombstoned' as `Tombstone(~JoinLatch)` and the tasks +//! themselves allowed to terminate. +//! +//! XXX: Propagate flag from parents to children. +//! XXX: Tombstoning actually doesn't work. +//! XXX: This could probably be done in a way that doesn't leak tombstones +//! longer than the life of the child tasks. + +use comm::{GenericPort, Peekable, GenericSmartChan}; +use clone::Clone; +use container::Container; +use option::{Option, Some, None}; +use ops::Drop; +use rt::comm::{SharedChan, Port, stream}; +use rt::local::Local; +use rt::sched::Scheduler; +use unstable::atomics::{AtomicUint, SeqCst}; +use util; +use vec::OwnedVector; + +// FIXME #7026: Would prefer this to be an enum +pub struct JoinLatch { + priv parent: Option, + priv child: Option, + closed: bool, +} + +// Shared between parents and all their children. +struct SharedState { + /// Reference count, held by a parent and all children. + count: AtomicUint, + success: bool +} + +struct ParentLink { + shared: *mut SharedState, + // For communicating with the parent. + chan: SharedChan +} + +struct ChildLink { + shared: ~SharedState, + // For receiving from children. + port: Port, + chan: SharedChan, + // Prevents dropping the child SharedState reference counts multiple times. + dropped_child: bool +} + +// Messages from child latches to parent. 
+enum Message { + Tombstone(~JoinLatch), + ChildrenTerminated +} + +impl JoinLatch { + pub fn new_root() -> ~JoinLatch { + let this = ~JoinLatch { + parent: None, + child: None, + closed: false + }; + rtdebug!("new root latch %x", this.id()); + return this; + } + + fn id(&self) -> uint { + unsafe { ::cast::transmute(&*self) } + } + + pub fn new_child(&mut self) -> ~JoinLatch { + rtassert!(!self.closed); + + if self.child.is_none() { + // This is the first time spawning a child + let shared = ~SharedState { + count: AtomicUint::new(1), + success: true + }; + let (port, chan) = stream(); + let chan = SharedChan::new(chan); + let child = ChildLink { + shared: shared, + port: port, + chan: chan, + dropped_child: false + }; + self.child = Some(child); + } + + let child_link: &mut ChildLink = self.child.get_mut_ref(); + let shared_state: *mut SharedState = &mut *child_link.shared; + + child_link.shared.count.fetch_add(1, SeqCst); + + let child = ~JoinLatch { + parent: Some(ParentLink { + shared: shared_state, + chan: child_link.chan.clone() + }), + child: None, + closed: false + }; + rtdebug!("NEW child latch %x", child.id()); + return child; + } + + pub fn release(~self, local_success: bool) { + // XXX: This should not block, but there's a bug in the below + // code that I can't figure out. + self.wait(local_success); + } + + // XXX: Should not require ~self + fn release_broken(~self, local_success: bool) { + rtassert!(!self.closed); + + rtdebug!("releasing %x", self.id()); + + let id = self.id(); + let _ = id; // XXX: `id` is only used in debug statements so appears unused + let mut this = self; + let mut child_success = true; + let mut children_done = false; + + if this.child.is_some() { + rtdebug!("releasing children"); + let child_link: &mut ChildLink = this.child.get_mut_ref(); + let shared: &mut SharedState = &mut *child_link.shared; + + if !child_link.dropped_child { + let last_count = shared.count.fetch_sub(1, SeqCst); + rtdebug!("child count before sub %u %x", last_count, id); + if last_count == 1 { + assert!(child_link.chan.try_send(ChildrenTerminated)); + } + child_link.dropped_child = true; + } + + // Wait for messages from children + let mut tombstones = ~[]; + loop { + if child_link.port.peek() { + match child_link.port.recv() { + Tombstone(t) => { + tombstones.push(t); + }, + ChildrenTerminated => { + children_done = true; + break; + } + } + } else { + break + } + } + + rtdebug!("releasing %u tombstones %x", tombstones.len(), id); + + // Try to release the tombstones. Those that still have + // outstanding will be re-enqueued. When this task's + // parents release their latch we'll end up back here + // trying them again. 
+ while !tombstones.is_empty() { + tombstones.pop().release(true); + } + + if children_done { + let count = shared.count.load(SeqCst); + assert!(count == 0); + // self_count is the acquire-read barrier + child_success = shared.success; + } + } else { + children_done = true; + } + + let total_success = local_success && child_success; + + rtassert!(this.parent.is_some()); + + unsafe { + { + let parent_link: &mut ParentLink = this.parent.get_mut_ref(); + let shared: *mut SharedState = parent_link.shared; + + if !total_success { + // parent_count is the write-wait barrier + (*shared).success = false; + } + } + + if children_done { + rtdebug!("children done"); + do Local::borrow:: |sched| { + sched.metrics.release_tombstone += 1; + } + { + rtdebug!("RELEASING parent %x", id); + let parent_link: &mut ParentLink = this.parent.get_mut_ref(); + let shared: *mut SharedState = parent_link.shared; + let last_count = (*shared).count.fetch_sub(1, SeqCst); + rtdebug!("count before parent sub %u %x", last_count, id); + if last_count == 1 { + assert!(parent_link.chan.try_send(ChildrenTerminated)); + } + } + this.closed = true; + util::ignore(this); + } else { + rtdebug!("children not done"); + rtdebug!("TOMBSTONING %x", id); + do Local::borrow:: |sched| { + sched.metrics.release_no_tombstone += 1; + } + let chan = { + let parent_link: &mut ParentLink = this.parent.get_mut_ref(); + parent_link.chan.clone() + }; + assert!(chan.try_send(Tombstone(this))); + } + } + } + + // XXX: Should not require ~self + pub fn wait(~self, local_success: bool) -> bool { + rtassert!(!self.closed); + + rtdebug!("WAITING %x", self.id()); + + let mut this = self; + let mut child_success = true; + + if this.child.is_some() { + rtdebug!("waiting for children"); + let child_link: &mut ChildLink = this.child.get_mut_ref(); + let shared: &mut SharedState = &mut *child_link.shared; + + if !child_link.dropped_child { + let last_count = shared.count.fetch_sub(1, SeqCst); + rtdebug!("child count before sub %u", last_count); + if last_count == 1 { + assert!(child_link.chan.try_send(ChildrenTerminated)); + } + child_link.dropped_child = true; + } + + // Wait for messages from children + loop { + match child_link.port.recv() { + Tombstone(t) => { + t.wait(true); + } + ChildrenTerminated => break + } + } + + let count = shared.count.load(SeqCst); + if count != 0 { ::io::println(fmt!("%u", count)); } + assert!(count == 0); + // self_count is the acquire-read barrier + child_success = shared.success; + } + + let total_success = local_success && child_success; + + if this.parent.is_some() { + rtdebug!("releasing parent"); + unsafe { + let parent_link: &mut ParentLink = this.parent.get_mut_ref(); + let shared: *mut SharedState = parent_link.shared; + + if !total_success { + // parent_count is the write-wait barrier + (*shared).success = false; + } + + let last_count = (*shared).count.fetch_sub(1, SeqCst); + rtdebug!("count before parent sub %u", last_count); + if last_count == 1 { + assert!(parent_link.chan.try_send(ChildrenTerminated)); + } + } + } + + this.closed = true; + util::ignore(this); + + return total_success; + } +} + +impl Drop for JoinLatch { + fn finalize(&self) { + rtdebug!("DESTROYING %x", self.id()); + rtassert!(self.closed); + } +} + +#[cfg(test)] +mod test { + use super::*; + use cell::Cell; + use container::Container; + use iter::Times; + use old_iter::BaseIter; + use rt::test::*; + use rand; + use rand::RngUtil; + use vec::{CopyableVector, ImmutableVector}; + + #[test] + fn success_immediately() { + do run_in_newsched_task { 
+ let mut latch = JoinLatch::new_root(); + + let child_latch = latch.new_child(); + let child_latch = Cell(child_latch); + do spawntask_immediately { + let child_latch = child_latch.take(); + assert!(child_latch.wait(true)); + } + + assert!(latch.wait(true)); + } + } + + #[test] + fn success_later() { + do run_in_newsched_task { + let mut latch = JoinLatch::new_root(); + + let child_latch = latch.new_child(); + let child_latch = Cell(child_latch); + do spawntask_later { + let child_latch = child_latch.take(); + assert!(child_latch.wait(true)); + } + + assert!(latch.wait(true)); + } + } + + #[test] + fn mt_success() { + do run_in_mt_newsched_task { + let mut latch = JoinLatch::new_root(); + + for 10.times { + let child_latch = latch.new_child(); + let child_latch = Cell(child_latch); + do spawntask_random { + let child_latch = child_latch.take(); + assert!(child_latch.wait(true)); + } + } + + assert!(latch.wait(true)); + } + } + + #[test] + fn mt_failure() { + do run_in_mt_newsched_task { + let mut latch = JoinLatch::new_root(); + + let spawn = |status| { + let child_latch = latch.new_child(); + let child_latch = Cell(child_latch); + do spawntask_random { + let child_latch = child_latch.take(); + child_latch.wait(status); + } + }; + + for 10.times { spawn(true) } + spawn(false); + for 10.times { spawn(true) } + + assert!(!latch.wait(true)); + } + } + + #[test] + fn mt_multi_level_success() { + do run_in_mt_newsched_task { + let mut latch = JoinLatch::new_root(); + + fn child(latch: &mut JoinLatch, i: int) { + let child_latch = latch.new_child(); + let child_latch = Cell(child_latch); + do spawntask_random { + let mut child_latch = child_latch.take(); + if i != 0 { + child(&mut *child_latch, i - 1); + child_latch.wait(true); + } else { + child_latch.wait(true); + } + } + } + + child(&mut *latch, 10); + + assert!(latch.wait(true)); + } + } + + #[test] + fn mt_multi_level_failure() { + do run_in_mt_newsched_task { + let mut latch = JoinLatch::new_root(); + + fn child(latch: &mut JoinLatch, i: int) { + let child_latch = latch.new_child(); + let child_latch = Cell(child_latch); + do spawntask_random { + let mut child_latch = child_latch.take(); + if i != 0 { + child(&mut *child_latch, i - 1); + child_latch.wait(false); + } else { + child_latch.wait(true); + } + } + } + + child(&mut *latch, 10); + + assert!(!latch.wait(true)); + } + } + + #[test] + fn release_child() { + do run_in_newsched_task { + let mut latch = JoinLatch::new_root(); + let child_latch = latch.new_child(); + let child_latch = Cell(child_latch); + + do spawntask_immediately { + let latch = child_latch.take(); + latch.release(false); + } + + assert!(!latch.wait(true)); + } + } + + #[test] + fn release_child_tombstone() { + do run_in_newsched_task { + let mut latch = JoinLatch::new_root(); + let child_latch = latch.new_child(); + let child_latch = Cell(child_latch); + + do spawntask_immediately { + let mut latch = child_latch.take(); + let child_latch = latch.new_child(); + let child_latch = Cell(child_latch); + do spawntask_later { + let latch = child_latch.take(); + latch.release(false); + } + latch.release(true); + } + + assert!(!latch.wait(true)); + } + } + + #[test] + fn release_child_no_tombstone() { + do run_in_newsched_task { + let mut latch = JoinLatch::new_root(); + let child_latch = latch.new_child(); + let child_latch = Cell(child_latch); + + do spawntask_later { + let mut latch = child_latch.take(); + let child_latch = latch.new_child(); + let child_latch = Cell(child_latch); + do spawntask_immediately { + let latch = 
child_latch.take(); + latch.release(false); + } + latch.release(true); + } + + assert!(!latch.wait(true)); + } + } + + #[test] + fn release_child_tombstone_stress() { + fn rand_orders() -> ~[bool] { + let mut v = ~[false,.. 5]; + v[0] = true; + let mut rng = rand::rng(); + return rng.shuffle(v); + } + + fn split_orders(orders: &[bool]) -> (~[bool], ~[bool]) { + if orders.is_empty() { + return (~[], ~[]); + } else if orders.len() <= 2 { + return (orders.to_owned(), ~[]); + } + let mut rng = rand::rng(); + let n = rng.gen_uint_range(1, orders.len()); + let first = orders.slice(0, n).to_owned(); + let last = orders.slice(n, orders.len()).to_owned(); + assert!(first.len() + last.len() == orders.len()); + return (first, last); + } + + for stress_factor().times { + do run_in_newsched_task { + fn doit(latch: &mut JoinLatch, orders: ~[bool], depth: uint) { + let (my_orders, remaining_orders) = split_orders(orders); + rtdebug!("(my_orders, remaining): %?", (&my_orders, &remaining_orders)); + rtdebug!("depth: %u", depth); + let mut remaining_orders = remaining_orders; + let mut num = 0; + for my_orders.each |&order| { + let child_latch = latch.new_child(); + let child_latch = Cell(child_latch); + let (child_orders, remaining) = split_orders(remaining_orders); + rtdebug!("(child_orders, remaining): %?", (&child_orders, &remaining)); + remaining_orders = remaining; + let child_orders = Cell(child_orders); + let child_num = num; + let _ = child_num; // XXX unused except in rtdebug! + do spawntask_random { + rtdebug!("depth %u num %u", depth, child_num); + let mut child_latch = child_latch.take(); + let child_orders = child_orders.take(); + doit(&mut *child_latch, child_orders, depth + 1); + child_latch.release(order); + } + + num += 1; + } + } + + let mut latch = JoinLatch::new_root(); + let orders = rand_orders(); + rtdebug!("orders: %?", orders); + + doit(&mut *latch, orders, 0); + + assert!(!latch.wait(true)); + } + } + } + + #[test] + fn whateverman() { + struct Order { + immediate: bool, + succeed: bool, + orders: ~[Order] + } + fn next(latch: &mut JoinLatch, orders: ~[Order]) { + for orders.each |order| { + let suborders = copy order.orders; + let child_latch = Cell(latch.new_child()); + let succeed = order.succeed; + if order.immediate { + do spawntask_immediately { + let mut child_latch = child_latch.take(); + next(&mut *child_latch, copy suborders); + rtdebug!("immediate releasing"); + child_latch.release(succeed); + } + } else { + do spawntask_later { + let mut child_latch = child_latch.take(); + next(&mut *child_latch, copy suborders); + rtdebug!("later releasing"); + child_latch.release(succeed); + } + } + } + } + + do run_in_newsched_task { + let mut latch = JoinLatch::new_root(); + let orders = ~[ Order { // 0 0 + immediate: true, + succeed: true, + orders: ~[ Order { // 1 0 + immediate: true, + succeed: false, + orders: ~[ Order { // 2 0 + immediate: false, + succeed: false, + orders: ~[ Order { // 3 0 + immediate: true, + succeed: false, + orders: ~[] + }, Order { // 3 1 + immediate: false, + succeed: false, + orders: ~[] + }] + }] + }] + }]; + + next(&mut *latch, orders); + assert!(!latch.wait(true)); + } + } +} diff --git a/src/libstd/rt/metrics.rs b/src/libstd/rt/metrics.rs index 70e347fdfb6ac..b0c0fa5d70862 100644 --- a/src/libstd/rt/metrics.rs +++ b/src/libstd/rt/metrics.rs @@ -34,7 +34,11 @@ pub struct SchedMetrics { // Message receives that do not block the receiver rendezvous_recvs: uint, // Message receives that block the receiver - non_rendezvous_recvs: uint + 
non_rendezvous_recvs: uint, + // JoinLatch releases that create tombstones + release_tombstone: uint, + // JoinLatch releases that do not create tombstones + release_no_tombstone: uint, } impl SchedMetrics { @@ -51,7 +55,9 @@ impl SchedMetrics { rendezvous_sends: 0, non_rendezvous_sends: 0, rendezvous_recvs: 0, - non_rendezvous_recvs: 0 + non_rendezvous_recvs: 0, + release_tombstone: 0, + release_no_tombstone: 0 } } } @@ -70,6 +76,8 @@ impl ToStr for SchedMetrics { non_rendezvous_sends: %u\n\ rendezvous_recvs: %u\n\ non_rendezvous_recvs: %u\n\ + release_tombstone: %u\n\ + release_no_tombstone: %u\n\ ", self.turns, self.messages_received, @@ -82,7 +90,9 @@ impl ToStr for SchedMetrics { self.rendezvous_sends, self.non_rendezvous_sends, self.rendezvous_recvs, - self.non_rendezvous_recvs + self.non_rendezvous_recvs, + self.release_tombstone, + self.release_no_tombstone ) } } \ No newline at end of file diff --git a/src/libstd/rt/mod.rs b/src/libstd/rt/mod.rs index caf3e15e535af..2008c4a180f60 100644 --- a/src/libstd/rt/mod.rs +++ b/src/libstd/rt/mod.rs @@ -133,6 +133,9 @@ pub mod local_ptr; /// Bindings to pthread/windows thread-local storage. pub mod thread_local_storage; +/// A concurrent data structure with which parent tasks wait on child tasks. +pub mod join_latch; + pub mod metrics; diff --git a/src/libstd/rt/sched.rs b/src/libstd/rt/sched.rs index 97a1c26ed4dc4..104eb4b8baefa 100644 --- a/src/libstd/rt/sched.rs +++ b/src/libstd/rt/sched.rs @@ -151,7 +151,10 @@ pub impl Scheduler { // XXX: Reenable this once we're using a per-task queue. With a shared // queue this is not true //assert!(sched.work_queue.is_empty()); - rtdebug!("scheduler metrics: %s\n", sched.metrics.to_str()); + rtdebug!("scheduler metrics: %s\n", { + use to_str::ToStr; + sched.metrics.to_str() + }); return sched; } From fd148cd3e2d08ce15272f0690f6e41d2e85ee721 Mon Sep 17 00:00:00 2001 From: Brian Anderson Date: Thu, 13 Jun 2013 22:43:20 -0700 Subject: [PATCH 042/111] std::rt: Change the Task constructors to reflect a tree --- src/libstd/rt/mod.rs | 4 ++-- src/libstd/rt/sched.rs | 22 +++++++++---------- src/libstd/rt/task.rs | 26 +++++++++++++++++++++-- src/libstd/rt/test.rs | 46 +++++++++++++++++++++++++++++++--------- src/libstd/task/spawn.rs | 9 +++++++- 5 files changed, 81 insertions(+), 26 deletions(-) diff --git a/src/libstd/rt/mod.rs b/src/libstd/rt/mod.rs index 2008c4a180f60..a65b07fdbcf25 100644 --- a/src/libstd/rt/mod.rs +++ b/src/libstd/rt/mod.rs @@ -167,7 +167,7 @@ pub fn start(_argc: int, _argv: **u8, crate_map: *u8, main: ~fn()) -> int { let sleepers = SleeperList::new(); let mut sched = ~Scheduler::new(loop_, work_queue, sleepers); sched.no_sleep = true; - let main_task = ~Coroutine::new(&mut sched.stack_pool, main); + let main_task = ~Coroutine::new_root(&mut sched.stack_pool, main); sched.enqueue_task(main_task); sched.run(); @@ -241,7 +241,7 @@ fn test_context() { do run_in_bare_thread { assert_eq!(context(), GlobalContext); let mut sched = ~new_test_uv_sched(); - let task = ~do Coroutine::new(&mut sched.stack_pool) { + let task = ~do Coroutine::new_root(&mut sched.stack_pool) { assert_eq!(context(), TaskContext); let sched = Local::take::(); do sched.deschedule_running_task_and_then() |sched, task| { diff --git a/src/libstd/rt/sched.rs b/src/libstd/rt/sched.rs index 104eb4b8baefa..9abcc2ec3cc92 100644 --- a/src/libstd/rt/sched.rs +++ b/src/libstd/rt/sched.rs @@ -518,8 +518,8 @@ impl SchedHandle { } pub impl Coroutine { - fn new(stack_pool: &mut StackPool, start: ~fn()) -> Coroutine { - 
Coroutine::with_task(stack_pool, ~Task::new(), start) + fn new_root(stack_pool: &mut StackPool, start: ~fn()) -> Coroutine { + Coroutine::with_task(stack_pool, ~Task::new_root(), start) } fn with_task(stack_pool: &mut StackPool, @@ -614,7 +614,7 @@ mod test { let task_ran_ptr: *mut bool = &mut task_ran; let mut sched = ~new_test_uv_sched(); - let task = ~do Coroutine::new(&mut sched.stack_pool) { + let task = ~do Coroutine::new_root(&mut sched.stack_pool) { unsafe { *task_ran_ptr = true; } }; sched.enqueue_task(task); @@ -632,7 +632,7 @@ mod test { let mut sched = ~new_test_uv_sched(); for int::range(0, total) |_| { - let task = ~do Coroutine::new(&mut sched.stack_pool) { + let task = ~do Coroutine::new_root(&mut sched.stack_pool) { unsafe { *task_count_ptr = *task_count_ptr + 1; } }; sched.enqueue_task(task); @@ -649,10 +649,10 @@ mod test { let count_ptr: *mut int = &mut count; let mut sched = ~new_test_uv_sched(); - let task1 = ~do Coroutine::new(&mut sched.stack_pool) { + let task1 = ~do Coroutine::new_root(&mut sched.stack_pool) { unsafe { *count_ptr = *count_ptr + 1; } let mut sched = Local::take::(); - let task2 = ~do Coroutine::new(&mut sched.stack_pool) { + let task2 = ~do Coroutine::new_root(&mut sched.stack_pool) { unsafe { *count_ptr = *count_ptr + 1; } }; // Context switch directly to the new task @@ -677,7 +677,7 @@ mod test { let mut sched = ~new_test_uv_sched(); - let start_task = ~do Coroutine::new(&mut sched.stack_pool) { + let start_task = ~do Coroutine::new_root(&mut sched.stack_pool) { run_task(count_ptr); }; sched.enqueue_task(start_task); @@ -687,7 +687,7 @@ mod test { fn run_task(count_ptr: *mut int) { do Local::borrow:: |sched| { - let task = ~do Coroutine::new(&mut sched.stack_pool) { + let task = ~do Coroutine::new_root(&mut sched.stack_pool) { unsafe { *count_ptr = *count_ptr + 1; if *count_ptr != MAX { @@ -705,7 +705,7 @@ mod test { fn test_block_task() { do run_in_bare_thread { let mut sched = ~new_test_uv_sched(); - let task = ~do Coroutine::new(&mut sched.stack_pool) { + let task = ~do Coroutine::new_root(&mut sched.stack_pool) { let sched = Local::take::(); assert!(sched.in_task_context()); do sched.deschedule_running_task_and_then() |sched, task| { @@ -752,13 +752,13 @@ mod test { let mut sched1 = ~new_test_uv_sched(); let handle1 = sched1.make_handle(); let handle1_cell = Cell(handle1); - let task1 = ~do Coroutine::new(&mut sched1.stack_pool) { + let task1 = ~do Coroutine::new_root(&mut sched1.stack_pool) { chan_cell.take().send(()); }; sched1.enqueue_task(task1); let mut sched2 = ~new_test_uv_sched(); - let task2 = ~do Coroutine::new(&mut sched2.stack_pool) { + let task2 = ~do Coroutine::new_root(&mut sched2.stack_pool) { port_cell.take().recv(); // Release the other scheduler's handle so it can exit handle1_cell.take(); diff --git a/src/libstd/rt/task.rs b/src/libstd/rt/task.rs index cf4967b12b304..10b4672df05b3 100644 --- a/src/libstd/rt/task.rs +++ b/src/libstd/rt/task.rs @@ -37,7 +37,7 @@ pub struct Unwinder { } impl Task { - pub fn new() -> Task { + pub fn new_root() -> Task { Task { heap: LocalHeap::new(), gc: GarbageCollector, @@ -48,7 +48,29 @@ impl Task { } } - pub fn without_unwinding() -> Task { + pub fn new_root_without_unwinding() -> Task { + Task { + heap: LocalHeap::new(), + gc: GarbageCollector, + storage: LocalStorage(ptr::null(), None), + logger: StdErrLogger, + unwinder: None, + destroyed: false + } + } + + pub fn new_child(&mut self) -> Task { + Task { + heap: LocalHeap::new(), + gc: GarbageCollector, + storage: 
LocalStorage(ptr::null(), None), + logger: StdErrLogger, + unwinder: Some(Unwinder { unwinding: false }), + destroyed: false + } + } + + pub fn new_child_without_unwinding(&mut self) -> Task { Task { heap: LocalHeap::new(), gc: GarbageCollector, diff --git a/src/libstd/rt/test.rs b/src/libstd/rt/test.rs index c8df3a6120338..4a4d498a26e75 100644 --- a/src/libstd/rt/test.rs +++ b/src/libstd/rt/test.rs @@ -48,7 +48,7 @@ pub fn run_in_newsched_task(f: ~fn()) { do run_in_bare_thread { let mut sched = ~new_test_uv_sched(); let task = ~Coroutine::with_task(&mut sched.stack_pool, - ~Task::without_unwinding(), + ~Task::new_root_without_unwinding(), f.take()); sched.enqueue_task(task); sched.run(); @@ -94,7 +94,7 @@ pub fn run_in_mt_newsched_task(f: ~fn()) { let f_cell = Cell(f_cell.take()); let handles = Cell(handles); - let main_task = ~do Coroutine::new(&mut scheds[0].stack_pool) { + let main_task = ~do Coroutine::new_root(&mut scheds[0].stack_pool) { f_cell.take()(); let mut handles = handles.take(); @@ -132,9 +132,14 @@ pub fn run_in_mt_newsched_task(f: ~fn()) { pub fn spawntask(f: ~fn()) { use super::sched::*; + let mut task = None; + do Local::borrow::() |running_task| { + task = Some(~running_task.new_child_without_unwinding()); + } + let mut sched = Local::take::(); let task = ~Coroutine::with_task(&mut sched.stack_pool, - ~Task::without_unwinding(), + task.swap_unwrap(), f); sched.schedule_new_task(task); } @@ -143,9 +148,14 @@ pub fn spawntask(f: ~fn()) { pub fn spawntask_immediately(f: ~fn()) { use super::sched::*; + let mut task = None; + do Local::borrow::() |running_task| { + task = Some(~running_task.new_child_without_unwinding()); + } + let mut sched = Local::take::(); let task = ~Coroutine::with_task(&mut sched.stack_pool, - ~Task::without_unwinding(), + task.swap_unwrap(), f); do sched.switch_running_tasks_and_then(task) |sched, task| { sched.enqueue_task(task); @@ -156,9 +166,14 @@ pub fn spawntask_immediately(f: ~fn()) { pub fn spawntask_later(f: ~fn()) { use super::sched::*; + let mut task = None; + do Local::borrow::() |running_task| { + task = Some(~running_task.new_child_without_unwinding()); + } + let mut sched = Local::take::(); let task = ~Coroutine::with_task(&mut sched.stack_pool, - ~Task::without_unwinding(), + task.swap_unwrap(), f); sched.enqueue_task(task); @@ -170,14 +185,19 @@ pub fn spawntask_random(f: ~fn()) { use super::sched::*; use rand::{Rand, rng}; - let mut rng = rng(); - let run_now: bool = Rand::rand(&mut rng); + let mut task = None; + do Local::borrow::() |running_task| { + task = Some(~running_task.new_child_without_unwinding()); + } let mut sched = Local::take::(); let task = ~Coroutine::with_task(&mut sched.stack_pool, - ~Task::without_unwinding(), + task.swap_unwrap(), f); + let mut rng = rng(); + let run_now: bool = Rand::rand(&mut rng); + if run_now { do sched.switch_running_tasks_and_then(task) |sched, task| { sched.enqueue_task(task); @@ -206,7 +226,7 @@ pub fn spawntask_try(f: ~fn()) -> Result<(), ()> { do sched.deschedule_running_task_and_then() |sched, old_task| { let old_task = Cell(old_task); let f = f.take(); - let new_task = ~do Coroutine::new(&mut sched.stack_pool) { + let new_task = ~do Coroutine::new_root(&mut sched.stack_pool) { do (|| { (f.take())() }).finally { @@ -229,11 +249,17 @@ pub fn spawntask_try(f: ~fn()) -> Result<(), ()> { pub fn spawntask_thread(f: ~fn()) -> Thread { use rt::sched::*; + let mut task = None; + do Local::borrow::() |running_task| { + task = Some(~running_task.new_child_without_unwinding()); + } + + let task 
= Cell(task.swap_unwrap()); let f = Cell(f); let thread = do Thread::start { let mut sched = ~new_test_uv_sched(); let task = ~Coroutine::with_task(&mut sched.stack_pool, - ~Task::without_unwinding(), + task.take(), f.take()); sched.enqueue_task(task); sched.run(); diff --git a/src/libstd/task/spawn.rs b/src/libstd/task/spawn.rs index 5941221821a85..a4fbec11d7233 100644 --- a/src/libstd/task/spawn.rs +++ b/src/libstd/task/spawn.rs @@ -91,6 +91,7 @@ use uint; use util; use unstable::sync::{Exclusive, exclusive}; use rt::local::Local; +use rt::task::Task; #[cfg(test)] use task::default_task_opts; @@ -576,8 +577,14 @@ pub fn spawn_raw(opts: TaskOpts, f: ~fn()) { fn spawn_raw_newsched(_opts: TaskOpts, f: ~fn()) { use rt::sched::*; + let mut task = None; + do Local::borrow::() |running_task| { + task = Some(~running_task.new_child_without_unwinding()); + } + let mut sched = Local::take::(); - let task = ~Coroutine::new(&mut sched.stack_pool, f); + let task = ~Coroutine::with_task(&mut sched.stack_pool, + task.swap_unwrap(), f); sched.schedule_new_task(task); } From 90fbe38f0064836fd5e169c520d3fd19953e5604 Mon Sep 17 00:00:00 2001 From: Brian Anderson Date: Thu, 13 Jun 2013 23:16:27 -0700 Subject: [PATCH 043/111] std::rt: Tasks must have an unwinder. Simpler --- src/libstd/rt/task.rs | 39 ++++----------------------------------- src/libstd/rt/test.rs | 12 ++++++------ src/libstd/sys.rs | 6 +----- src/libstd/task/mod.rs | 11 +---------- src/libstd/task/spawn.rs | 2 +- 5 files changed, 13 insertions(+), 57 deletions(-) diff --git a/src/libstd/rt/task.rs b/src/libstd/rt/task.rs index 10b4672df05b3..7c08dabf0bd89 100644 --- a/src/libstd/rt/task.rs +++ b/src/libstd/rt/task.rs @@ -25,7 +25,7 @@ pub struct Task { gc: GarbageCollector, storage: LocalStorage, logger: StdErrLogger, - unwinder: Option, + unwinder: Unwinder, destroyed: bool } @@ -43,18 +43,7 @@ impl Task { gc: GarbageCollector, storage: LocalStorage(ptr::null(), None), logger: StdErrLogger, - unwinder: Some(Unwinder { unwinding: false }), - destroyed: false - } - } - - pub fn new_root_without_unwinding() -> Task { - Task { - heap: LocalHeap::new(), - gc: GarbageCollector, - storage: LocalStorage(ptr::null(), None), - logger: StdErrLogger, - unwinder: None, + unwinder: Unwinder { unwinding: false }, destroyed: false } } @@ -65,18 +54,7 @@ impl Task { gc: GarbageCollector, storage: LocalStorage(ptr::null(), None), logger: StdErrLogger, - unwinder: Some(Unwinder { unwinding: false }), - destroyed: false - } - } - - pub fn new_child_without_unwinding(&mut self) -> Task { - Task { - heap: LocalHeap::new(), - gc: GarbageCollector, - storage: LocalStorage(ptr::null(), None), - logger: StdErrLogger, - unwinder: None, + unwinder: Unwinder { unwinding: false }, destroyed: false } } @@ -88,16 +66,7 @@ impl Task { assert!(ptr::ref_eq(task, self)); } - match self.unwinder { - Some(ref mut unwinder) => { - // If there's an unwinder then set up the catch block - unwinder.try(f); - } - None => { - // Otherwise, just run the body - f() - } - } + self.unwinder.try(f); self.destroy(); } diff --git a/src/libstd/rt/test.rs b/src/libstd/rt/test.rs index 4a4d498a26e75..ecfe93560b4b9 100644 --- a/src/libstd/rt/test.rs +++ b/src/libstd/rt/test.rs @@ -48,7 +48,7 @@ pub fn run_in_newsched_task(f: ~fn()) { do run_in_bare_thread { let mut sched = ~new_test_uv_sched(); let task = ~Coroutine::with_task(&mut sched.stack_pool, - ~Task::new_root_without_unwinding(), + ~Task::new_root(), f.take()); sched.enqueue_task(task); sched.run(); @@ -134,7 +134,7 @@ pub fn spawntask(f: 
~fn()) { let mut task = None; do Local::borrow::() |running_task| { - task = Some(~running_task.new_child_without_unwinding()); + task = Some(~running_task.new_child()); } let mut sched = Local::take::(); @@ -150,7 +150,7 @@ pub fn spawntask_immediately(f: ~fn()) { let mut task = None; do Local::borrow::() |running_task| { - task = Some(~running_task.new_child_without_unwinding()); + task = Some(~running_task.new_child()); } let mut sched = Local::take::(); @@ -168,7 +168,7 @@ pub fn spawntask_later(f: ~fn()) { let mut task = None; do Local::borrow::() |running_task| { - task = Some(~running_task.new_child_without_unwinding()); + task = Some(~running_task.new_child()); } let mut sched = Local::take::(); @@ -187,7 +187,7 @@ pub fn spawntask_random(f: ~fn()) { let mut task = None; do Local::borrow::() |running_task| { - task = Some(~running_task.new_child_without_unwinding()); + task = Some(~running_task.new_child()); } let mut sched = Local::take::(); @@ -251,7 +251,7 @@ pub fn spawntask_thread(f: ~fn()) -> Thread { let mut task = None; do Local::borrow::() |running_task| { - task = Some(~running_task.new_child_without_unwinding()); + task = Some(~running_task.new_child()); } let task = Cell(task.swap_unwrap()); diff --git a/src/libstd/sys.rs b/src/libstd/sys.rs index 137070ce20211..77085d195678d 100644 --- a/src/libstd/sys.rs +++ b/src/libstd/sys.rs @@ -226,11 +226,7 @@ pub fn begin_unwind_(msg: *c_char, file: *c_char, line: size_t) -> ! { gc::cleanup_stack_for_failure(); let task = Local::unsafe_borrow::(); - let unwinder: &mut Option = &mut (*task).unwinder; - match *unwinder { - Some(ref mut unwinder) => unwinder.begin_unwind(), - None => abort!("failure without unwinder. aborting process") - } + (*task).unwinder.begin_unwind(); } } } diff --git a/src/libstd/task/mod.rs b/src/libstd/task/mod.rs index f24d2327358be..faa505c199513 100644 --- a/src/libstd/task/mod.rs +++ b/src/libstd/task/mod.rs @@ -515,16 +515,7 @@ pub fn failing() -> bool { _ => { let mut unwinding = false; do Local::borrow:: |local| { - unwinding = match local.unwinder { - Some(unwinder) => { - unwinder.unwinding - } - None => { - // Because there is no unwinder we can't be unwinding. - // (The process will abort on failure) - false - } - } + unwinding = local.unwinder.unwinding } return unwinding; } diff --git a/src/libstd/task/spawn.rs b/src/libstd/task/spawn.rs index a4fbec11d7233..a17a6777a98fb 100644 --- a/src/libstd/task/spawn.rs +++ b/src/libstd/task/spawn.rs @@ -579,7 +579,7 @@ fn spawn_raw_newsched(_opts: TaskOpts, f: ~fn()) { let mut task = None; do Local::borrow::() |running_task| { - task = Some(~running_task.new_child_without_unwinding()); + task = Some(~running_task.new_child()); } let mut sched = Local::take::(); From 74e72551930d4c58e747a60e2b39d3010e40d0ae Mon Sep 17 00:00:00 2001 From: Eric Reed Date: Fri, 14 Jun 2013 11:39:46 -0700 Subject: [PATCH 044/111] Added a utility function to extract the udp handle from udp send requests. 
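libuv records, on every uv_udp_send_t request, a pointer to the uv_udp_t handle that
issued it, so a send callback that is only given the request can still recover the
watcher without any extra bookkeeping. A minimal sketch of the intended call pattern
on the Rust side (illustrative only, assuming the uvll binding
get_udp_handle_from_send_req and the UdpWatcher / NativeHandle /
status_to_maybe_uv_error items from the surrounding patches):

    extern fn send_cb(req: *uvll::uv_udp_send_t, status: c_int) {
        // Hypothetical callback body: recover the UDP watcher that started this
        // send directly from the request passed to the callback.
        let udp_handle = unsafe { uvll::get_udp_handle_from_send_req(req) };
        let watcher: UdpWatcher = NativeHandle::from_native_handle(udp_handle);
        let status = status_to_maybe_uv_error(watcher.native_handle(), status);
        // ... look up and invoke the stored UdpSendCallback with `watcher` and `status` ...
    }

The UdpSendRequest wrapper added two patches later packages this same lookup behind
its handle() method.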
--- src/rt/rust_uv.cpp | 5 +++++ src/rt/rustrt.def.in | 1 + 2 files changed, 6 insertions(+) diff --git a/src/rt/rust_uv.cpp b/src/rt/rust_uv.cpp index 67a2f614b699a..2fb9dc2f1a25c 100644 --- a/src/rt/rust_uv.cpp +++ b/src/rt/rust_uv.cpp @@ -330,6 +330,11 @@ rust_uv_udp_recv_stop(uv_udp_t* server) { return uv_udp_recv_stop(server); } +extern "C" uv_udp_t* +rust_uv_get_udp_handle_from_send_req(uv_udp_send_t* send_req) { + return send_req->handle; +} + extern "C" int rust_uv_listen(uv_stream_t* stream, int backlog, uv_connection_cb cb) { diff --git a/src/rt/rustrt.def.in b/src/rt/rustrt.def.in index cb7189c5a8321..4d7fa589f6fa7 100644 --- a/src/rt/rustrt.def.in +++ b/src/rt/rustrt.def.in @@ -111,6 +111,7 @@ rust_uv_udp_send rust_uv_udp_send6 rust_uv_udp_recv_start rust_uv_udp_recv_stop +rust_uv_get_udp_handle_from_send_req rust_uv_listen rust_uv_accept rust_uv_write From 03fe59aefa6ec84531335fd93e8dbf44dee65570 Mon Sep 17 00:00:00 2001 From: Eric Reed Date: Fri, 14 Jun 2013 11:54:56 -0700 Subject: [PATCH 045/111] added bindings to extract udp handle from udp send requests --- src/libstd/rt/uv/uvll.rs | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/src/libstd/rt/uv/uvll.rs b/src/libstd/rt/uv/uvll.rs index cebd498aa024a..4c0791b3b5c7c 100644 --- a/src/libstd/rt/uv/uvll.rs +++ b/src/libstd/rt/uv/uvll.rs @@ -66,6 +66,9 @@ pub type uv_fs_t = c_void; pub type uv_udp_send_t = c_void; pub type uv_idle_cb = *u8; +pub type uv_alloc_cb = *u8; +pub type uv_udp_send_cb = *u8; +pub type uv_udp_recv_cb = *u8; pub type sockaddr_in = c_void; pub type sockaddr_in6 = c_void; @@ -198,20 +201,20 @@ pub unsafe fn udp_bind6(server: *uv_udp_t, addr: *sockaddr_in6, flags: c_uint) - } pub unsafe fn udp_send(req: *uv_udp_send_t, handle: *T, buf_in: &[uv_buf_t], - addr: *sockaddr_in, cb: *u8) -> c_int { + addr: *sockaddr_in, cb: uv_udp_send_cb) -> c_int { let buf_ptr = vec::raw::to_ptr(buf_in); let buf_cnt = buf_in.len() as i32; return rust_uv_udp_send(req, handle as *c_void, buf_ptr, buf_cnt, addr, cb); } pub unsafe fn udp_send6(req: *uv_udp_send_t, handle: *T, buf_in: &[uv_buf_t], - addr: *sockaddr_in6, cb: *u8) -> c_int { + addr: *sockaddr_in6, cb: uv_udp_send_cb) -> c_int { let buf_ptr = vec::raw::to_ptr(buf_in); let buf_cnt = buf_in.len() as i32; return rust_uv_udp_send(req, handle as *c_void, buf_ptr, buf_cnt, addr, cb); } -pub unsafe fn udp_recv_start(server: *uv_udp_t, on_alloc: *u8, on_recv: *u8) -> c_int { +pub unsafe fn udp_recv_start(server: *uv_udp_t, on_alloc: uv_alloc_cb, on_recv: uv_udp_recv_cb) -> c_int { return rust_uv_udp_recv_start(server, on_alloc, on_recv); } @@ -219,6 +222,10 @@ pub unsafe fn udp_recv_stop(server: *uv_udp_t) -> c_int { return rust_uv_udp_recv_stop(server); } +pub unsafe fn get_udp_handle_from_send_req(send_req: *uv_udp_send_t) -> *uv_udp_t { + return rust_uv_get_udp_handle_from_send_req(send_req); +} + pub unsafe fn tcp_init(loop_handle: *c_void, handle: *uv_tcp_t) -> c_int { return rust_uv_tcp_init(loop_handle, handle); } @@ -269,7 +276,7 @@ pub unsafe fn write(req: *uv_write_t, stream: *T, buf_in: &[uv_buf_t], cb: *u let buf_cnt = buf_in.len() as i32; return rust_uv_write(req as *c_void, stream as *c_void, buf_ptr, buf_cnt, cb); } -pub unsafe fn read_start(stream: *uv_stream_t, on_alloc: *u8, on_read: *u8) -> c_int { +pub unsafe fn read_start(stream: *uv_stream_t, on_alloc: uv_alloc_cb, on_read: *u8) -> c_int { return rust_uv_read_start(stream as *c_void, on_alloc, on_read); } @@ -463,6 +470,7 @@ extern { buf_cnt: c_int, addr: *sockaddr_in6, 
cb: *u8) -> c_int; fn rust_uv_udp_recv_start(server: *uv_udp_t, on_alloc: *u8, on_recv: *u8) -> c_int; fn rust_uv_udp_recv_stop(server: *uv_udp_t) -> c_int; + fn rust_uv_get_udp_handle_from_send_req(req: *uv_udp_send_t) -> *uv_udp_t; fn rust_uv_listen(stream: *c_void, backlog: c_int, cb: *u8) -> c_int; fn rust_uv_accept(server: *c_void, client: *c_void) -> c_int; From a7f92c92ed07588c3bde3cc38e64b9289ea682f5 Mon Sep 17 00:00:00 2001 From: Eric Reed Date: Fri, 14 Jun 2013 12:04:11 -0700 Subject: [PATCH 046/111] Added a UdpWatcher and UdpSendRequest with associated callbacks --- src/libstd/rt/uv/mod.rs | 15 +++- src/libstd/rt/uv/net.rs | 174 +++++++++++++++++++++++++++++++++++++++- 2 files changed, 184 insertions(+), 5 deletions(-) diff --git a/src/libstd/rt/uv/mod.rs b/src/libstd/rt/uv/mod.rs index 5f9e56608149f..f7cc5c6cc8b02 100644 --- a/src/libstd/rt/uv/mod.rs +++ b/src/libstd/rt/uv/mod.rs @@ -54,7 +54,7 @@ use rt::io::IoError; #[cfg(test)] use unstable::run_in_bare_thread; pub use self::file::FsRequest; -pub use self::net::{StreamWatcher, TcpWatcher}; +pub use self::net::{StreamWatcher, TcpWatcher, UdpWatcher}; pub use self::idle::IdleWatcher; pub use self::timer::TimerWatcher; pub use self::async::AsyncWatcher; @@ -128,6 +128,8 @@ pub type ConnectionCallback = ~fn(StreamWatcher, Option); pub type FsCallback = ~fn(FsRequest, Option); pub type TimerCallback = ~fn(TimerWatcher, Option); pub type AsyncCallback = ~fn(AsyncWatcher, Option); +pub type UdpReceiveCallback = ~fn(UdpWatcher, int, Buf, Ipv4, uint, Option); +pub type UdpSendCallback = ~fn(UdpWatcher, Option); /// Callbacks used by StreamWatchers, set as custom data on the foreign handle @@ -139,7 +141,9 @@ struct WatcherData { alloc_cb: Option, idle_cb: Option, timer_cb: Option, - async_cb: Option + async_cb: Option, + udp_recv_cb: Option, + udp_send_cb: Option } pub trait WatcherInterop { @@ -169,7 +173,9 @@ impl> WatcherInterop for W { alloc_cb: None, idle_cb: None, timer_cb: None, - async_cb: None + async_cb: None, + udp_recv_cb: None, + udp_send_cb: None }; let data = transmute::<~WatcherData, *c_void>(data); uvll::set_data_for_uv_handle(self.native_handle(), data); @@ -309,6 +315,9 @@ pub fn status_to_maybe_uv_error(handle: *T, status: c_int) -> Option /// The uv buffer type pub type Buf = uvll::uv_buf_t; +/// The uv IPv4 type +pub type Ipv4 = uvll::sockaddr_in; + /// Borrow a slice to a Buf pub fn slice_to_uv_buf(v: &[u8]) -> Buf { let data = vec::raw::to_ptr(v); diff --git a/src/libstd/rt/uv/net.rs b/src/libstd/rt/uv/net.rs index 68b871e6b3118..4079a2f7b770b 100644 --- a/src/libstd/rt/uv/net.rs +++ b/src/libstd/rt/uv/net.rs @@ -9,10 +9,10 @@ // except according to those terms. 
use prelude::*; -use libc::{size_t, ssize_t, c_int, c_void}; +use libc::{size_t, ssize_t, c_int, c_void, c_uint}; use rt::uv::uvll; use rt::uv::uvll::*; -use rt::uv::{AllocCallback, ConnectionCallback, ReadCallback}; +use rt::uv::{AllocCallback, ConnectionCallback, ReadCallback, UdpReceiveCallback, UdpSendCallback}; use rt::uv::{Loop, Watcher, Request, UvError, Buf, NativeHandle, NullCallback, status_to_maybe_uv_error}; use rt::io::net::ip::{IpAddr, Ipv4, Ipv6}; @@ -254,6 +254,142 @@ impl NativeHandle<*uvll::uv_tcp_t> for TcpWatcher { } } +pub struct UdpWatcher(*uvll::uv_udp_t); +impl Watcher for UdpWatcher { } + +pub impl UdpWatcher { + fn new(loop_: &mut Loop) -> UdpWatcher { + unsafe { + let handle = malloc_handle(UV_UDP); + assert!(handle.is_not_null()); + assert_eq!(0, uvll::udp_init(loop_.native_handle(), handle)); + let mut watcher: UdpWatcher = NativeHandle::from_native_handle(handle); + watcher.install_watcher_data(); + return watcher; + } + } + + fn bind(&mut self, address: IpAddr) -> Result<(), UvError> { + match address { + Ipv4(*) => { + do ip4_as_uv_ip4(address) |addr| { + let result = unsafe { + uvll::udp_bind(self.native_handle(), addr, 0u32) + }; + if result == 0 { + Ok(()) + } else { + Err(last_uv_error(self)) + } + } + } + _ => fail!() // TODO ipv6 + } + } + + fn recv_start(&mut self, alloc: AllocCallback, cb: UdpReceiveCallback) { + { + let data = self.get_watcher_data(); + data.alloc_cb = Some(alloc); + data.udp_recv_cb = Some(cb); + } + + let handle = self.native_handle(); + unsafe { uvll::read_start(handle, alloc_cb, recv_cb); } + + extern fn alloc_cb(handle: *uvll::uv_udp_t, suggested_size: size_t) -> Buf { + let mut udp_watcher: UdpWatcher = NativeHandle::from_native_handle(handle); + let data = udp_watcher.get_watcher_data(); + let alloc_cb = data.alloc_cb.get_ref(); + return (*alloc_cb)(suggested_size as uint); + } + + /* TODO the socket address should actually be a pointer to either a sockaddr_in or sockaddr_in6. 
+ In libuv, the udp_recv callback takes a struct *sockaddr */ + extern fn recv_cb(handle: *uvll::uv_udp_t, nread: ssize_t, buf: Buf, + address: *uvll::sockaddr_in, flags: c_uint) { + rtdebug!("buf addr: %x", buf.base as uint); + rtdebug!("buf len: %d", buf.len as int); + let mut udp_watcher: UdpWatcher = NativeHandle::from_native_handle(handle); + let data = udp_watcher.get_watcher_data(); + let cb = data.udp_recv_cb.get_ref(); + let status = status_to_maybe_uv_error(handle, nread as c_int); + unsafe { (*cb)(udp_watcher, nread as int, buf, *address, flags as uint, status) }; + } + } + + fn recv_stop(&mut self) { + let handle = self.native_handle(); + unsafe { uvll::udp_recv_stop(handle); } + } + + fn send(&mut self, buf: Buf, address: IpAddr, cb: UdpSendCallback) { + { + let data = self.get_watcher_data(); + assert!(data.udp_send_cb.is_none()); + data.udp_send_cb = Some(cb); + } + + let req = UdpSendRequest::new(); + let bufs = [buf]; + match address { + Ipv4(*) => { + do ip4_as_uv_ip4(address) |addr| { + unsafe { + assert!(0 == uvll::udp_send(req.native_handle(), + self.native_handle(), + bufs, addr, send_cb)); + } + } + } + _ => fail!() // TODO ipv6 + } + + extern fn send_cb(req: *uvll::uv_udp_send_t, status: c_int) { + let send_request: UdpSendRequest = NativeHandle::from_native_handle(req); + let mut udp_watcher = send_request.handle(); + send_request.delete(); + let cb = { + let data = udp_watcher.get_watcher_data(); + let cb = data.udp_send_cb.swap_unwrap(); + cb + }; + let status = status_to_maybe_uv_error(udp_watcher.native_handle(), status); + cb(udp_watcher, status); + } + } + + fn close(self, cb: NullCallback) { + { + let mut this = self; + let data = this.get_watcher_data(); + assert!(data.close_cb.is_none()); + data.close_cb = Some(cb); + } + + unsafe { uvll::close(self.native_handle(), close_cb); } + + extern fn close_cb(handle: *uvll::uv_udp_t) { + let mut udp_watcher: UdpWatcher = NativeHandle::from_native_handle(handle); + { + let data = udp_watcher.get_watcher_data(); + data.close_cb.swap_unwrap()(); + } + udp_watcher.drop_watcher_data(); + unsafe { free_handle(handle as *c_void) } + } + } +} + +impl NativeHandle<*uvll::uv_udp_t> for UdpWatcher { + fn from_native_handle(handle: *uvll::uv_udp_t) -> UdpWatcher { + UdpWatcher(handle) + } + fn native_handle(&self) -> *uvll::uv_udp_t { + match self { &UdpWatcher(ptr) => ptr } + } +} + // uv_connect_t is a subclass of uv_req_t struct ConnectRequest(*uvll::uv_connect_t); impl Request for ConnectRequest { } @@ -327,6 +463,40 @@ impl NativeHandle<*uvll::uv_write_t> for WriteRequest { } } +pub struct UdpSendRequest(*uvll::uv_udp_send_t); + +impl Request for UdpSendRequest { } + +pub impl UdpSendRequest { + fn new() -> UdpSendRequest { + let send_handle = unsafe { + malloc_req(UV_UDP_SEND) + }; + assert!(send_handle.is_not_null()); + let send_handle = send_handle as *uvll::uv_udp_send_t; + UdpSendRequest(send_handle) + } + + fn handle(&self) -> UdpWatcher { + unsafe { + let udp_handle = uvll::get_udp_handle_from_send_req(self.native_handle()); + NativeHandle::from_native_handle(udp_handle) + } + } + + fn delete(self) { + unsafe { free_req(self.native_handle() as *c_void) } + } +} + +impl NativeHandle<*uvll::uv_udp_send_t> for UdpSendRequest { + fn from_native_handle(handle: *uvll::uv_udp_send_t) -> UdpSendRequest { + UdpSendRequest(handle) + } + fn native_handle(&self) -> *uvll::uv_udp_send_t { + match self { &UdpSendRequest(ptr) => ptr } + } +} #[cfg(test)] mod test { From d1ec8b5fb85cb6fd4caed64223c5cb3fd920daab Mon Sep 17 
00:00:00 2001 From: toddaaro Date: Fri, 14 Jun 2013 12:17:56 -0700 Subject: [PATCH 047/111] redesigned the pinning to pin deal with things on dequeue, not on enqueue --- src/libstd/macros.rs | 11 - src/libstd/rt/local.rs | 6 +- src/libstd/rt/sched.rs | 541 ++++++++++++++++++++++++++------------- src/libstd/rt/task.rs | 14 +- src/libstd/rt/test.rs | 32 ++- src/libstd/task/spawn.rs | 2 +- 6 files changed, 393 insertions(+), 213 deletions(-) diff --git a/src/libstd/macros.rs b/src/libstd/macros.rs index bf5b36c7580ac..b01bd8f993c01 100644 --- a/src/libstd/macros.rs +++ b/src/libstd/macros.rs @@ -49,18 +49,7 @@ pub fn do_abort() -> ! { macro_rules! abort( ($( $msg:expr),+) => ( { rtdebug!($($msg),+); - -// do_abort(); - - // NB: This is in a fn to avoid putting the `unsafe` block in - // a macro, which causes spurious 'unnecessary unsafe block' - // warnings. -// fn do_abort() -> ! { -// unsafe { ::libc::abort(); } -// } - ::macros::do_abort(); - } ) ) diff --git a/src/libstd/rt/local.rs b/src/libstd/rt/local.rs index 359cf5fc3e179..6e0fbda5ec9a7 100644 --- a/src/libstd/rt/local.rs +++ b/src/libstd/rt/local.rs @@ -30,7 +30,7 @@ impl Local for Scheduler { fn borrow(f: &fn(&mut Scheduler) -> T) -> T { let mut res: Option = None; let res_ptr: *mut Option = &mut res; - unsafe { + unsafe { do local_ptr::borrow |sched| { let result = f(sched); *res_ptr = Some(result); @@ -39,7 +39,7 @@ impl Local for Scheduler { match res { Some(r) => { r } None => abort!("function failed!") - } + } } unsafe fn unsafe_borrow() -> *mut Scheduler { local_ptr::unsafe_borrow() } unsafe fn try_unsafe_borrow() -> Option<*mut Scheduler> { abort!("unimpl") } @@ -139,5 +139,5 @@ mod test { assert!(res) let _scheduler: ~Scheduler = Local::take(); } - + } diff --git a/src/libstd/rt/sched.rs b/src/libstd/rt/sched.rs index 4bc61d638248d..3b8a31d1840b3 100644 --- a/src/libstd/rt/sched.rs +++ b/src/libstd/rt/sched.rs @@ -90,27 +90,10 @@ pub struct Coroutine { priv saved_context: Context, /// The heap, GC, unwinding, local storage, logging task: ~Task, - /// The scheduler that this task calls home - home_sched: SchedHome } -// To send a Coroutine to another task we have to use contained home -// information (the SchedHandle). So we need a form that doesn't -// include one. - -// XXX perf: Evaluate this structure - there should be a clever way to -// make it such that we don't need to deal with building/destructing -// on Coroutines that aren't homed. - -pub struct HomelessCoroutine { - priv current_stack_segment: StackSegment, - priv saved_context: Context, - task: ~Task -} - // A scheduler home is either a handle to the home scheduler, or an // explicit "AnySched". - pub enum SchedHome { AnySched, Sched(SchedHandle) @@ -119,7 +102,7 @@ pub enum SchedHome { pub enum SchedMessage { Wake, Shutdown, - BiasedTask(~HomelessCoroutine) + PinnedTask(~Coroutine) } enum CleanupJob { @@ -193,6 +176,7 @@ pub impl Scheduler { (*event_loop).run(); } + rtdebug!("run taking sched"); let sched = Local::take::(); // XXX: Reenable this once we're using a per-task queue. With a shared // queue this is not true @@ -214,6 +198,7 @@ pub impl Scheduler { if sched.interpret_message_queue() { // We performed a scheduling action. There may be other work // to do yet, so let's try again later. 
+ rtdebug!("run_sched_once, interpret_message_queue taking sched"); let mut sched = Local::take::(); sched.metrics.messages_received += 1; sched.event_loop.callback(Scheduler::run_sched_once); @@ -222,6 +207,7 @@ pub impl Scheduler { } // Now, look in the work queue for tasks to run + rtdebug!("run_sched_once taking"); let sched = Local::take::(); if sched.resume_task_from_queue() { // We performed a scheduling action. There may be other work @@ -271,7 +257,7 @@ pub impl Scheduler { // We don't want to queue tasks that belong on other threads, // so we send them home at enqueue time. - + // The borrow checker doesn't like our disassembly of the // Coroutine struct and partial use and mutation of the // fields. So completely disassemble here and stop using? @@ -283,95 +269,31 @@ pub impl Scheduler { let this = self; - match task { - ~Coroutine { current_stack_segment: css, - saved_context: sc, - task: t, - home_sched: home_sched } => { - - let mut home_sched = home_sched; - - match home_sched { - Sched(ref mut home_handle) - if home_handle.sched_id != this.sched_id() => { - - // In this branch we know the task is not - // home, so we send it home. - - rtdebug!("home_handle_id: %u, loc: %u", - home_handle.sched_id, - this.sched_id()); - let homeless = ~HomelessCoroutine { - current_stack_segment: css, - saved_context: sc, - task: t - }; - home_handle.send(BiasedTask(homeless)); - rtdebug!("sent task home"); - return (); - } - Sched( ref mut home_handle) => { - - // Here we know the task is home, so we need - // to "keep" it home. Since we don't have a - // scheduler-local queue for this purpose, we - // just use our message queue. - - rtdebug!("homed task at home, sending to self"); - let homeless = ~HomelessCoroutine { - current_stack_segment: css, - saved_context: sc, - task: t - }; - home_handle.send(BiasedTask(homeless)); - rtdebug!("sent home to self"); - return (); - - } - _ => { - - // We just destroyed our Coroutine ... but now - // we want it back. Build a new one? - // XXX: perf: see above comment about not - // destroying - - let task = ~Coroutine { - current_stack_segment: css, - saved_context: sc, - task: t, - home_sched: AnySched }; - - - // We push the task onto our local queue. - this.work_queue.push(task); - this.event_loop.callback(Scheduler::run_sched_once); - - // We've made work available. Notify a - // sleeping scheduler. - - // XXX: perf. Check for a sleeper without - // synchronizing memory. It's not critical - // that we always find it. - - // XXX: perf. If there's a sleeper then we - // might as well just send it the task - // directly instead of pushing it to the - // queue. That is essentially the intent here - // and it is less work. - match this.sleeper_list.pop() { - Some(handle) => { - let mut handle = handle; - handle.send(Wake) - } - None => { (/* pass */) } - }; - } - } + // We push the task onto our local queue clone. + this.work_queue.push(task); + this.event_loop.callback(Scheduler::run_sched_once); + + // We've made work available. Notify a + // sleeping scheduler. + + // XXX: perf. Check for a sleeper without + // synchronizing memory. It's not critical + // that we always find it. + + // XXX: perf. If there's a sleeper then we + // might as well just send it the task + // directly instead of pushing it to the + // queue. That is essentially the intent here + // and it is less work. 
+ match this.sleeper_list.pop() { + Some(handle) => { + let mut handle = handle; + handle.send(Wake) } - } + None => { (/* pass */) } + }; } - // * Scheduler-context operations fn interpret_message_queue(~self) -> bool { @@ -381,23 +303,11 @@ pub impl Scheduler { let mut this = self; match this.message_queue.pop() { - Some(BiasedTask(~HomelessCoroutine { - current_stack_segment: css, - saved_context: sc, - task: t})) => { + Some(PinnedTask(task)) => { rtdebug!("recv BiasedTask message in sched: %u", this.sched_id()); - - // Since this was the "send home" message for a task, - // we know that this is the home. So we rebuild the - // sched_handle. - - let task = ~Coroutine { - current_stack_segment: css, - saved_context: sc, - task: t, - home_sched: Sched(this.make_handle()) - }; + let mut task = task; + task.task.home = Some(Sched(this.make_handle())); this.resume_task_immediately(task); return true; } @@ -438,32 +348,93 @@ pub impl Scheduler { } } + /// Given an input Coroutine sends it back to its home scheduler. + fn send_task_home(task: ~Coroutine) { + let mut task = task; + let mut home = task.task.home.swap_unwrap(); + match home { + Sched(ref mut home_handle) => { + home_handle.send(PinnedTask(task)); + } + AnySched => { + abort!("error: cannot send anysched task home"); + } + } + } + + // Resume a task from the queue - but also take into account that + // it might not belong here. fn resume_task_from_queue(~self) -> bool { assert!(!self.in_task_context()); rtdebug!("looking in work queue for task to schedule"); let mut this = self; - if this.run_anything { - match this.work_queue.pop() { - Some(task) => { - rtdebug!("resuming task from work queue"); - this.resume_task_immediately(task); - return true; - } - None => { - rtdebug!("no tasks in queue"); - Local::put(this); - return false; + // The borrow checker imposes the possibly absurd requirement + // that we split this into two match expressions. This is due + // to the inspection of the internal bits of task, as that + // can't be in scope when we act on task. + match this.work_queue.pop() { + Some(task) => { + let action_id = { + let home = &task.task.home; + match home { + &Some(Sched(ref home_handle)) + if home_handle.sched_id != this.sched_id() => { + 0 + } + &Some(AnySched) if this.run_anything => { + 1 + } + &Some(AnySched) => { + 2 + } + &Some(Sched(_)) => { + 3 + } + &None => { + 4 + } + } + }; + + match action_id { + 0 => { + rtdebug!("sending task home"); + Scheduler::send_task_home(task); + Local::put(this); + return false; + } + 1 => { + rtdebug!("resuming now"); + this.resume_task_immediately(task); + return true; + } + 2 => { + rtdebug!("re-queueing") + this.enqueue_task(task); + Local::put(this); + return false; + } + 3 => { + rtdebug!("resuming now"); + this.resume_task_immediately(task); + return true; + } + 4 => { + abort!("task home was None!"); + } + _ => { + abort!("literally, you should not be here"); + } } } - } else { - // In this branch we have a scheduler that is not allowed - // to run unpinned tasks. As such it will only get tasks - // to run from the message queue. 
- rtdebug!("skipping resume_task_from_queue"); - Local::put(this); - return false; + + None => { + rtdebug!("no tasks in queue"); + Local::put(this); + return false; + } } } @@ -484,21 +455,32 @@ pub impl Scheduler { abort!("control reached end of task"); } - fn schedule_new_task(~self, task: ~Coroutine) { + pub fn schedule_task(~self, task: ~Coroutine) { assert!(self.in_task_context()); - do self.switch_running_tasks_and_then(task) |sched, last_task| { - let last_task = Cell(last_task); - sched.enqueue_task(last_task.take()); - } - } + // is the task home? + let is_home = task.is_home_no_tls(&self); - fn schedule_task(~self, task: ~Coroutine) { - assert!(self.in_task_context()); + // does the task have a home? + let homed = task.homed(); + + let mut this = self; - do self.switch_running_tasks_and_then(task) |sched, last_task| { - let last_task = Cell(last_task); - sched.enqueue_task(last_task.take()); + if is_home || (!homed && this.run_anything) { + // here we know we are home, execute now OR we know we + // aren't homed, and that this sched doesn't care + do this.switch_running_tasks_and_then(task) |sched, last_task| { + let last_task = Cell(last_task); + sched.enqueue_task(last_task.take()); + } + } else if !homed && !this.run_anything { + // the task isn't homed, but it can't be run here + this.enqueue_task(task); + Local::put(this); + } else { + // task isn't home, so don't run it here, send it home + Scheduler::send_task_home(task); + Local::put(this); } } @@ -681,19 +663,66 @@ impl SchedHandle { pub impl Coroutine { - - /// This function checks that a coroutine is running "home". - fn am_home(&self) -> bool { + /// This function checks that a coroutine is running "home". + fn is_home(&self) -> bool { + rtdebug!("checking if coroutine is home"); do Local::borrow:: |sched| { - match self.home_sched { - AnySched => { true } - Sched(SchedHandle { sched_id: ref id, _ }) => { + match self.task.home { + Some(AnySched) => { false } + Some(Sched(SchedHandle { sched_id: ref id, _ })) => { *id == sched.sched_id() } + None => { abort!("error: homeless task!"); } + } + } + } + + /// Without access to self, but with access to the "expected home + /// id", see if we are home. + fn is_home_using_id(id: uint) -> bool { + rtdebug!("checking if coroutine is home using id"); + do Local::borrow:: |sched| { + if sched.sched_id() == id { + true + } else { + false + } + } + } + + /// Check if this coroutine has a home + fn homed(&self) -> bool { + rtdebug!("checking if this coroutine has a home"); + match self.task.home { + Some(AnySched) => { false } + Some(Sched(_)) => { true } + None => { abort!("error: homeless task!"); + } + } + } + + /// A version of is_home that does not need to use TLS, it instead + /// takes local scheduler as a parameter. + fn is_home_no_tls(&self, sched: &~Scheduler) -> bool { + rtdebug!("checking if coroutine is home without tls"); + match self.task.home { + Some(AnySched) => { true } + Some(Sched(SchedHandle { sched_id: ref id, _})) => { + *id == sched.sched_id() } + None => { abort!("error: homeless task!"); } + } + } + + /// Check TLS for the scheduler to see if we are on a special + /// scheduler. + pub fn on_special() -> bool { + rtdebug!("checking if coroutine is executing on special sched"); + do Local::borrow::() |sched| { + !sched.run_anything } - } - + } + // Created new variants of "new" that takes a home scheduler // parameter. The original with_task now calls with_task_homed // using the AnySched paramter. 
@@ -710,19 +739,20 @@ pub impl Coroutine { task: ~Task, start: ~fn(), home: SchedHome) -> Coroutine { - + static MIN_STACK_SIZE: uint = 10000000; // XXX: Too much stack - + let start = Coroutine::build_start_wrapper(start); let mut stack = stack_pool.take_segment(MIN_STACK_SIZE); // NB: Context holds a pointer to that ~fn let initial_context = Context::new(start, &mut stack); - return Coroutine { + let mut crt = Coroutine { current_stack_segment: stack, saved_context: initial_context, task: task, - home_sched: home }; + crt.task.home = Some(home); + return crt; } fn with_task(stack_pool: &mut StackPool, @@ -841,7 +871,7 @@ mod test { let sched_handle = sched.make_handle(); let sched_id = sched.sched_id(); - + let task = ~do Coroutine::new_homed(&mut sched.stack_pool, Sched(sched_handle)) { unsafe { *task_ran_ptr = true }; @@ -855,6 +885,146 @@ mod test { } } + // A test for each state of schedule_task + + #[test] + fn test_schedule_home_states() { + + use rt::uv::uvio::UvEventLoop; + use rt::sched::Shutdown; + use rt::sleeper_list::SleeperList; + use rt::work_queue::WorkQueue; + + do run_in_bare_thread { +// let nthreads = 2; + + let sleepers = SleeperList::new(); + let work_queue = WorkQueue::new(); + + // our normal scheduler + let mut normal_sched = ~Scheduler::new( + ~UvEventLoop::new(), + work_queue.clone(), + sleepers.clone()); + + let normal_handle = Cell(normal_sched.make_handle()); + + // our special scheduler + let mut special_sched = ~Scheduler::new_special( + ~UvEventLoop::new(), + work_queue.clone(), + sleepers.clone(), + true); + + let special_handle = Cell(special_sched.make_handle()); + let special_handle2 = Cell(special_sched.make_handle()); + let special_id = special_sched.sched_id(); + let t1_handle = special_sched.make_handle(); + let t4_handle = special_sched.make_handle(); + + let t1f = ~do Coroutine::new_homed(&mut special_sched.stack_pool, + Sched(t1_handle)) { + let is_home = Coroutine::is_home_using_id(special_id); + rtdebug!("t1 should be home: %b", is_home); + assert!(is_home); + }; + let t1f = Cell(t1f); + + let t2f = ~do Coroutine::new(&mut normal_sched.stack_pool) { + let on_special = Coroutine::on_special(); + rtdebug!("t2 should not be on special: %b", on_special); + assert!(!on_special); + }; + let t2f = Cell(t2f); + + let t3f = ~do Coroutine::new(&mut normal_sched.stack_pool) { + // not on special + let on_special = Coroutine::on_special(); + rtdebug!("t3 should not be on special: %b", on_special); + assert!(!on_special); + }; + let t3f = Cell(t3f); + + let t4f = ~do Coroutine::new_homed(&mut special_sched.stack_pool, + Sched(t4_handle)) { + // is home + let home = Coroutine::is_home_using_id(special_id); + rtdebug!("t4 should be home: %b", home); + assert!(home); + }; + let t4f = Cell(t4f); + + // we have four tests, make them as closures + let t1: ~fn() = || { + // task is home on special + let task = t1f.take(); + let sched = Local::take::(); + sched.schedule_task(task); + }; + let t2: ~fn() = || { + // not homed, task doesn't care + let task = t2f.take(); + let sched = Local::take::(); + sched.schedule_task(task); + }; + let t3: ~fn() = || { + // task not homed, must leave + let task = t3f.take(); + let sched = Local::take::(); + sched.schedule_task(task); + }; + let t4: ~fn() = || { + // task not home, send home + let task = t4f.take(); + let sched = Local::take::(); + sched.schedule_task(task); + }; + + let t1 = Cell(t1); + let t2 = Cell(t2); + let t3 = Cell(t3); + let t4 = Cell(t4); + + // build a main task that runs our four tests + let main_task 
= ~do Coroutine::new(&mut normal_sched.stack_pool) { + // the two tasks that require a normal start location + t2.take()(); + t4.take()(); + normal_handle.take().send(Shutdown); + special_handle.take().send(Shutdown); + }; + + // task to run the two "special start" tests + let special_task = ~do Coroutine::new_homed( + &mut special_sched.stack_pool, + Sched(special_handle2.take())) { + t1.take()(); + t3.take()(); + }; + + // enqueue the main tasks + normal_sched.enqueue_task(special_task); + normal_sched.enqueue_task(main_task); + + let nsched_cell = Cell(normal_sched); + let normal_thread = do Thread::start { + let sched = nsched_cell.take(); + sched.run(); + }; + + let ssched_cell = Cell(special_sched); + let special_thread = do Thread::start { + let sched = ssched_cell.take(); + sched.run(); + }; + + // wait for the end + let _thread1 = normal_thread; + let _thread2 = special_thread; + + } + } + // The following test is a bit of a mess, but it trys to do // something tricky so I'm not sure how to get around this in the // short term. @@ -865,9 +1035,9 @@ mod test { // observe that the task is not home, and send it home. // This test is light in that it does very little. - + #[test] - fn test_transfer_task_home() { + fn test_transfer_task_home() { use rt::uv::uvio::UvEventLoop; use rt::sched::Shutdown; @@ -879,18 +1049,18 @@ mod test { use vec::OwnedVector; do run_in_bare_thread { - + static N: uint = 8; - + let sleepers = SleeperList::new(); let work_queue = WorkQueue::new(); - + let mut handles = ~[]; let mut scheds = ~[]; - + for uint::range(0, N) |_| { let loop_ = ~UvEventLoop::new(); - let mut sched = ~Scheduler::new(loop_, + let mut sched = ~Scheduler::new(loop_, work_queue.clone(), sleepers.clone()); let handle = sched.make_handle(); @@ -901,7 +1071,7 @@ mod test { let handles = Cell(handles); - let home_handle = scheds[6].make_handle(); + let home_handle = scheds[6].make_handle(); let home_id = home_handle.sched_id; let home = Sched(home_handle); @@ -913,18 +1083,18 @@ mod test { sched.sched_id(), home_id); assert!(sched.sched_id() == home_id); - Local::put::(sched); + Local::put::(sched); let mut handles = handles.take(); for handles.each_mut |handle| { handle.send(Shutdown); } }; - + scheds[0].enqueue_task(main_task); - + let mut threads = ~[]; - + while !scheds.is_empty() { let sched = scheds.pop(); let sched_cell = Cell(sched); @@ -934,13 +1104,23 @@ mod test { }; threads.push(thread); } - + let _threads = threads; } } - + + // Do it a lot + + #[test] + fn test_stress_schedule_task_states() { + let n = stress_factor() * 120; + for int::range(0,n as int) |_| { + test_schedule_home_states(); + } + } + // The goal is that this is the high-stress test for making sure - // homing is working. It allocates 120*RUST_RT_STRESS tasks that + // homing is working. It allocates RUST_RT_STRESS tasks that // do nothing but assert that they are home at execution // time. These tasks are queued to random schedulers, so sometimes // they are home and sometimes not. 
It also runs RUST_RT_STRESS @@ -953,7 +1133,6 @@ mod test { run_in_mt_newsched_task_random_homed(); } } - #[test] fn test_simple_scheduling() { @@ -1210,8 +1389,8 @@ mod test { fn start_closure_dtor() { use ops::Drop; - // Regression test that the `start` task entrypoint can contain dtors - // that use task resources + // Regression test that the `start` task entrypoint can + // contain dtors that use task resources do run_in_newsched_task { struct S { field: () } @@ -1226,7 +1405,7 @@ mod test { do spawntask { let _ss = &s; } - } + } } } diff --git a/src/libstd/rt/task.rs b/src/libstd/rt/task.rs index 4d9851d3b409b..06318ac6623b0 100644 --- a/src/libstd/rt/task.rs +++ b/src/libstd/rt/task.rs @@ -19,6 +19,7 @@ use cast::transmute; use rt::local::Local; use super::local_heap::LocalHeap; use rt::logging::StdErrLogger; +use rt::sched::{SchedHome, AnySched}; pub struct Task { heap: LocalHeap, @@ -26,7 +27,8 @@ pub struct Task { storage: LocalStorage, logger: StdErrLogger, unwinder: Option, - destroyed: bool + destroyed: bool, + home: Option } pub struct GarbageCollector; @@ -44,7 +46,8 @@ impl Task { storage: LocalStorage(ptr::null(), None), logger: StdErrLogger, unwinder: Some(Unwinder { unwinding: false }), - destroyed: false + destroyed: false, + home: Some(AnySched) } } @@ -55,10 +58,15 @@ impl Task { storage: LocalStorage(ptr::null(), None), logger: StdErrLogger, unwinder: None, - destroyed: false + destroyed: false, + home: Some(AnySched) } } + pub fn give_home(&mut self, new_home: SchedHome) { + self.home = Some(new_home); + } + pub fn run(&mut self, f: &fn()) { // This is just an assertion that `run` was called unsafely // and this instance of Task is still accessible. diff --git a/src/libstd/rt/test.rs b/src/libstd/rt/test.rs index ecef505ce579d..bb284c0254179 100644 --- a/src/libstd/rt/test.rs +++ b/src/libstd/rt/test.rs @@ -162,18 +162,19 @@ pub fn run_in_mt_newsched_task_random_homed() { for uint::range(0, nthreads) |i| { let special = (i % 2) == 0; let loop_ = ~UvEventLoop::new(); - let mut sched = ~Scheduler::new_special(loop_, work_queue.clone(), sleepers.clone(), special); + let mut sched = ~Scheduler::new_special( + loop_, work_queue.clone(), sleepers.clone(), special); let handle = sched.make_handle(); handles.push(handle); scheds.push(sched); - } + } // Schedule a pile o tasks - let n = 5*stress_factor(); + let n = 5*stress_factor(); for uint::range(0,n) |_i| { rtdebug!("creating task: %u", _i); let hf: ~fn() = || { assert!(true) }; - spawntask_homed(&mut scheds, hf); + spawntask_homed(&mut scheds, hf); } // Now we want another pile o tasks that do not ever run on a @@ -182,11 +183,11 @@ pub fn run_in_mt_newsched_task_random_homed() { let n = 5*stress_factor(); - let f: ~fn() = || { + let f: ~fn() = || { for uint::range(0,n) |_| { - let f: ~fn() = || { + let f: ~fn() = || { // Borrow the scheduler we run on and check if it is - // privliged. + // privileged. 
do Local::borrow:: |sched| { assert!(sched.run_anything); }; @@ -194,12 +195,12 @@ pub fn run_in_mt_newsched_task_random_homed() { spawntask_random(f); }; }; - + let f_cell = Cell(f); let handles = Cell(handles); rtdebug!("creating main task"); - + let main_task = ~do Coroutine::new(&mut scheds[0].stack_pool) { f_cell.take()(); let mut handles = handles.take(); @@ -210,7 +211,7 @@ pub fn run_in_mt_newsched_task_random_homed() { }; rtdebug!("queuing main task") - + scheds[0].enqueue_task(main_task); let mut threads = ~[]; @@ -243,11 +244,13 @@ pub fn run_in_mt_newsched_task_random_homed() { pub fn spawntask(f: ~fn()) { use super::sched::*; + rtdebug!("spawntask taking the scheduler from TLS") let mut sched = Local::take::(); let task = ~Coroutine::with_task(&mut sched.stack_pool, ~Task::without_unwinding(), f); - sched.schedule_new_task(task); + rtdebug!("spawntask scheduling the new task"); + sched.schedule_task(task); } /// Create a new task and run it right now. Aborts on failure @@ -305,7 +308,7 @@ pub fn spawntask_homed(scheds: &mut ~[~Scheduler], f: ~fn()) { use super::sched::*; use rand::{rng, RngUtil}; let mut rng = rng(); - + let task = { let sched = &mut scheds[rng.gen_int_range(0,scheds.len() as int)]; let handle = sched.make_handle(); @@ -321,14 +324,15 @@ pub fn spawntask_homed(scheds: &mut ~[~Scheduler], f: ~fn()) { assert!(home_id == sched.sched_id()); }; f() - }; - + }; + ~Coroutine::with_task_homed(&mut sched.stack_pool, ~Task::without_unwinding(), af, Sched(handle)) }; let dest_sched = &mut scheds[rng.gen_int_range(0,scheds.len() as int)]; + // enqueue it for future execution dest_sched.enqueue_task(task); } diff --git a/src/libstd/task/spawn.rs b/src/libstd/task/spawn.rs index 5941221821a85..5e507238f671f 100644 --- a/src/libstd/task/spawn.rs +++ b/src/libstd/task/spawn.rs @@ -578,7 +578,7 @@ fn spawn_raw_newsched(_opts: TaskOpts, f: ~fn()) { let mut sched = Local::take::(); let task = ~Coroutine::new(&mut sched.stack_pool, f); - sched.schedule_new_task(task); + sched.schedule_task(task); } fn spawn_raw_oldsched(mut opts: TaskOpts, f: ~fn()) { From 505ef7e710ff890c0027fadad54997041b7ee93b Mon Sep 17 00:00:00 2001 From: Brian Anderson Date: Thu, 13 Jun 2013 23:31:19 -0700 Subject: [PATCH 048/111] std::rt: Tasks contain a JoinLatch --- src/libstd/rt/task.rs | 33 +++++++++++++++++++++++++- src/libstd/rt/test.rs | 55 +++++++++++++++++++------------------------ 2 files changed, 56 insertions(+), 32 deletions(-) diff --git a/src/libstd/rt/task.rs b/src/libstd/rt/task.rs index 7c08dabf0bd89..75ca4c941c50b 100644 --- a/src/libstd/rt/task.rs +++ b/src/libstd/rt/task.rs @@ -16,9 +16,11 @@ use prelude::*; use libc::{c_void, uintptr_t}; use cast::transmute; +use option::{Option, Some, None}; use rt::local::Local; use super::local_heap::LocalHeap; use rt::logging::StdErrLogger; +use rt::join_latch::JoinLatch; pub struct Task { heap: LocalHeap, @@ -26,6 +28,8 @@ pub struct Task { storage: LocalStorage, logger: StdErrLogger, unwinder: Unwinder, + join_latch: Option<~JoinLatch>, + on_exit: Option<~fn(bool)>, destroyed: bool } @@ -44,6 +48,8 @@ impl Task { storage: LocalStorage(ptr::null(), None), logger: StdErrLogger, unwinder: Unwinder { unwinding: false }, + join_latch: Some(JoinLatch::new_root()), + on_exit: None, destroyed: false } } @@ -55,6 +61,8 @@ impl Task { storage: LocalStorage(ptr::null(), None), logger: StdErrLogger, unwinder: Unwinder { unwinding: false }, + join_latch: Some(self.join_latch.get_mut_ref().new_child()), + on_exit: None, destroyed: false } } @@ -68,9 +76,22 @@ 
impl Task { self.unwinder.try(f); self.destroy(); + + // Wait for children. Possibly report the exit status. + let local_success = !self.unwinder.unwinding; + let join_latch = self.join_latch.swap_unwrap(); + match self.on_exit { + Some(ref on_exit) => { + let success = join_latch.wait(local_success); + (*on_exit)(success); + } + None => { + join_latch.release(local_success); + } + } } - /// Must be called manually before finalization to clean up + /// must be called manually before finalization to clean up /// thread-local resources. Some of the routines here expect /// Task to be available recursively so this must be /// called unsafely, without removing Task from @@ -216,5 +237,15 @@ mod test { assert!(port.recv() == 10); } } + + #[test] + fn linked_failure() { + do run_in_newsched_task() { + let res = do spawntask_try { + spawntask_random(|| fail!()); + }; + assert!(res.is_err()); + } + } } diff --git a/src/libstd/rt/test.rs b/src/libstd/rt/test.rs index ecfe93560b4b9..36e394e5c5bba 100644 --- a/src/libstd/rt/test.rs +++ b/src/libstd/rt/test.rs @@ -18,6 +18,7 @@ use vec::OwnedVector; use result::{Result, Ok, Err}; use unstable::run_in_bare_thread; use super::io::net::ip::{IpAddr, Ipv4}; +use rt::comm::oneshot; use rt::task::Task; use rt::thread::Thread; use rt::local::Local; @@ -47,8 +48,11 @@ pub fn run_in_newsched_task(f: ~fn()) { do run_in_bare_thread { let mut sched = ~new_test_uv_sched(); + let mut new_task = ~Task::new_root(); + let on_exit: ~fn(bool) = |exit_status| rtassert!(exit_status); + new_task.on_exit = Some(on_exit); let task = ~Coroutine::with_task(&mut sched.stack_pool, - ~Task::new_root(), + new_task, f.take()); sched.enqueue_task(task); sched.run(); @@ -94,16 +98,20 @@ pub fn run_in_mt_newsched_task(f: ~fn()) { let f_cell = Cell(f_cell.take()); let handles = Cell(handles); - let main_task = ~do Coroutine::new_root(&mut scheds[0].stack_pool) { - f_cell.take()(); + let mut new_task = ~Task::new_root(); + let on_exit: ~fn(bool) = |exit_status| { let mut handles = handles.take(); // Tell schedulers to exit for handles.each_mut |handle| { handle.send(Shutdown); } - }; + rtassert!(exit_status); + }; + new_task.on_exit = Some(on_exit); + let main_task = ~Coroutine::with_task(&mut scheds[0].stack_pool, + new_task, f_cell.take()); scheds[0].enqueue_task(main_task); let mut threads = ~[]; @@ -213,36 +221,21 @@ pub fn spawntask_random(f: ~fn()) { pub fn spawntask_try(f: ~fn()) -> Result<(), ()> { use cell::Cell; use super::sched::*; - use task; - use unstable::finally::Finally; - - // Our status variables will be filled in from the scheduler context - let mut failed = false; - let failed_ptr: *mut bool = &mut failed; - - // Switch to the scheduler - let f = Cell(Cell(f)); - let sched = Local::take::(); - do sched.deschedule_running_task_and_then() |sched, old_task| { - let old_task = Cell(old_task); - let f = f.take(); - let new_task = ~do Coroutine::new_root(&mut sched.stack_pool) { - do (|| { - (f.take())() - }).finally { - // Check for failure then resume the parent task - unsafe { *failed_ptr = task::failing(); } - let sched = Local::take::(); - do sched.switch_running_tasks_and_then(old_task.take()) |sched, new_task| { - sched.enqueue_task(new_task); - } - } - }; - sched.enqueue_task(new_task); + let (port, chan) = oneshot(); + let chan = Cell(chan); + let mut new_task = ~Task::new_root(); + let on_exit: ~fn(bool) = |exit_status| chan.take().send(exit_status); + new_task.on_exit = Some(on_exit); + let mut sched = Local::take::(); + let new_task = ~Coroutine::with_task(&mut 
sched.stack_pool, + new_task, f); + do sched.switch_running_tasks_and_then(new_task) |sched, old_task| { + sched.enqueue_task(old_task); } - if !failed { Ok(()) } else { Err(()) } + let exit_status = port.recv(); + if exit_status { Ok(()) } else { Err(()) } } // Spawn a new task in a new scheduler and return a thread handle. From 9687437d4543c2395b92ebcb910fcaf9d8b2cd44 Mon Sep 17 00:00:00 2001 From: Eric Reed Date: Mon, 17 Jun 2013 12:31:30 -0700 Subject: [PATCH 049/111] added wrappers about uv_ip{4,6}_{port,name} --- src/libstd/rt/uv/uvll.rs | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/src/libstd/rt/uv/uvll.rs b/src/libstd/rt/uv/uvll.rs index 4c0791b3b5c7c..06315c4cb383f 100644 --- a/src/libstd/rt/uv/uvll.rs +++ b/src/libstd/rt/uv/uvll.rs @@ -339,6 +339,22 @@ pub unsafe fn free_ip6_addr(addr: *sockaddr_in6) { rust_uv_free_ip6_addr(addr); } +pub unsafe fn ip4_name(addr: *sockaddr_in, dst: *u8, size: size_t) -> c_int { + return rust_uv_ip4_name(addr, dst, size); +} + +pub unsafe fn ip6_name(addr: *sockaddr_in6, dst: *u8, size: size_t) -> c_int { + return rust_uv_ip6_name(addr, dst, size); +} + +pub unsafe fn ip4_port(addr: *sockaddr_in) -> c_uint { + return rust_uv_ip4_port(addr); +} + +pub unsafe fn ip6_port(addr: *sockaddr_in6) -> c_uint { + return rust_uv_ip6_port(addr); +} + // data access helpers pub unsafe fn get_loop_for_uv_handle(handle: *T) -> *c_void { return rust_uv_get_loop_for_uv_handle(handle as *c_void); From b51d1885befe2779fa3fdc9e2a9bafa4ef3d5cf2 Mon Sep 17 00:00:00 2001 From: Eric Reed Date: Mon, 17 Jun 2013 12:32:21 -0700 Subject: [PATCH 050/111] Added a RtioUdpStream trait --- src/libstd/rt/rtio.rs | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/src/libstd/rt/rtio.rs b/src/libstd/rt/rtio.rs index fa657555f3aa0..0eebbb61e5b56 100644 --- a/src/libstd/rt/rtio.rs +++ b/src/libstd/rt/rtio.rs @@ -22,6 +22,7 @@ pub type RemoteCallbackObject = uvio::UvRemoteCallback; pub type IoFactoryObject = uvio::UvIoFactory; pub type RtioTcpStreamObject = uvio::UvTcpStream; pub type RtioTcpListenerObject = uvio::UvTcpListener; +pub type RtioUdpStreamObject = uvio::UvUdpStream; pub trait EventLoop { fn run(&mut self); @@ -44,6 +45,7 @@ pub trait RemoteCallback { pub trait IoFactory { fn tcp_connect(&mut self, addr: IpAddr) -> Result<~RtioTcpStreamObject, IoError>; fn tcp_bind(&mut self, addr: IpAddr) -> Result<~RtioTcpListenerObject, IoError>; + // TODO fn udp_connect(&mut self, addr: IpAddr) -> Result<~RtioUdpStreamObject, IoError>; } pub trait RtioTcpListener { @@ -54,3 +56,8 @@ pub trait RtioTcpStream { fn read(&mut self, buf: &mut [u8]) -> Result; fn write(&mut self, buf: &[u8]) -> Result<(), IoError>; } + +pub trait RtioUdpStream { + fn read(&mut self, buf: &mut [u8]) -> Result; + fn write(&mut self, buf: &[u8]) -> Result<(), IoError>; +} From 7e022c590fb0fb6083be7825df90045505e6fe47 Mon Sep 17 00:00:00 2001 From: Eric Reed Date: Mon, 17 Jun 2013 12:33:10 -0700 Subject: [PATCH 051/111] added a function to convert C's ipv4 data structure into the Rust ipv4 data structure. 
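The new uv_ip4_to_ip4 reads an address back out of a libuv sockaddr_in, using the
ip4_name and ip4_port wrappers added earlier in this series, and rebuilds a Rust Ipv4
value; it is the counterpart of the existing ip4_as_uv_ip4 helper in net.rs, which
makes a libuv sockaddr_in for a Rust IpAddr available to a closure. A minimal
round-trip sketch (hypothetical, not code from this patch), assuming both helpers and
the IpAddr Eq impl added by the following patch:

    // Hypothetical round-trip check: convert a Rust address to the libuv
    // representation and back; the octets and port are expected to survive.
    let addr = Ipv4(127, 0, 0, 1, 4321);
    do ip4_as_uv_ip4(addr) |uv_addr| {
        let round_tripped = uv_ip4_to_ip4(uv_addr);
        assert!(round_tripped == addr);
    }

A few patches later the uvio receive path relies on the same conversion to compare a
datagram's source address against the connected peer.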
--- src/libstd/rt/uv/net.rs | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/src/libstd/rt/uv/net.rs b/src/libstd/rt/uv/net.rs index 4079a2f7b770b..0b77bd83958eb 100644 --- a/src/libstd/rt/uv/net.rs +++ b/src/libstd/rt/uv/net.rs @@ -38,6 +38,14 @@ fn ip4_as_uv_ip4(addr: IpAddr, f: &fn(*sockaddr_in) -> T) -> T { } } +pub fn uv_ip4_to_ip4(addr: *sockaddr_in) -> IpAddr { + let ip4_size = 16; + let buf = vec::from_elem(ip4_size, 0u8); + unsafe { ip4_name(addr, &buf[0], ip4_size as u64) }; + let port = unsafe { ip4_port(addr) }; + Ipv4(buf[0], buf[1], buf[2], buf[3], port as u16) +} + // uv_stream t is the parent class of uv_tcp_t, uv_pipe_t, uv_tty_t // and uv_file_t pub struct StreamWatcher(*uvll::uv_stream_t); From 47443753f1197877f51f4a66d3475f8c9e4d5bc2 Mon Sep 17 00:00:00 2001 From: Eric Reed Date: Mon, 17 Jun 2013 12:33:46 -0700 Subject: [PATCH 052/111] added Eq and TotalEq instances for IpAddr --- src/libstd/rt/io/net/ip.rs | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/src/libstd/rt/io/net/ip.rs b/src/libstd/rt/io/net/ip.rs index df1dfe4d38ad1..f885c7f278861 100644 --- a/src/libstd/rt/io/net/ip.rs +++ b/src/libstd/rt/io/net/ip.rs @@ -8,7 +8,28 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +use std::cmp::{Eq, TotalEq, eq}; + pub enum IpAddr { Ipv4(u8, u8, u8, u8, u16), Ipv6 } + +impl Eq for IpAddr { + fn eq(&self, other: &IpAddr) -> bool { + match (*self, *other) { + (Ipv4(a,b,c,d,e), Ipv4(f,g,h,i,j)) => a == f && b == g && c == h && d == i && e == j, + (Ipv6, Ipv6) => fail!(), + _ => false + } + } + fn ne(&self, other: &IpAddr) -> bool { + !eq(self, other) + } +} + +impl TotalEq for IpAddr { + fn equals(&self, other: &IpAddr) -> bool { + *self == *other + } +} From e42f28c05cb8e579d06492c49822944946341c9f Mon Sep 17 00:00:00 2001 From: Eric Reed Date: Mon, 17 Jun 2013 12:34:58 -0700 Subject: [PATCH 053/111] stated to implement UdpStream --- src/libstd/rt/io/net/udp.rs | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/src/libstd/rt/io/net/udp.rs b/src/libstd/rt/io/net/udp.rs index bb5457e334dda..6275eff924988 100644 --- a/src/libstd/rt/io/net/udp.rs +++ b/src/libstd/rt/io/net/udp.rs @@ -8,13 +8,22 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use prelude::*; -use super::super::*; -use super::ip::IpAddr; +use option::{Option}; +use rt::io::net::ip::IpAddr; +use rt::io::{Reader, Writer, Listener}; +use rt::rtio::{RtioUdpStreamObject}; -pub struct UdpStream; +pub struct UdpStream { + rtstream: ~RtioUdpStreamObject +} impl UdpStream { + fn new(s: ~RtioUdpStreamObject) -> UdpStream { + UdpStream { + rtstream: s + } + } + pub fn connect(_addr: IpAddr) -> Option { fail!() } From 33ae193a3c1a156e73bf6880366c9785dd4b7393 Mon Sep 17 00:00:00 2001 From: Eric Reed Date: Mon, 17 Jun 2013 12:35:27 -0700 Subject: [PATCH 054/111] Started to implemented UdpStream --- src/libstd/rt/uv/uvio.rs | 80 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 80 insertions(+) diff --git a/src/libstd/rt/uv/uvio.rs b/src/libstd/rt/uv/uvio.rs index 0f98ab11513d6..1274dbc3220cf 100644 --- a/src/libstd/rt/uv/uvio.rs +++ b/src/libstd/rt/uv/uvio.rs @@ -433,6 +433,86 @@ impl RtioTcpStream for UvTcpStream { } } +pub struct UvUdpStream { + watcher: UdpWatcher, + address: IpAddr +} + +impl UvUdpStream { + fn watcher(&self) -> UdpWatcher { self.watcher } + fn address(&self) -> IpAddr { self.address } +} + +impl Drop for UvUdpStream { + fn finalize(&self) { + rtdebug!("closing udp stream"); + let watcher = self.watcher(); + let scheduler = Local::take::(); + do scheduler.deschedule_running_task_and_then |_, task| { + let task_cell = Cell(task); + do watcher.close { + let scheduler = Local::take::(); + scheduler.resume_task_immediately(task_cell.take()); + } + } + } +} + +impl RtioUdpStream for UvUdpStream { + fn read(&mut self, buf: &mut [u8]) -> Result { + let result_cell = empty_cell(); + let result_cell_ptr: *Cell> = &result_cell; + + let scheduler = Local::take::(); + assert!(scheduler.in_task_context()); + let watcher = self.watcher(); + let connection_address = self.address(); + let buf_ptr: *&mut [u8] = &buf; + do scheduler.deschedule_running_task_and_then |sched, task| { + rtdebug!("read: entered scheduler context"); + assert!(!sched.in_task_context()); + let mut watcher = watcher; + let task_cell = Cell(task); + // XXX: see note in RtioTcpStream implementation for UvTcpStream + let alloc: AllocCallback = |_| unsafe { + slice_to_uv_buf(*buf_ptr) + }; + do watcher.recv_start(alloc) |watcher, nread, _buf, addr, flags, status| { + let _ = flags; // TODO actually use flags + + // XXX: see note in RtioTcpStream implementation for UvTcpStream + let mut watcher = watcher; + watcher.recv_stop(); + + let incoming_address = net::uv_ip4_to_ip4(&addr); + let result = if status.is_none() { + assert!(nread >= 0); + if incoming_address != connection_address { + Ok(0u) + } else { + Ok(nread as uint) + } + } else { + Err(uv_error_to_io_error(status.unwrap())) + }; + + unsafe { (*result_cell_ptr).put_back(result); } + + let scheduler = Local::take::(); + scheduler.resume_task_immediately(task_cell.take()); + } + } + + assert!(!result_cell.is_empty()); + return result_cell.take(); + } + + fn write(&mut self, buf: &[u8]) -> Result<(), IoError> { + let _ = buf; + fail!() + } +} + #[test] fn test_simple_io_no_connect() { do run_in_newsched_task { From 3281f5b63792a57d2cea6e93446e63f44e1e3ea0 Mon Sep 17 00:00:00 2001 From: Brian Anderson Date: Mon, 17 Jun 2013 22:17:51 -0700 Subject: [PATCH 055/111] std::rt: Add util mod and num_cpus function --- src/libstd/rt/mod.rs | 3 +++ src/libstd/rt/test.rs | 7 ++----- src/libstd/rt/util.rs | 22 ++++++++++++++++++++++ 3 files changed, 27 insertions(+), 5 deletions(-) create mode 100644 src/libstd/rt/util.rs diff --git 
a/src/libstd/rt/mod.rs b/src/libstd/rt/mod.rs index 1724361cabcc0..10b5c78f99e5d 100644 --- a/src/libstd/rt/mod.rs +++ b/src/libstd/rt/mod.rs @@ -139,6 +139,9 @@ pub mod join_latch; pub mod metrics; +// FIXME #5248 shouldn't be pub +/// Just stuff +pub mod util; /// Set up a default runtime configuration, given compiler-supplied arguments. /// diff --git a/src/libstd/rt/test.rs b/src/libstd/rt/test.rs index 6e4fb9b1d9402..36efcd91834b8 100644 --- a/src/libstd/rt/test.rs +++ b/src/libstd/rt/test.rs @@ -63,11 +63,11 @@ pub fn run_in_newsched_task(f: ~fn()) { /// in one of the schedulers. The schedulers will stay alive /// until the function `f` returns. pub fn run_in_mt_newsched_task(f: ~fn()) { - use libc; use os; use from_str::FromStr; use rt::uv::uvio::UvEventLoop; use rt::sched::Shutdown; + use rt::util; let f_cell = Cell::new(f); @@ -78,7 +78,7 @@ pub fn run_in_mt_newsched_task(f: ~fn()) { // Using more threads than cores in test code // to force the OS to preempt them frequently. // Assuming that this help stress test concurrent types. - rust_get_num_cpus() * 2 + util::num_cpus() * 2 } }; @@ -132,9 +132,6 @@ pub fn run_in_mt_newsched_task(f: ~fn()) { let _threads = threads; } - extern { - fn rust_get_num_cpus() -> libc::uintptr_t; - } } // THIS IS AWFUL. Copy-pasted the above initialization function but diff --git a/src/libstd/rt/util.rs b/src/libstd/rt/util.rs new file mode 100644 index 0000000000000..39b9de90f3478 --- /dev/null +++ b/src/libstd/rt/util.rs @@ -0,0 +1,22 @@ +// Copyright 2013 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use libc; + +/// Get the number of cores available +pub fn num_cpus() -> uint { + unsafe { + return rust_get_num_cpus(); + } + + extern { + fn rust_get_num_cpus() -> libc::uintptr_t; + } +} \ No newline at end of file From 9ef4c413a869b4fc1e5df3f79f484b2ffd46cda0 Mon Sep 17 00:00:00 2001 From: Brian Anderson Date: Mon, 17 Jun 2013 23:18:20 -0700 Subject: [PATCH 056/111] std::rt: Check exchange count on exit --- src/libstd/rt/global_heap.rs | 37 ++++++++++++++++++++++++++++++------ src/libstd/rt/mod.rs | 6 ++++++ 2 files changed, 37 insertions(+), 6 deletions(-) diff --git a/src/libstd/rt/global_heap.rs b/src/libstd/rt/global_heap.rs index ce7ff87b44580..9f273fc8e6535 100644 --- a/src/libstd/rt/global_heap.rs +++ b/src/libstd/rt/global_heap.rs @@ -14,7 +14,7 @@ use c_malloc = libc::malloc; use c_free = libc::free; use managed::raw::{BoxHeaderRepr, BoxRepr}; use cast::transmute; -use unstable::intrinsics::{atomic_xadd,atomic_xsub}; +use unstable::intrinsics::{atomic_xadd,atomic_xsub, atomic_load}; use ptr::null; use intrinsic::TyDesc; @@ -34,8 +34,7 @@ pub unsafe fn malloc(td: *TypeDesc, size: uint) -> *c_void { box.header.prev = null(); box.header.next = null(); - let exchange_count = &mut *exchange_count_ptr(); - atomic_xadd(exchange_count, 1); + inc_count(); return transmute(box); } @@ -48,21 +47,47 @@ pub unsafe fn malloc_raw(size: uint) -> *c_void { if p.is_null() { fail!("Failure in malloc_raw: result ptr is null"); } + inc_count(); p } pub unsafe fn free(ptr: *c_void) { - let exchange_count = &mut *exchange_count_ptr(); - atomic_xsub(exchange_count, 1); - assert!(ptr.is_not_null()); + dec_count(); c_free(ptr); } ///Thin wrapper around libc::free, as with exchange_alloc::malloc_raw pub unsafe fn free_raw(ptr: *c_void) { + assert!(ptr.is_not_null()); + dec_count(); c_free(ptr); } +fn inc_count() { + unsafe { + let exchange_count = &mut *exchange_count_ptr(); + atomic_xadd(exchange_count, 1); + } +} + +fn dec_count() { + unsafe { + let exchange_count = &mut *exchange_count_ptr(); + atomic_xsub(exchange_count, 1); + } +} + +pub fn cleanup() { + unsafe { + let count_ptr = exchange_count_ptr(); + let allocations = atomic_load(&*count_ptr); + if allocations != 0 { + abort!("exchange heap not empty on exit\ + %i dangling allocations", allocations); + } + } +} + fn get_box_size(body_size: uint, body_align: uint) -> uint { let header_size = size_of::(); // FIXME (#2699): This alignment calculation is suspicious. Is it right? diff --git a/src/libstd/rt/mod.rs b/src/libstd/rt/mod.rs index 10b5c78f99e5d..0a269aa8767e0 100644 --- a/src/libstd/rt/mod.rs +++ b/src/libstd/rt/mod.rs @@ -176,6 +176,8 @@ pub fn start(_argc: int, _argv: **u8, crate_map: *u8, main: ~fn()) -> int { sched.enqueue_task(main_task); sched.run(); + cleanup(); + return 0; } @@ -185,6 +187,10 @@ pub fn init(crate_map: *u8) { logging::init(crate_map); } +pub fn cleanup() { + global_heap::cleanup(); +} + /// Possible contexts in which Rust code may be executing. /// Different runtime services are available depending on context. 
/// Mostly used for determining if we're using the new scheduler From 021e81fbd311d93c94cbd40524ff53062fd7fe6e Mon Sep 17 00:00:00 2001 From: Brian Anderson Date: Mon, 17 Jun 2013 23:22:41 -0700 Subject: [PATCH 057/111] std::rt: move abort function to util module --- src/libstd/macros.rs | 10 +--------- src/libstd/rt/util.rs | 4 ++++ 2 files changed, 5 insertions(+), 9 deletions(-) diff --git a/src/libstd/macros.rs b/src/libstd/macros.rs index 8d221fe6a1b7f..51da9344cabdc 100644 --- a/src/libstd/macros.rs +++ b/src/libstd/macros.rs @@ -39,18 +39,10 @@ macro_rules! rtassert ( ) -// The do_abort function was originally inside the abort macro, but -// this was ICEing the compiler so it has been moved outside. Now this -// seems to work? -#[allow(missing_doc)] -pub fn do_abort() -> ! { - unsafe { ::libc::abort(); } -} - macro_rules! abort( ($( $msg:expr),+) => ( { rtdebug!($($msg),+); - ::macros::do_abort(); + ::rt::util::abort(); } ) ) diff --git a/src/libstd/rt/util.rs b/src/libstd/rt/util.rs index 39b9de90f3478..6153170ecafe3 100644 --- a/src/libstd/rt/util.rs +++ b/src/libstd/rt/util.rs @@ -19,4 +19,8 @@ pub fn num_cpus() -> uint { extern { fn rust_get_num_cpus() -> libc::uintptr_t; } +} + +pub fn abort() -> ! { + unsafe { libc::abort(); } } \ No newline at end of file From b5fbec9c1e519c7e6fa45e2157f6e746ef3f6849 Mon Sep 17 00:00:00 2001 From: Brian Anderson Date: Mon, 17 Jun 2013 23:24:50 -0700 Subject: [PATCH 058/111] std: Rename `abort!` to `rtabort!` to match other macros --- src/libstd/macros.rs | 4 ++-- src/libstd/rt/global_heap.rs | 2 +- src/libstd/rt/local.rs | 24 ++++++++++++------------ src/libstd/rt/local_ptr.rs | 2 +- src/libstd/rt/sched.rs | 14 +++++++------- 5 files changed, 23 insertions(+), 23 deletions(-) diff --git a/src/libstd/macros.rs b/src/libstd/macros.rs index 51da9344cabdc..fed3ff461c441 100644 --- a/src/libstd/macros.rs +++ b/src/libstd/macros.rs @@ -33,13 +33,13 @@ macro_rules! rtdebug ( macro_rules! rtassert ( ( $arg:expr ) => ( { if !$arg { - abort!("assertion failed: %s", stringify!($arg)); + rtabort!("assertion failed: %s", stringify!($arg)); } } ) ) -macro_rules! abort( +macro_rules! 
rtabort( ($( $msg:expr),+) => ( { rtdebug!($($msg),+); ::rt::util::abort(); diff --git a/src/libstd/rt/global_heap.rs b/src/libstd/rt/global_heap.rs index 9f273fc8e6535..b180cec98387c 100644 --- a/src/libstd/rt/global_heap.rs +++ b/src/libstd/rt/global_heap.rs @@ -82,7 +82,7 @@ pub fn cleanup() { let count_ptr = exchange_count_ptr(); let allocations = atomic_load(&*count_ptr); if allocations != 0 { - abort!("exchange heap not empty on exit\ + rtabort!("exchange heap not empty on exit\ %i dangling allocations", allocations); } } diff --git a/src/libstd/rt/local.rs b/src/libstd/rt/local.rs index 6e0fbda5ec9a7..6df1ffaa453f3 100644 --- a/src/libstd/rt/local.rs +++ b/src/libstd/rt/local.rs @@ -38,17 +38,17 @@ impl Local for Scheduler { } match res { Some(r) => { r } - None => abort!("function failed!") + None => rtabort!("function failed!") } } unsafe fn unsafe_borrow() -> *mut Scheduler { local_ptr::unsafe_borrow() } - unsafe fn try_unsafe_borrow() -> Option<*mut Scheduler> { abort!("unimpl") } + unsafe fn try_unsafe_borrow() -> Option<*mut Scheduler> { rtabort!("unimpl") } } impl Local for Task { - fn put(_value: ~Task) { abort!("unimpl") } - fn take() -> ~Task { abort!("unimpl") } - fn exists() -> bool { abort!("unimpl") } + fn put(_value: ~Task) { rtabort!("unimpl") } + fn take() -> ~Task { rtabort!("unimpl") } + fn exists() -> bool { rtabort!("unimpl") } fn borrow(f: &fn(&mut Task) -> T) -> T { do Local::borrow:: |sched| { match sched.current_task { @@ -56,7 +56,7 @@ impl Local for Task { f(&mut *task.task) } None => { - abort!("no scheduler") + rtabort!("no scheduler") } } } @@ -69,7 +69,7 @@ impl Local for Task { } None => { // Don't fail. Infinite recursion - abort!("no scheduler") + rtabort!("no scheduler") } } } @@ -84,16 +84,16 @@ impl Local for Task { // XXX: This formulation won't work once ~IoFactoryObject is a real trait pointer impl Local for IoFactoryObject { - fn put(_value: ~IoFactoryObject) { abort!("unimpl") } - fn take() -> ~IoFactoryObject { abort!("unimpl") } - fn exists() -> bool { abort!("unimpl") } - fn borrow(_f: &fn(&mut IoFactoryObject) -> T) -> T { abort!("unimpl") } + fn put(_value: ~IoFactoryObject) { rtabort!("unimpl") } + fn take() -> ~IoFactoryObject { rtabort!("unimpl") } + fn exists() -> bool { rtabort!("unimpl") } + fn borrow(_f: &fn(&mut IoFactoryObject) -> T) -> T { rtabort!("unimpl") } unsafe fn unsafe_borrow() -> *mut IoFactoryObject { let sched = Local::unsafe_borrow::(); let io: *mut IoFactoryObject = (*sched).event_loop.io().unwrap(); return io; } - unsafe fn try_unsafe_borrow() -> Option<*mut IoFactoryObject> { abort!("unimpl") } + unsafe fn try_unsafe_borrow() -> Option<*mut IoFactoryObject> { rtabort!("unimpl") } } #[cfg(test)] diff --git a/src/libstd/rt/local_ptr.rs b/src/libstd/rt/local_ptr.rs index 0db903f81eec7..cd7c5daa444d7 100644 --- a/src/libstd/rt/local_ptr.rs +++ b/src/libstd/rt/local_ptr.rs @@ -109,7 +109,7 @@ pub unsafe fn unsafe_borrow() -> *mut T { fn tls_key() -> tls::Key { match maybe_tls_key() { Some(key) => key, - None => abort!("runtime tls key not initialized") + None => rtabort!("runtime tls key not initialized") } } diff --git a/src/libstd/rt/sched.rs b/src/libstd/rt/sched.rs index be57247d51479..5c08298801212 100644 --- a/src/libstd/rt/sched.rs +++ b/src/libstd/rt/sched.rs @@ -357,7 +357,7 @@ impl Scheduler { home_handle.send(PinnedTask(task)); } AnySched => { - abort!("error: cannot send anysched task home"); + rtabort!("error: cannot send anysched task home"); } } } @@ -422,10 +422,10 @@ impl Scheduler { return true; } 4 
=> { - abort!("task home was None!"); + rtabort!("task home was None!"); } _ => { - abort!("literally, you should not be here"); + rtabort!("literally, you should not be here"); } } } @@ -452,7 +452,7 @@ impl Scheduler { dead_task.take().recycle(&mut sched.stack_pool); } - abort!("control reached end of task"); + rtabort!("control reached end of task"); } pub fn schedule_task(~self, task: ~Coroutine) { @@ -672,7 +672,7 @@ impl Coroutine { Some(Sched(SchedHandle { sched_id: ref id, _ })) => { *id == sched.sched_id() } - None => { abort!("error: homeless task!"); } + None => { rtabort!("error: homeless task!"); } } } } @@ -696,7 +696,7 @@ impl Coroutine { match self.task.home { Some(AnySched) => { false } Some(Sched(_)) => { true } - None => { abort!("error: homeless task!"); + None => { rtabort!("error: homeless task!"); } } } @@ -710,7 +710,7 @@ impl Coroutine { Some(Sched(SchedHandle { sched_id: ref id, _})) => { *id == sched.sched_id() } - None => { abort!("error: homeless task!"); } + None => { rtabort!("error: homeless task!"); } } } From 5b2dc520340103491088616ba4f58095948f5821 Mon Sep 17 00:00:00 2001 From: Brian Anderson Date: Tue, 18 Jun 2013 00:17:14 -0700 Subject: [PATCH 059/111] std::rt: Turn on multithreaded scheduling --- src/libstd/rt/mod.rs | 92 ++++++++++++++++++++++++++++++++++++-------- 1 file changed, 76 insertions(+), 16 deletions(-) diff --git a/src/libstd/rt/mod.rs b/src/libstd/rt/mod.rs index 0a269aa8767e0..581e3addff0f4 100644 --- a/src/libstd/rt/mod.rs +++ b/src/libstd/rt/mod.rs @@ -60,7 +60,21 @@ Several modules in `core` are clients of `rt`: #[deny(unused_variable)]; use cell::Cell; +use clone::Clone; +use container::Container; +use from_str::FromStr; +use iterator::IteratorUtil; +use option::{Some, None}; +use os; use ptr::RawPtr; +use uint; +use rt::sched::{Scheduler, Coroutine, Shutdown}; +use rt::sleeper_list::SleeperList; +use rt::task::Task; +use rt::thread::Thread; +use rt::work_queue::WorkQueue; +use rt::uv::uvio::UvEventLoop; +use vec::{OwnedVector, MutableVector}; /// The global (exchange) heap. pub mod global_heap; @@ -159,23 +173,8 @@ pub mod util; /// The return value is used as the process return code. 0 on success, 101 on error. pub fn start(_argc: int, _argv: **u8, crate_map: *u8, main: ~fn()) -> int { - use self::sched::{Scheduler, Coroutine}; - use self::work_queue::WorkQueue; - use self::uv::uvio::UvEventLoop; - use self::sleeper_list::SleeperList; - init(crate_map); - - let loop_ = ~UvEventLoop::new(); - let work_queue = WorkQueue::new(); - let sleepers = SleeperList::new(); - let mut sched = ~Scheduler::new(loop_, work_queue, sleepers); - sched.no_sleep = true; - let main_task = ~Coroutine::new_root(&mut sched.stack_pool, main); - - sched.enqueue_task(main_task); - sched.run(); - + run(main); cleanup(); return 0; @@ -191,6 +190,67 @@ pub fn cleanup() { global_heap::cleanup(); } +pub fn run(main: ~fn()) { + let nthreads = match os::getenv("RUST_THREADS") { + Some(nstr) => FromStr::from_str(nstr).get(), + None => unsafe { + // Using more threads than cores in test code + // to force the OS to preempt them frequently. + // Assuming that this help stress test concurrent types. 
+ util::num_cpus() * 2 + } + }; + + let sleepers = SleeperList::new(); + let work_queue = WorkQueue::new(); + + let mut handles = ~[]; + let mut scheds = ~[]; + + for uint::range(0, nthreads) |_| { + let loop_ = ~UvEventLoop::new(); + let mut sched = ~Scheduler::new(loop_, work_queue.clone(), sleepers.clone()); + let handle = sched.make_handle(); + + handles.push(handle); + scheds.push(sched); + } + + let main_cell = Cell::new(main); + let handles = Cell::new(handles); + let mut new_task = ~Task::new_root(); + let on_exit: ~fn(bool) = |exit_status| { + + let mut handles = handles.take(); + // Tell schedulers to exit + for handles.mut_iter().advance |handle| { + handle.send(Shutdown); + } + + rtassert!(exit_status); + }; + new_task.on_exit = Some(on_exit); + let main_task = ~Coroutine::with_task(&mut scheds[0].stack_pool, + new_task, main_cell.take()); + scheds[0].enqueue_task(main_task); + + let mut threads = ~[]; + + while !scheds.is_empty() { + let sched = scheds.pop(); + let sched_cell = Cell::new(sched); + let thread = do Thread::start { + let sched = sched_cell.take(); + sched.run(); + }; + + threads.push(thread); + } + + // Wait for schedulers + let _threads = threads; +} + /// Possible contexts in which Rust code may be executing. /// Different runtime services are available depending on context. /// Mostly used for determining if we're using the new scheduler From 29ad8e15a2b7e2024941d74ea4ce261cb501ded9 Mon Sep 17 00:00:00 2001 From: Brian Anderson Date: Tue, 18 Jun 2013 14:23:37 -0700 Subject: [PATCH 060/111] std::rt: Improve the rtabort! macro --- src/libstd/macros.rs | 19 +++++------ src/libstd/rt/global_heap.rs | 3 +- src/libstd/rt/util.rs | 65 ++++++++++++++++++++++++++++++++++-- 3 files changed, 72 insertions(+), 15 deletions(-) diff --git a/src/libstd/macros.rs b/src/libstd/macros.rs index fed3ff461c441..7748c43efcd28 100644 --- a/src/libstd/macros.rs +++ b/src/libstd/macros.rs @@ -10,18 +10,16 @@ #[macro_escape]; +macro_rules! rterrln ( + ($( $arg:expr),+) => ( { + ::rt::util::dumb_println(fmt!( $($arg),+ )); + } ) +) + // Some basic logging macro_rules! rtdebug_ ( ($( $arg:expr),+) => ( { - dumb_println(fmt!( $($arg),+ )); - - fn dumb_println(s: &str) { - use io::WriterUtil; - let dbg = ::libc::STDERR_FILENO as ::io::fd_t; - dbg.write_str(s); - dbg.write_str("\n"); - } - + rterrln!( $($arg),+ ) } ) ) @@ -41,8 +39,7 @@ macro_rules! rtassert ( macro_rules! rtabort( ($( $msg:expr),+) => ( { - rtdebug!($($msg),+); - ::rt::util::abort(); + ::rt::util::abort(fmt!($($msg),+)); } ) ) diff --git a/src/libstd/rt/global_heap.rs b/src/libstd/rt/global_heap.rs index b180cec98387c..e89df2b1c93f5 100644 --- a/src/libstd/rt/global_heap.rs +++ b/src/libstd/rt/global_heap.rs @@ -82,8 +82,7 @@ pub fn cleanup() { let count_ptr = exchange_count_ptr(); let allocations = atomic_load(&*count_ptr); if allocations != 0 { - rtabort!("exchange heap not empty on exit\ - %i dangling allocations", allocations); + rtabort!("exchange heap not empty on exit - %i dangling allocations", allocations); } } } diff --git a/src/libstd/rt/util.rs b/src/libstd/rt/util.rs index 6153170ecafe3..904b2f8bbb932 100644 --- a/src/libstd/rt/util.rs +++ b/src/libstd/rt/util.rs @@ -8,7 +8,10 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +use container::Container; +use iterator::IteratorUtil; use libc; +use str::StrSlice; /// Get the number of cores available pub fn num_cpus() -> uint { @@ -21,6 +24,64 @@ pub fn num_cpus() -> uint { } } -pub fn abort() -> ! 
{ +pub fn dumb_println(s: &str) { + use io::WriterUtil; + let dbg = ::libc::STDERR_FILENO as ::io::fd_t; + dbg.write_str(s); + dbg.write_str("\n"); +} + +pub fn abort(msg: &str) -> ! { + let msg = if !msg.is_empty() { msg } else { "aborted" }; + let hash = msg.iter().fold(0, |accum, val| accum + (val as uint) ); + let quote = match hash % 10 { + 0 => " +It was from the artists and poets that the pertinent answers came, and I +know that panic would have broken loose had they been able to compare notes. +As it was, lacking their original letters, I half suspected the compiler of +having asked leading questions, or of having edited the correspondence in +corroboration of what he had latently resolved to see.", + 1 => " +There are not many persons who know what wonders are opened to them in the +stories and visions of their youth; for when as children we listen and dream, +we think but half-formed thoughts, and when as men we try to remember, we are +dulled and prosaic with the poison of life. But some of us awake in the night +with strange phantasms of enchanted hills and gardens, of fountains that sing +in the sun, of golden cliffs overhanging murmuring seas, of plains that stretch +down to sleeping cities of bronze and stone, and of shadowy companies of heroes +that ride caparisoned white horses along the edges of thick forests; and then +we know that we have looked back through the ivory gates into that world of +wonder which was ours before we were wise and unhappy.", + 2 => " +Instead of the poems I had hoped for, there came only a shuddering blackness +and ineffable loneliness; and I saw at last a fearful truth which no one had +ever dared to breathe before — the unwhisperable secret of secrets — The fact +that this city of stone and stridor is not a sentient perpetuation of Old New +York as London is of Old London and Paris of Old Paris, but that it is in fact +quite dead, its sprawling body imperfectly embalmed and infested with queer +animate things which have nothing to do with it as it was in life.", + 3 => " +The ocean ate the last of the land and poured into the smoking gulf, thereby +giving up all it had ever conquered. From the new-flooded lands it flowed +again, uncovering death and decay; and from its ancient and immemorial bed it +trickled loathsomely, uncovering nighted secrets of the years when Time was +young and the gods unborn. Above the waves rose weedy remembered spires. The +moon laid pale lilies of light on dead London, and Paris stood up from its damp +grave to be sanctified with star-dust. Then rose spires and monoliths that were +weedy but not remembered; terrible spires and monoliths of lands that men never +knew were lands...", + 4 => " +There was a night when winds from unknown spaces whirled us irresistibly into +limitless vacum beyond all thought and entity. Perceptions of the most +maddeningly untransmissible sort thronged upon us; perceptions of infinity +which at the time convulsed us with joy, yet which are now partly lost to my +memory and partly incapable of presentation to others.", + _ => "You've met with a terrible fate, haven't you?" 
+ }; + rterrln!("%s", ""); + rterrln!("%s", quote); + rterrln!("%s", ""); + rterrln!("fatal runtime error: %s", msg); + unsafe { libc::abort(); } -} \ No newline at end of file +} From 915aaa7f67671186348b1b6c10d765a3d9ab6e37 Mon Sep 17 00:00:00 2001 From: Brian Anderson Date: Wed, 19 Jun 2013 00:39:10 -0700 Subject: [PATCH 061/111] std::rt: Set the process exit code --- src/libstd/rt/mod.rs | 24 +++++++++++++++++++----- 1 file changed, 19 insertions(+), 5 deletions(-) diff --git a/src/libstd/rt/mod.rs b/src/libstd/rt/mod.rs index 581e3addff0f4..899fa171b727e 100644 --- a/src/libstd/rt/mod.rs +++ b/src/libstd/rt/mod.rs @@ -74,6 +74,8 @@ use rt::task::Task; use rt::thread::Thread; use rt::work_queue::WorkQueue; use rt::uv::uvio::UvEventLoop; +use unstable::atomics::{AtomicInt, SeqCst}; +use unstable::sync::UnsafeAtomicRcBox; use vec::{OwnedVector, MutableVector}; /// The global (exchange) heap. @@ -174,10 +176,10 @@ pub mod util; pub fn start(_argc: int, _argv: **u8, crate_map: *u8, main: ~fn()) -> int { init(crate_map); - run(main); + let exit_code = run(main); cleanup(); - return 0; + return exit_code; } /// One-time runtime initialization. Currently all this does is set up logging @@ -190,7 +192,9 @@ pub fn cleanup() { global_heap::cleanup(); } -pub fn run(main: ~fn()) { +pub fn run(main: ~fn()) -> int { + static DEFAULT_ERROR_CODE: int = 101; + let nthreads = match os::getenv("RUST_THREADS") { Some(nstr) => FromStr::from_str(nstr).get(), None => unsafe { @@ -216,10 +220,13 @@ pub fn run(main: ~fn()) { scheds.push(sched); } + let exit_code = UnsafeAtomicRcBox::new(AtomicInt::new(0)); + let exit_code_clone = exit_code.clone(); + let main_cell = Cell::new(main); let handles = Cell::new(handles); let mut new_task = ~Task::new_root(); - let on_exit: ~fn(bool) = |exit_status| { + let on_exit: ~fn(bool) = |exit_success| { let mut handles = handles.take(); // Tell schedulers to exit @@ -227,7 +234,10 @@ pub fn run(main: ~fn()) { handle.send(Shutdown); } - rtassert!(exit_status); + unsafe { + let exit_code = if exit_success { 0 } else { DEFAULT_ERROR_CODE }; + (*exit_code_clone.get()).store(exit_code, SeqCst); + } }; new_task.on_exit = Some(on_exit); let main_task = ~Coroutine::with_task(&mut scheds[0].stack_pool, @@ -249,6 +259,10 @@ pub fn run(main: ~fn()) { // Wait for schedulers let _threads = threads; + + unsafe { + (*exit_code.get()).load(SeqCst) + } } /// Possible contexts in which Rust code may be executing. From 5722c953e5180ae3e086b4354f65ee8b5fb8d868 Mon Sep 17 00:00:00 2001 From: Brian Anderson Date: Wed, 19 Jun 2013 00:49:05 -0700 Subject: [PATCH 062/111] std::rt: Correct the numbers of default cores --- src/libstd/rt/mod.rs | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/src/libstd/rt/mod.rs b/src/libstd/rt/mod.rs index 899fa171b727e..58da7549b3f21 100644 --- a/src/libstd/rt/mod.rs +++ b/src/libstd/rt/mod.rs @@ -197,12 +197,7 @@ pub fn run(main: ~fn()) -> int { let nthreads = match os::getenv("RUST_THREADS") { Some(nstr) => FromStr::from_str(nstr).get(), - None => unsafe { - // Using more threads than cores in test code - // to force the OS to preempt them frequently. - // Assuming that this help stress test concurrent types. 
- util::num_cpus() * 2 - } + None => unsafe { util::num_cpus() } }; let sleepers = SleeperList::new(); From e1555f9b5628af2b6c6ed344cad621399cb7684d Mon Sep 17 00:00:00 2001 From: Brian Anderson Date: Wed, 19 Jun 2013 01:08:47 -0700 Subject: [PATCH 063/111] std::rt: Document and cleanup the run function --- src/libstd/rt/mod.rs | 40 ++++++++++++++++++++++++++++++---------- 1 file changed, 30 insertions(+), 10 deletions(-) diff --git a/src/libstd/rt/mod.rs b/src/libstd/rt/mod.rs index 58da7549b3f21..dd4c71eca7459 100644 --- a/src/libstd/rt/mod.rs +++ b/src/libstd/rt/mod.rs @@ -63,11 +63,11 @@ use cell::Cell; use clone::Clone; use container::Container; use from_str::FromStr; +use iter::Times; use iterator::IteratorUtil; use option::{Some, None}; use os; use ptr::RawPtr; -use uint; use rt::sched::{Scheduler, Coroutine, Shutdown}; use rt::sleeper_list::SleeperList; use rt::task::Task; @@ -150,7 +150,7 @@ pub mod local_ptr; /// Bindings to pthread/windows thread-local storage. pub mod thread_local_storage; -/// A concurrent data structure with which parent tasks wait on child tasks. +/// For waiting on child tasks. pub mod join_latch; pub mod metrics; @@ -188,11 +188,18 @@ pub fn init(crate_map: *u8) { logging::init(crate_map); } +/// One-time runtime cleanup. pub fn cleanup() { global_heap::cleanup(); } +/// Execute the main function in a scheduler. +/// +/// Configures the runtime according to the environment, by default +/// using a task scheduler with the same number of threads as cores. +/// Returns a process exit code. pub fn run(main: ~fn()) -> int { + static DEFAULT_ERROR_CODE: int = 101; let nthreads = match os::getenv("RUST_THREADS") { @@ -200,31 +207,39 @@ pub fn run(main: ~fn()) -> int { None => unsafe { util::num_cpus() } }; + // The shared list of sleeping schedulers. Schedulers wake each other + // occassionally to do new work. let sleepers = SleeperList::new(); + // The shared work queue. Temporary until work stealing is implemented. let work_queue = WorkQueue::new(); - let mut handles = ~[]; + // The schedulers. let mut scheds = ~[]; + // Handles to the schedulers. When the main task ends these will be + // sent the Shutdown message to terminate the schedulers. + let mut handles = ~[]; - for uint::range(0, nthreads) |_| { + for nthreads.times { + // Every scheduler is driven by an I/O event loop. let loop_ = ~UvEventLoop::new(); let mut sched = ~Scheduler::new(loop_, work_queue.clone(), sleepers.clone()); let handle = sched.make_handle(); - handles.push(handle); scheds.push(sched); + handles.push(handle); } + // Create a shared cell for transmitting the process exit + // code from the main task to this function. let exit_code = UnsafeAtomicRcBox::new(AtomicInt::new(0)); let exit_code_clone = exit_code.clone(); - let main_cell = Cell::new(main); + // When the main task exits, after all the tasks in the main + // task tree, shut down the schedulers and set the exit code. let handles = Cell::new(handles); - let mut new_task = ~Task::new_root(); let on_exit: ~fn(bool) = |exit_success| { let mut handles = handles.take(); - // Tell schedulers to exit for handles.mut_iter().advance |handle| { handle.send(Shutdown); } @@ -234,13 +249,17 @@ pub fn run(main: ~fn()) -> int { (*exit_code_clone.get()).store(exit_code, SeqCst); } }; + + // Create and enqueue the main task. 
+ let main_cell = Cell::new(main); + let mut new_task = ~Task::new_root(); new_task.on_exit = Some(on_exit); let main_task = ~Coroutine::with_task(&mut scheds[0].stack_pool, new_task, main_cell.take()); scheds[0].enqueue_task(main_task); + // Run each scheduler in a thread. let mut threads = ~[]; - while !scheds.is_empty() { let sched = scheds.pop(); let sched_cell = Cell::new(sched); @@ -253,8 +272,9 @@ pub fn run(main: ~fn()) -> int { } // Wait for schedulers - let _threads = threads; + { let _threads = threads; } + // Return the exit code unsafe { (*exit_code.get()).load(SeqCst) } From d777ba01cbfc01d1241722e2c27fcd4a4650a300 Mon Sep 17 00:00:00 2001 From: Eric Reed Date: Wed, 19 Jun 2013 15:20:28 -0700 Subject: [PATCH 064/111] Wrote the Eq instance of IpAddr in a slightly different way. --- src/libstd/rt/io/net/ip.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/libstd/rt/io/net/ip.rs b/src/libstd/rt/io/net/ip.rs index f885c7f278861..84abc058c333c 100644 --- a/src/libstd/rt/io/net/ip.rs +++ b/src/libstd/rt/io/net/ip.rs @@ -8,7 +8,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use std::cmp::{Eq, TotalEq, eq}; +use std::cmp::{Eq, TotalEq}; pub enum IpAddr { Ipv4(u8, u8, u8, u8, u16), @@ -18,13 +18,13 @@ pub enum IpAddr { impl Eq for IpAddr { fn eq(&self, other: &IpAddr) -> bool { match (*self, *other) { - (Ipv4(a,b,c,d,e), Ipv4(f,g,h,i,j)) => a == f && b == g && c == h && d == i && e == j, + (Ipv4(a,b,c,d,e), Ipv4(f,g,h,i,j)) => (a,b,c,d,e) == (f,g,h,i,j), (Ipv6, Ipv6) => fail!(), _ => false } } fn ne(&self, other: &IpAddr) -> bool { - !eq(self, other) + !(self == other) } } From 753b497b4e7f5445bd5781572568b2b5cf0ce67d Mon Sep 17 00:00:00 2001 From: toddaaro Date: Wed, 19 Jun 2013 15:23:14 -0700 Subject: [PATCH 065/111] Modified a match in resume_task_from_queue that was returning an int that was then matched on to instead use an enum. --- src/libstd/rt/sched.rs | 34 +++++++++++++++++----------------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/src/libstd/rt/sched.rs b/src/libstd/rt/sched.rs index 5c08298801212..453d3303db668 100644 --- a/src/libstd/rt/sched.rs +++ b/src/libstd/rt/sched.rs @@ -381,52 +381,44 @@ impl Scheduler { match home { &Some(Sched(ref home_handle)) if home_handle.sched_id != this.sched_id() => { - 0 + SendHome } &Some(AnySched) if this.run_anything => { - 1 + ResumeNow } &Some(AnySched) => { - 2 + Requeue } &Some(Sched(_)) => { - 3 + ResumeNow } &None => { - 4 + Homeless } } }; match action_id { - 0 => { + SendHome => { rtdebug!("sending task home"); Scheduler::send_task_home(task); Local::put(this); return false; } - 1 => { + ResumeNow => { rtdebug!("resuming now"); this.resume_task_immediately(task); return true; } - 2 => { + Requeue => { rtdebug!("re-queueing") this.enqueue_task(task); Local::put(this); return false; } - 3 => { - rtdebug!("resuming now"); - this.resume_task_immediately(task); - return true; - } - 4 => { + Homeless => { rtabort!("task home was None!"); } - _ => { - rtabort!("literally, you should not be here"); - } } } @@ -654,6 +646,14 @@ impl Scheduler { } } +// The cases for the below function. 
+enum ResumeAction { + SendHome, + Requeue, + ResumeNow, + Homeless +} + impl SchedHandle { pub fn send(&mut self, msg: SchedMessage) { self.queue.push(msg); From 083c692565340791b06ab67d66c4c95d63b222cb Mon Sep 17 00:00:00 2001 From: Eric Reed Date: Wed, 19 Jun 2013 15:39:18 -0700 Subject: [PATCH 066/111] Changed visibility from being on the impl to being on methods per language syntax change. --- src/libstd/rt/uv/net.rs | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/src/libstd/rt/uv/net.rs b/src/libstd/rt/uv/net.rs index 5491b82b72521..c88d96bd2306e 100644 --- a/src/libstd/rt/uv/net.rs +++ b/src/libstd/rt/uv/net.rs @@ -17,6 +17,7 @@ use rt::uv::{Loop, Watcher, Request, UvError, Buf, NativeHandle, NullCallback, status_to_maybe_uv_error}; use rt::io::net::ip::{IpAddr, Ipv4, Ipv6}; use rt::uv::last_uv_error; +use vec; fn ip4_as_uv_ip4(addr: IpAddr, f: &fn(*sockaddr_in) -> T) -> T { match addr { @@ -161,7 +162,7 @@ pub struct TcpWatcher(*uvll::uv_tcp_t); impl Watcher for TcpWatcher { } impl TcpWatcher { - pub fn new(loop_: &mut Loop) -> TcpWatcher { + pub fn new(loop_: &Loop) -> TcpWatcher { unsafe { let handle = malloc_handle(UV_TCP); assert!(handle.is_not_null()); @@ -264,8 +265,8 @@ impl NativeHandle<*uvll::uv_tcp_t> for TcpWatcher { pub struct UdpWatcher(*uvll::uv_udp_t); impl Watcher for UdpWatcher { } -pub impl UdpWatcher { - fn new(loop_: &mut Loop) -> UdpWatcher { +impl UdpWatcher { + pub fn new(loop_: &mut Loop) -> UdpWatcher { unsafe { let handle = malloc_handle(UV_UDP); assert!(handle.is_not_null()); @@ -276,7 +277,7 @@ pub impl UdpWatcher { } } - fn bind(&mut self, address: IpAddr) -> Result<(), UvError> { + pub fn bind(&mut self, address: IpAddr) -> Result<(), UvError> { match address { Ipv4(*) => { do ip4_as_uv_ip4(address) |addr| { @@ -294,7 +295,7 @@ pub impl UdpWatcher { } } - fn recv_start(&mut self, alloc: AllocCallback, cb: UdpReceiveCallback) { + pub fn recv_start(&mut self, alloc: AllocCallback, cb: UdpReceiveCallback) { { let data = self.get_watcher_data(); data.alloc_cb = Some(alloc); @@ -325,12 +326,12 @@ pub impl UdpWatcher { } } - fn recv_stop(&mut self) { + pub fn recv_stop(&mut self) { let handle = self.native_handle(); unsafe { uvll::udp_recv_stop(handle); } } - fn send(&mut self, buf: Buf, address: IpAddr, cb: UdpSendCallback) { + pub fn send(&mut self, buf: Buf, address: IpAddr, cb: UdpSendCallback) { { let data = self.get_watcher_data(); assert!(data.udp_send_cb.is_none()); @@ -366,7 +367,7 @@ pub impl UdpWatcher { } } - fn close(self, cb: NullCallback) { + pub fn close(self, cb: NullCallback) { { let mut this = self; let data = this.get_watcher_data(); @@ -470,11 +471,10 @@ impl NativeHandle<*uvll::uv_write_t> for WriteRequest { } pub struct UdpSendRequest(*uvll::uv_udp_send_t); - impl Request for UdpSendRequest { } -pub impl UdpSendRequest { - fn new() -> UdpSendRequest { +impl UdpSendRequest { + pub fn new() -> UdpSendRequest { let send_handle = unsafe { malloc_req(UV_UDP_SEND) }; @@ -483,14 +483,14 @@ pub impl UdpSendRequest { UdpSendRequest(send_handle) } - fn handle(&self) -> UdpWatcher { + pub fn handle(&self) -> UdpWatcher { unsafe { let udp_handle = uvll::get_udp_handle_from_send_req(self.native_handle()); NativeHandle::from_native_handle(udp_handle) } } - fn delete(self) { + pub fn delete(self) { unsafe { free_req(self.native_handle() as *c_void) } } } From 5086c0850ebdd8407901d108f312ab141e4a4a18 Mon Sep 17 00:00:00 2001 From: Brian Anderson Date: Wed, 19 Jun 2013 16:08:07 -0700 Subject: [PATCH 
067/111] std::rt: Update GC metadata in init --- src/libstd/rt/mod.rs | 5 +++++ src/rt/rust_gc_metadata.cpp | 5 +++++ src/rt/rustrt.def.in | 1 + 3 files changed, 11 insertions(+) diff --git a/src/libstd/rt/mod.rs b/src/libstd/rt/mod.rs index dd4c71eca7459..a80fb15bad798 100644 --- a/src/libstd/rt/mod.rs +++ b/src/libstd/rt/mod.rs @@ -186,6 +186,11 @@ pub fn start(_argc: int, _argv: **u8, crate_map: *u8, main: ~fn()) -> int { /// based on the RUST_LOG environment variable. pub fn init(crate_map: *u8) { logging::init(crate_map); + unsafe { rust_update_gc_metadata(crate_map) } + + extern { + fn rust_update_gc_metadata(crate_map: *u8); + } } /// One-time runtime cleanup. diff --git a/src/rt/rust_gc_metadata.cpp b/src/rt/rust_gc_metadata.cpp index fbf0575b31dcc..e37856255a7d6 100644 --- a/src/rt/rust_gc_metadata.cpp +++ b/src/rt/rust_gc_metadata.cpp @@ -79,6 +79,11 @@ rust_gc_metadata() { return (void *)global_safe_points; } +extern "C" CDECL void +rust_update_gc_metadata(const void* map) { + update_gc_metadata(map); +} + // // Local Variables: // mode: C++ diff --git a/src/rt/rustrt.def.in b/src/rt/rustrt.def.in index 9b49583519eca..c93d29f6148c6 100644 --- a/src/rt/rustrt.def.in +++ b/src/rt/rustrt.def.in @@ -178,6 +178,7 @@ rust_call_tydesc_glue tdefl_compress_mem_to_heap tinfl_decompress_mem_to_heap rust_gc_metadata +rust_update_gc_metadata rust_uv_ip4_port rust_uv_ip6_port rust_uv_tcp_getpeername From ac49b74e8254b3129694f5a5425dbda4ffc4b186 Mon Sep 17 00:00:00 2001 From: Eric Reed Date: Wed, 19 Jun 2013 17:23:55 -0700 Subject: [PATCH 068/111] socket based UDP io --- src/libstd/rt/io/net/udp.rs | 101 ++++++++++++++++++++++++-------- src/libstd/rt/rtio.rs | 14 ++--- src/libstd/rt/uv/uvio.rs | 112 +++++++++++++++++++++++------------- 3 files changed, 157 insertions(+), 70 deletions(-) diff --git a/src/libstd/rt/io/net/udp.rs b/src/libstd/rt/io/net/udp.rs index 6275eff924988..ac5a118f22a59 100644 --- a/src/libstd/rt/io/net/udp.rs +++ b/src/libstd/rt/io/net/udp.rs @@ -8,47 +8,100 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use option::{Option}; +use option::{Option, Some, None}; +use result::{Ok, Err}; use rt::io::net::ip::IpAddr; -use rt::io::{Reader, Writer, Listener}; -use rt::rtio::{RtioUdpStreamObject}; +use rt::io::{Reader, Writer}; +use rt::io::{io_error, read_error, EndOfFile}; +use rt::rtio::{RtioUdpSocketObject, RtioUdpSocket, IoFactory, IoFactoryObject}; +use rt::local::Local; + +pub struct UdpSocket { + rtsocket: ~RtioUdpSocketObject +} + +impl UdpSocket { + fn new(s: ~RtioUdpSocketObject) -> UdpSocket { + UdpSocket { rtsocket: s } + } + + pub fn bind(addr: IpAddr) -> Option { + let socket = unsafe { + let io = Local::unsafe_borrow::(); + (*io).udp_bind(addr) + }; + match socket { + Ok(s) => { Some(UdpSocket { rtsocket: s }) } + Err(ioerr) => { + io_error::cond.raise(ioerr); + return None; + } + } + } + + pub fn recvfrom(&self, buf: &mut [u8]) -> Option<(uint, IpAddr)> { + match (*self.rtsocket).recvfrom(buf) { + Ok((nread, src)) => Some((nread, src)), + Err(ioerr) => { + // EOF is indicated by returning None + // XXX do we ever find EOF reading UDP packets? 
+ if ioerr.kind != EndOfFile { + read_error::cond.raise(ioerr); + } + None + } + } + } + + pub fn sendto(&self, buf: &[u8], dst: IpAddr) { + match (*self.rtsocket).sendto(buf, dst) { + Ok(_) => (), + Err(ioerr) => { + io_error::cond.raise(ioerr); + } + } + } + + // XXX convert ~self to self eventually + pub fn connect(~self, other: IpAddr) -> UdpStream { + UdpStream { socket: self, connectedTo: other } + } +} pub struct UdpStream { - rtstream: ~RtioUdpStreamObject + socket: ~UdpSocket, + connectedTo: IpAddr } impl UdpStream { - fn new(s: ~RtioUdpStreamObject) -> UdpStream { - UdpStream { - rtstream: s - } + pub fn as_socket(&self, f: &fn(&UdpSocket) -> T) -> T { + f(self.socket) } - pub fn connect(_addr: IpAddr) -> Option { - fail!() + pub fn disconnect(self) -> ~UdpSocket { + let UdpStream { socket: s, _ } = self; + s } } impl Reader for UdpStream { - fn read(&mut self, _buf: &mut [u8]) -> Option { fail!() } + fn read(&mut self, buf: &mut [u8]) -> Option { + let conn = self.connectedTo; + do self.as_socket |sock| { + sock.recvfrom(buf) + .map_consume(|(nread,src)| if src == conn {nread} else {0}) + } + } fn eof(&mut self) -> bool { fail!() } } impl Writer for UdpStream { - fn write(&mut self, _buf: &[u8]) { fail!() } - - fn flush(&mut self) { fail!() } -} - -pub struct UdpListener; - -impl UdpListener { - pub fn bind(_addr: IpAddr) -> Option { - fail!() + fn write(&mut self, buf: &[u8]) { + do self.as_socket |sock| { + sock.sendto(buf, self.connectedTo); + } } -} -impl Listener for UdpListener { - fn accept(&mut self) -> Option { fail!() } + fn flush(&mut self) { fail!() } } diff --git a/src/libstd/rt/rtio.rs b/src/libstd/rt/rtio.rs index 0eebbb61e5b56..e38c952f744f0 100644 --- a/src/libstd/rt/rtio.rs +++ b/src/libstd/rt/rtio.rs @@ -22,7 +22,7 @@ pub type RemoteCallbackObject = uvio::UvRemoteCallback; pub type IoFactoryObject = uvio::UvIoFactory; pub type RtioTcpStreamObject = uvio::UvTcpStream; pub type RtioTcpListenerObject = uvio::UvTcpListener; -pub type RtioUdpStreamObject = uvio::UvUdpStream; +pub type RtioUdpSocketObject = uvio::UvUdpSocket; pub trait EventLoop { fn run(&mut self); @@ -45,7 +45,7 @@ pub trait RemoteCallback { pub trait IoFactory { fn tcp_connect(&mut self, addr: IpAddr) -> Result<~RtioTcpStreamObject, IoError>; fn tcp_bind(&mut self, addr: IpAddr) -> Result<~RtioTcpListenerObject, IoError>; - // TODO fn udp_connect(&mut self, addr: IpAddr) -> Result<~RtioUdpStreamObject, IoError>; + fn udp_bind(&mut self, addr: IpAddr) -> Result<~RtioUdpSocketObject, IoError>; } pub trait RtioTcpListener { @@ -53,11 +53,11 @@ pub trait RtioTcpListener { } pub trait RtioTcpStream { - fn read(&mut self, buf: &mut [u8]) -> Result; - fn write(&mut self, buf: &[u8]) -> Result<(), IoError>; + fn read(&self, buf: &mut [u8]) -> Result; + fn write(&self, buf: &[u8]) -> Result<(), IoError>; } -pub trait RtioUdpStream { - fn read(&mut self, buf: &mut [u8]) -> Result; - fn write(&mut self, buf: &[u8]) -> Result<(), IoError>; +pub trait RtioUdpSocket { + fn recvfrom(&self, buf: &mut [u8]) -> Result<(uint, IpAddr), IoError>; + fn sendto(&self, buf: &[u8], dst: IpAddr) -> Result<(), IoError>; } diff --git a/src/libstd/rt/uv/uvio.rs b/src/libstd/rt/uv/uvio.rs index eba84e537f999..828078f48654e 100644 --- a/src/libstd/rt/uv/uvio.rs +++ b/src/libstd/rt/uv/uvio.rs @@ -25,6 +25,7 @@ use rt::io::{standard_error, OtherIoError}; use rt::tube::Tube; use rt::local::Local; use unstable::sync::{Exclusive, exclusive}; +use rt::uv::net::uv_ip4_to_ip4; #[cfg(test)] use container::Container; #[cfg(test)] use 
uint; @@ -260,6 +261,24 @@ impl IoFactory for UvIoFactory { } } } + + fn udp_bind(&mut self, addr: IpAddr) -> Result<~RtioUdpSocketObject, IoError> { + let mut watcher = UdpWatcher::new(self.uv_loop()); + match watcher.bind(addr) { + Ok(_) => Ok(~UvUdpSocket { watcher: watcher }), + Err(uverr) => { + let scheduler = Local::take::(); + do scheduler.deschedule_running_task_and_then |_, task| { + let task_cell = Cell::new(task); + do watcher.close { + let scheduler = Local::take::(); + scheduler.resume_task_immediately(task_cell.take()); + } + } + Err(uv_error_to_io_error(uverr)) + } + } + } } // FIXME #6090: Prefer newtype structs but Drop doesn't work @@ -358,7 +377,7 @@ impl Drop for UvTcpStream { } impl RtioTcpStream for UvTcpStream { - fn read(&mut self, buf: &mut [u8]) -> Result { + fn read(&self, buf: &mut [u8]) -> Result { let result_cell = Cell::new_empty(); let result_cell_ptr: *Cell> = &result_cell; @@ -403,7 +422,7 @@ impl RtioTcpStream for UvTcpStream { return result_cell.take(); } - fn write(&mut self, buf: &[u8]) -> Result<(), IoError> { + fn write(&self, buf: &[u8]) -> Result<(), IoError> { let result_cell = Cell::new_empty(); let result_cell_ptr: *Cell> = &result_cell; let scheduler = Local::take::(); @@ -433,23 +452,21 @@ impl RtioTcpStream for UvTcpStream { } } -pub struct UvUdpStream { - watcher: UdpWatcher, - address: IpAddr +pub struct UvUdpSocket { + watcher: UdpWatcher } -impl UvUdpStream { +impl UvUdpSocket { fn watcher(&self) -> UdpWatcher { self.watcher } - fn address(&self) -> IpAddr { self.address } } -impl Drop for UvUdpStream { +impl Drop for UvUdpSocket { fn finalize(&self) { - rtdebug!("closing udp stream"); + rtdebug!("closing udp socket"); let watcher = self.watcher(); let scheduler = Local::take::(); do scheduler.deschedule_running_task_and_then |_, task| { - let task_cell = Cell(task); + let task_cell = Cell::new(task); do watcher.close { let scheduler = Local::take::(); scheduler.resume_task_immediately(task_cell.take()); @@ -458,40 +475,31 @@ impl Drop for UvUdpStream { } } -impl RtioUdpStream for UvUdpStream { - fn read(&mut self, buf: &mut [u8]) -> Result { - let result_cell = empty_cell(); - let result_cell_ptr: *Cell> = &result_cell; +impl RtioUdpSocket for UvUdpSocket { + fn recvfrom(&self, buf: &mut [u8]) -> Result<(uint, IpAddr), IoError> { + let result_cell = Cell::new_empty(); + let result_cell_ptr: *Cell> = &result_cell; let scheduler = Local::take::(); assert!(scheduler.in_task_context()); let watcher = self.watcher(); - let connection_address = self.address(); let buf_ptr: *&mut [u8] = &buf; do scheduler.deschedule_running_task_and_then |sched, task| { - rtdebug!("read: entered scheduler context"); + rtdebug!("recvfrom: entered scheduler context"); assert!(!sched.in_task_context()); let mut watcher = watcher; - let task_cell = Cell(task); - // XXX: see note in RtioTcpStream implementation for UvTcpStream - let alloc: AllocCallback = |_| unsafe { - slice_to_uv_buf(*buf_ptr) - }; - do watcher.recv_start(alloc) |watcher, nread, _buf, addr, flags, status| { - let _ = flags; // TODO actually use flags + let task_cell = Cell::new(task); + let alloc: AllocCallback = |_| unsafe { slice_to_uv_buf(*buf_ptr) }; + do watcher.recv_start(alloc) |watcher, nread, buf, addr, flags, status| { + let _ = flags; // TODO + let _ = buf; // TODO - // XXX: see note in RtioTcpStream implementation for UvTcpStream let mut watcher = watcher; watcher.recv_stop(); - let incoming_address = net::uv_ip4_to_ip4(&addr); let result = if status.is_none() { assert!(nread >= 0); - 
if incoming_address != connection_address { - Ok(0u) - } else { - Ok(nread as uint) - } + Ok((nread as uint, uv_ip4_to_ip4(&addr))) } else { Err(uv_error_to_io_error(status.unwrap())) }; @@ -505,11 +513,37 @@ impl RtioUdpStream for UvUdpStream { assert!(!result_cell.is_empty()); return result_cell.take(); + } + fn sendto(&self, buf: &[u8], dst: IpAddr) -> Result<(), IoError> { + let result_cell = Cell::new_empty(); + let result_cell_ptr: *Cell> = &result_cell; + let scheduler = Local::take::(); + assert!(scheduler.in_task_context()); + let watcher = self.watcher(); + let buf_ptr: *&[u8] = &buf; + do scheduler.deschedule_running_task_and_then |_, task| { + let mut watcher = watcher; + let task_cell = Cell::new(task); + let buf = unsafe { slice_to_uv_buf(*buf_ptr) }; + do watcher.send(buf, dst) |watcher, status| { + let _ = watcher; // TODO + + let result = if status.is_none() { + Ok(()) + } else { + Err(uv_error_to_io_error(status.unwrap())) + }; + + unsafe { (*result_cell_ptr).put_back(result); } + + let scheduler = Local::take::(); + scheduler.resume_task_immediately(task_cell.take()); + } + } - fn write(&mut self, buf: &[u8]) -> Result<(), IoError> { - let _ = buf; - fail!() + assert!(!result_cell.is_empty()); + return result_cell.take(); } } @@ -535,7 +569,7 @@ fn test_simple_tcp_server_and_client() { unsafe { let io = Local::unsafe_borrow::(); let mut listener = (*io).tcp_bind(addr).unwrap(); - let mut stream = listener.accept().unwrap(); + let stream = listener.accept().unwrap(); let mut buf = [0, .. 2048]; let nread = stream.read(buf).unwrap(); assert_eq!(nread, 8); @@ -549,7 +583,7 @@ fn test_simple_tcp_server_and_client() { do spawntask_immediately { unsafe { let io = Local::unsafe_borrow::(); - let mut stream = (*io).tcp_connect(addr).unwrap(); + let stream = (*io).tcp_connect(addr).unwrap(); stream.write([0, 1, 2, 3, 4, 5, 6, 7]); } } @@ -564,7 +598,7 @@ fn test_read_and_block() { do spawntask_immediately { let io = unsafe { Local::unsafe_borrow::() }; let mut listener = unsafe { (*io).tcp_bind(addr).unwrap() }; - let mut stream = listener.accept().unwrap(); + let stream = listener.accept().unwrap(); let mut buf = [0, .. 2048]; let expected = 32; @@ -597,7 +631,7 @@ fn test_read_and_block() { do spawntask_immediately { unsafe { let io = Local::unsafe_borrow::(); - let mut stream = (*io).tcp_connect(addr).unwrap(); + let stream = (*io).tcp_connect(addr).unwrap(); stream.write([0, 1, 2, 3, 4, 5, 6, 7]); stream.write([0, 1, 2, 3, 4, 5, 6, 7]); stream.write([0, 1, 2, 3, 4, 5, 6, 7]); @@ -618,7 +652,7 @@ fn test_read_read_read() { unsafe { let io = Local::unsafe_borrow::(); let mut listener = (*io).tcp_bind(addr).unwrap(); - let mut stream = listener.accept().unwrap(); + let stream = listener.accept().unwrap(); let buf = [1, .. 2048]; let mut total_bytes_written = 0; while total_bytes_written < MAX { @@ -631,7 +665,7 @@ fn test_read_read_read() { do spawntask_immediately { unsafe { let io = Local::unsafe_borrow::(); - let mut stream = (*io).tcp_connect(addr).unwrap(); + let stream = (*io).tcp_connect(addr).unwrap(); let mut buf = [0, .. 2048]; let mut total_bytes_read = 0; while total_bytes_read < MAX { From 36c0e04e57f165681408baeb0149f9a164479599 Mon Sep 17 00:00:00 2001 From: Eric Reed Date: Wed, 19 Jun 2013 17:39:02 -0700 Subject: [PATCH 069/111] derived instances of Eq and TotalEq for IpAddr rather than implement them manually. 
--- src/libstd/rt/io/net/ip.rs | 22 +--------------------- 1 file changed, 1 insertion(+), 21 deletions(-) diff --git a/src/libstd/rt/io/net/ip.rs b/src/libstd/rt/io/net/ip.rs index 84abc058c333c..d71b891350ecb 100644 --- a/src/libstd/rt/io/net/ip.rs +++ b/src/libstd/rt/io/net/ip.rs @@ -8,28 +8,8 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use std::cmp::{Eq, TotalEq}; - +#[deriving(Eq, TotalEq)] pub enum IpAddr { Ipv4(u8, u8, u8, u8, u16), Ipv6 } - -impl Eq for IpAddr { - fn eq(&self, other: &IpAddr) -> bool { - match (*self, *other) { - (Ipv4(a,b,c,d,e), Ipv4(f,g,h,i,j)) => (a,b,c,d,e) == (f,g,h,i,j), - (Ipv6, Ipv6) => fail!(), - _ => false - } - } - fn ne(&self, other: &IpAddr) -> bool { - !(self == other) - } -} - -impl TotalEq for IpAddr { - fn equals(&self, other: &IpAddr) -> bool { - *self == *other - } -} From 391bb0b4e7131cd7d30e03deea3eb9756a7c8954 Mon Sep 17 00:00:00 2001 From: Brian Anderson Date: Wed, 19 Jun 2013 18:37:50 -0700 Subject: [PATCH 070/111] std: Make newsched failures log correctly --- src/libstd/sys.rs | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/src/libstd/sys.rs b/src/libstd/sys.rs index e49ad34854209..f2591996e3a9e 100644 --- a/src/libstd/sys.rs +++ b/src/libstd/sys.rs @@ -180,10 +180,13 @@ impl FailWithCause for &'static str { // FIXME #4427: Temporary until rt::rt_fail_ goes away pub fn begin_unwind_(msg: *c_char, file: *c_char, line: size_t) -> ! { + use cell::Cell; use option::Option; + use either::Left; use rt::{context, OldTaskContext, TaskContext}; use rt::task::{Task, Unwinder}; use rt::local::Local; + use rt::logging::Logger; let context = context(); match context { @@ -200,12 +203,18 @@ pub fn begin_unwind_(msg: *c_char, file: *c_char, line: size_t) -> ! { let msg = str::raw::from_c_str(msg); let file = str::raw::from_c_str(file); - let outmsg = fmt!("%s at line %i of file %s", msg, line as int, file); + let outmsg = fmt!("task failed: '%s' at line %i of file %s", + msg, line as int, file); // XXX: Logging doesn't work correctly in non-task context because it // invokes the local heap if context == TaskContext { - error!(outmsg); + // XXX: Logging doesn't work here - the check to call the log + // function never passes - so calling the log function directly. + let outmsg = Cell::new(outmsg); + do Local::borrow:: |task| { + task.logger.log(Left(outmsg.take())); + } } else { rtdebug!("%s", outmsg); } From 4d39253a9623ff30c27cee3c9770634a41f4412d Mon Sep 17 00:00:00 2001 From: Brian Anderson Date: Thu, 20 Jun 2013 12:16:04 -0700 Subject: [PATCH 071/111] std::rt: Whitespace --- src/libstd/rt/sched.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/libstd/rt/sched.rs b/src/libstd/rt/sched.rs index 453d3303db668..bbe4aa25e2967 100644 --- a/src/libstd/rt/sched.rs +++ b/src/libstd/rt/sched.rs @@ -646,12 +646,12 @@ impl Scheduler { } } -// The cases for the below function. +// The cases for the below function. 
enum ResumeAction { SendHome, Requeue, ResumeNow, - Homeless + Homeless } impl SchedHandle { From 7a9a6e45911636eae3ec4e1c111bc0e120601a5a Mon Sep 17 00:00:00 2001 From: Brian Anderson Date: Thu, 20 Jun 2013 18:26:56 -0700 Subject: [PATCH 072/111] std: Port SharedChan to newsched --- src/libstd/comm.rs | 40 +++++++++++++++++++++++++--------------- src/libstd/rt/task.rs | 12 ++++++++++++ 2 files changed, 37 insertions(+), 15 deletions(-) diff --git a/src/libstd/comm.rs b/src/libstd/comm.rs index 7918abe4ae6a9..00c33c8ab325d 100644 --- a/src/libstd/comm.rs +++ b/src/libstd/comm.rs @@ -220,7 +220,7 @@ impl Peekable for PortSet { /// A channel that can be shared between many senders. pub struct SharedChan { - ch: Exclusive> + inner: Either>, rtcomm::SharedChan> } impl SharedChan { @@ -228,40 +228,50 @@ impl SharedChan { pub fn new(c: Chan) -> SharedChan { let Chan { inner } = c; let c = match inner { - Left(c) => c, - Right(_) => fail!("SharedChan not implemented") + Left(c) => Left(exclusive(c)), + Right(c) => Right(rtcomm::SharedChan::new(c)) }; - SharedChan { ch: exclusive(c) } + SharedChan { inner: c } } } impl GenericChan for SharedChan { fn send(&self, x: T) { - unsafe { - let mut xx = Some(x); - do self.ch.with_imm |chan| { - let x = replace(&mut xx, None); - chan.send(x.unwrap()) + match self.inner { + Left(ref chan) => { + unsafe { + let mut xx = Some(x); + do chan.with_imm |chan| { + let x = replace(&mut xx, None); + chan.send(x.unwrap()) + } + } } + Right(ref chan) => chan.send(x) } } } impl GenericSmartChan for SharedChan { fn try_send(&self, x: T) -> bool { - unsafe { - let mut xx = Some(x); - do self.ch.with_imm |chan| { - let x = replace(&mut xx, None); - chan.try_send(x.unwrap()) + match self.inner { + Left(ref chan) => { + unsafe { + let mut xx = Some(x); + do chan.with_imm |chan| { + let x = replace(&mut xx, None); + chan.try_send(x.unwrap()) + } + } } + Right(ref chan) => chan.try_send(x) } } } impl ::clone::Clone for SharedChan { fn clone(&self) -> SharedChan { - SharedChan { ch: self.ch.clone() } + SharedChan { inner: self.inner.clone() } } } diff --git a/src/libstd/rt/task.rs b/src/libstd/rt/task.rs index e7f87906fe59a..833f25b253c05 100644 --- a/src/libstd/rt/task.rs +++ b/src/libstd/rt/task.rs @@ -248,6 +248,18 @@ mod test { } } + #[test] + fn comm_shared_chan() { + use comm::*; + + do run_in_newsched_task() { + let (port, chan) = stream(); + let chan = SharedChan::new(chan); + chan.send(10); + assert!(port.recv() == 10); + } + } + #[test] fn linked_failure() { do run_in_newsched_task() { From 1b7c99655f300aa0b8ba216cd2029dc588c3ef88 Mon Sep 17 00:00:00 2001 From: Brian Anderson Date: Fri, 21 Jun 2013 01:28:23 -0700 Subject: [PATCH 073/111] std::rt: Support os::args --- src/libstd/os.rs | 17 ++++-- src/libstd/rt/args.rs | 125 ++++++++++++++++++++++++++++++++++++++++ src/libstd/rt/mod.rs | 26 ++++++--- src/rt/rust_builtin.cpp | 18 ++++++ src/rt/rustrt.def.in | 1 + 5 files changed, 176 insertions(+), 11 deletions(-) create mode 100644 src/libstd/rt/args.rs diff --git a/src/libstd/os.rs b/src/libstd/os.rs index 59b40b93d4d16..765dd30febcfb 100644 --- a/src/libstd/os.rs +++ b/src/libstd/os.rs @@ -40,6 +40,8 @@ use option::{Some, None}; use os; use prelude::*; use ptr; +use rt; +use rt::TaskContext; use str; use uint; use unstable::finally::Finally; @@ -1167,10 +1169,17 @@ pub fn real_args() -> ~[~str] { #[cfg(target_os = "android")] #[cfg(target_os = "freebsd")] pub fn real_args() -> ~[~str] { - unsafe { - let argc = rustrt::rust_get_argc(); - let argv = 
rustrt::rust_get_argv(); - load_argc_and_argv(argc, argv) + if rt::context() == TaskContext { + match rt::args::clone() { + Some(args) => args, + None => fail!("process arguments not initialized") + } + } else { + unsafe { + let argc = rustrt::rust_get_argc(); + let argv = rustrt::rust_get_argv(); + load_argc_and_argv(argc, argv) + } } } diff --git a/src/libstd/rt/args.rs b/src/libstd/rt/args.rs new file mode 100644 index 0000000000000..75ee4f381f6ef --- /dev/null +++ b/src/libstd/rt/args.rs @@ -0,0 +1,125 @@ +// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Global storage for command line arguments +//! +//! The current incarnation of the Rust runtime expects for +//! the processes `argc` and `argv` arguments to be stored +//! in a globally-accessible location for use by the `os` module. +//! +//! XXX: Would be nice for this to not exist. +//! XXX: This has a lot of C glue for lack of globals. + +use libc; +use option::{Option, Some, None}; +use str; +use uint; +use unstable::finally::Finally; +use util; + +/// One-time global initialization. +pub unsafe fn init(argc: int, argv: **u8) { + let args = load_argc_and_argv(argc, argv); + put(args); +} + +/// One-time global cleanup. +pub fn cleanup() { + rtassert!(take().is_some()); +} + +/// Take the global arguments from global storage. +pub fn take() -> Option<~[~str]> { + with_lock(|| unsafe { + let ptr = get_global_ptr(); + let val = util::replace(&mut *ptr, None); + val.map(|s: &~~[~str]| (**s).clone()) + }) +} + +/// Give the global arguments to global storage. +/// +/// It is an error if the arguments already exist. +pub fn put(args: ~[~str]) { + with_lock(|| unsafe { + let ptr = get_global_ptr(); + rtassert!((*ptr).is_none()); + (*ptr) = Some(~args.clone()); + }) +} + +/// Make a clone of the global arguments. +pub fn clone() -> Option<~[~str]> { + with_lock(|| unsafe { + let ptr = get_global_ptr(); + (*ptr).map(|s: &~~[~str]| (**s).clone()) + }) +} + +fn with_lock(f: &fn() -> T) -> T { + do (|| { + unsafe { + rust_take_global_args_lock(); + f() + } + }).finally { + unsafe { + rust_drop_global_args_lock(); + } + } +} + +fn get_global_ptr() -> *mut Option<~~[~str]> { + unsafe { rust_get_global_args_ptr() } +} + +// Copied from `os`. +unsafe fn load_argc_and_argv(argc: int, argv: **u8) -> ~[~str] { + let mut args = ~[]; + for uint::range(0, argc as uint) |i| { + args.push(str::raw::from_c_str(*(argv as **libc::c_char).offset(i))); + } + return args; +} + +extern { + fn rust_take_global_args_lock(); + fn rust_drop_global_args_lock(); + fn rust_get_global_args_ptr() -> *mut Option<~~[~str]>; +} + +#[cfg(test)] +mod tests { + use option::{Some, None}; + use super::*; + use unstable::finally::Finally; + + #[test] + fn smoke_test() { + // Preserve the actual global state. + let saved_value = take(); + + let expected = ~[~"happy", ~"today?"]; + + put(expected.clone()); + assert!(clone() == Some(expected.clone())); + assert!(take() == Some(expected.clone())); + assert!(take() == None); + + do (|| { + }).finally { + // Restore the actual global state. 
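            // (This restore runs inside `finally`, so the saved value goes back
            // even if one of the assertions above fails; the slot behind
            // get_global_ptr() is process-global and shared with any other
            // caller of rt::args.)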
+ match saved_value { + Some(ref args) => put(args.clone()), + None => () + } + } + } +} diff --git a/src/libstd/rt/mod.rs b/src/libstd/rt/mod.rs index a80fb15bad798..7e1d18f954860 100644 --- a/src/libstd/rt/mod.rs +++ b/src/libstd/rt/mod.rs @@ -159,6 +159,9 @@ pub mod metrics; /// Just stuff pub mod util; +// Global command line argument storage +pub mod args; + /// Set up a default runtime configuration, given compiler-supplied arguments. /// /// This is invoked by the `start` _language item_ (unstable::lang) to @@ -173,20 +176,28 @@ pub mod util; /// # Return value /// /// The return value is used as the process return code. 0 on success, 101 on error. -pub fn start(_argc: int, _argv: **u8, crate_map: *u8, main: ~fn()) -> int { +pub fn start(argc: int, argv: **u8, crate_map: *u8, main: ~fn()) -> int { - init(crate_map); + init(argc, argv, crate_map); let exit_code = run(main); cleanup(); return exit_code; } -/// One-time runtime initialization. Currently all this does is set up logging -/// based on the RUST_LOG environment variable. -pub fn init(crate_map: *u8) { - logging::init(crate_map); - unsafe { rust_update_gc_metadata(crate_map) } +/// One-time runtime initialization. +/// +/// Initializes global state, including frobbing +/// the crate's logging flags, registering GC +/// metadata, and storing the process arguments. +pub fn init(argc: int, argv: **u8, crate_map: *u8) { + // XXX: Derefing these pointers is not safe. + // Need to propagate the unsafety to `start`. + unsafe { + args::init(argc, argv); + logging::init(crate_map); + rust_update_gc_metadata(crate_map); + } extern { fn rust_update_gc_metadata(crate_map: *u8); @@ -195,6 +206,7 @@ pub fn init(crate_map: *u8) { /// One-time runtime cleanup. pub fn cleanup() { + args::cleanup(); global_heap::cleanup(); } diff --git a/src/rt/rust_builtin.cpp b/src/rt/rust_builtin.cpp index 8e494cb577b55..86b8881b9f2ed 100644 --- a/src/rt/rust_builtin.cpp +++ b/src/rt/rust_builtin.cpp @@ -932,6 +932,24 @@ rust_get_num_cpus() { return get_num_cpus(); } +static lock_and_signal global_args_lock; +static uintptr_t global_args_ptr = 0; + +extern "C" CDECL void +rust_take_global_args_lock() { + global_args_lock.lock(); +} + +extern "C" CDECL void +rust_drop_global_args_lock() { + global_args_lock.unlock(); +} + +extern "C" CDECL uintptr_t* +rust_get_global_args_ptr() { + return &global_args_ptr; +} + // // Local Variables: // mode: C++ diff --git a/src/rt/rustrt.def.in b/src/rt/rustrt.def.in index 425f17d429007..ca813c5d3ae5d 100644 --- a/src/rt/rustrt.def.in +++ b/src/rt/rustrt.def.in @@ -242,3 +242,4 @@ rust_drop_env_lock rust_update_log_settings rust_running_on_valgrind rust_get_num_cpus +rust_get_global_args_ptr \ No newline at end of file From 95eb01957bf23922abdf083f677c6c2d6927713a Mon Sep 17 00:00:00 2001 From: Brian Anderson Date: Fri, 21 Jun 2013 16:52:07 -0700 Subject: [PATCH 074/111] std: Make console log off/on controls work with newsched --- src/libstd/logging.rs | 24 ++++++++++++++++++++---- src/libstd/rt/logging.rs | 20 ++++++++++++++++---- src/rt/rust_builtin.cpp | 12 +++++++++--- src/rt/rust_log.cpp | 12 ++++++++---- src/rt/rustrt.def.in | 1 + 5 files changed, 54 insertions(+), 15 deletions(-) diff --git a/src/libstd/logging.rs b/src/libstd/logging.rs index c2f854179b8dd..743b71e33ea80 100644 --- a/src/libstd/logging.rs +++ b/src/libstd/logging.rs @@ -11,13 +11,20 @@ //! 
Logging use option::*; +use os; use either::*; +use rt; +use rt::OldTaskContext; use rt::logging::{Logger, StdErrLogger}; /// Turns on logging to stdout globally pub fn console_on() { - unsafe { - rustrt::rust_log_console_on(); + if rt::context() == OldTaskContext { + unsafe { + rustrt::rust_log_console_on(); + } + } else { + rt::logging::console_on(); } } @@ -29,8 +36,17 @@ pub fn console_on() { * the RUST_LOG environment variable */ pub fn console_off() { - unsafe { - rustrt::rust_log_console_off(); + // If RUST_LOG is set then the console can't be turned off + if os::getenv("RUST_LOG").is_some() { + return; + } + + if rt::context() == OldTaskContext { + unsafe { + rustrt::rust_log_console_off(); + } + } else { + rt::logging::console_off(); } } diff --git a/src/libstd/rt/logging.rs b/src/libstd/rt/logging.rs index a0d0539768912..84186180aa650 100644 --- a/src/libstd/rt/logging.rs +++ b/src/libstd/rt/logging.rs @@ -9,6 +9,7 @@ // except according to those terms. use either::*; +use libc; pub trait Logger { fn log(&mut self, msg: Either<~str, &'static str>); @@ -20,6 +21,10 @@ impl Logger for StdErrLogger { fn log(&mut self, msg: Either<~str, &'static str>) { use io::{Writer, WriterUtil}; + if !should_log_console() { + return; + } + let s: &str = match msg { Left(ref s) => { let s: &str = *s; @@ -44,7 +49,6 @@ pub fn init(crate_map: *u8) { use str; use ptr; use option::{Some, None}; - use libc::c_char; let log_spec = os::getenv("RUST_LOG"); match log_spec { @@ -61,8 +65,16 @@ pub fn init(crate_map: *u8) { } } } +} - extern { - fn rust_update_log_settings(crate_map: *u8, settings: *c_char); - } +pub fn console_on() { unsafe { rust_log_console_on() } } +pub fn console_off() { unsafe { rust_log_console_off() } } +fn should_log_console() -> bool { unsafe { rust_should_log_console() != 0 } } + +extern { + fn rust_update_log_settings(crate_map: *u8, settings: *libc::c_char); + fn rust_log_console_on(); + fn rust_log_console_off(); + fn rust_should_log_console() -> libc::uintptr_t; } + diff --git a/src/rt/rust_builtin.cpp b/src/rt/rust_builtin.cpp index 86b8881b9f2ed..f0b68d4a1569a 100644 --- a/src/rt/rust_builtin.cpp +++ b/src/rt/rust_builtin.cpp @@ -591,12 +591,18 @@ rust_log_console_on() { log_console_on(); } -extern void log_console_off(rust_env *env); +extern void log_console_off(); extern "C" CDECL void rust_log_console_off() { - rust_task *task = rust_get_current_task(); - log_console_off(task->kernel->env); + log_console_off(); +} + +extern bool should_log_console(); + +extern "C" CDECL uintptr_t +rust_should_log_console() { + return (uintptr_t)should_log_console(); } extern "C" CDECL void diff --git a/src/rt/rust_log.cpp b/src/rt/rust_log.cpp index df24f569495b4..8179c53e96d5e 100644 --- a/src/rt/rust_log.cpp +++ b/src/rt/rust_log.cpp @@ -43,11 +43,15 @@ log_console_on() { * overridden by the environment. 
*/ void -log_console_off(rust_env *env) { +log_console_off() { scoped_lock with(_log_lock); - if (env->logspec == NULL) { - _log_to_console = false; - } + _log_to_console = false; +} + +bool +should_log_console() { + scoped_lock with(_log_lock); + return _log_to_console; } rust_log::rust_log(rust_sched_loop *sched_loop) : diff --git a/src/rt/rustrt.def.in b/src/rt/rustrt.def.in index ca813c5d3ae5d..950662b91f8e1 100644 --- a/src/rt/rustrt.def.in +++ b/src/rt/rustrt.def.in @@ -37,6 +37,7 @@ rust_list_dir_wfd_size rust_list_dir_wfd_fp_buf rust_log_console_on rust_log_console_off +rust_should_log_console rust_set_environ rust_unset_sigprocmask rust_sched_current_nonlazy_threads From aa9210d25afb3779e1d95722b73b62a7be6274fe Mon Sep 17 00:00:00 2001 From: Brian Anderson Date: Fri, 21 Jun 2013 19:40:00 -0700 Subject: [PATCH 075/111] std: Rewrite vec_reserve_shared_actual in Rust --- src/libstd/at_vec.rs | 62 +++++++++++++++++++++++---------- src/libstd/rt/local_heap.rs | 9 +++++ src/libstd/vec.rs | 10 ++---- src/rt/rust_builtin.cpp | 12 ++++--- src/rt/rust_util.h | 10 ------ src/rt/rustrt.def.in | 3 +- src/test/run-pass/extern-pub.rs | 6 +--- 7 files changed, 67 insertions(+), 45 deletions(-) diff --git a/src/libstd/at_vec.rs b/src/libstd/at_vec.rs index 3875847ff9bbd..18dfbd82c5ae7 100644 --- a/src/libstd/at_vec.rs +++ b/src/libstd/at_vec.rs @@ -23,20 +23,6 @@ use vec; /// Code for dealing with @-vectors. This is pretty incomplete, and /// contains a bunch of duplication from the code for ~-vectors. -pub mod rustrt { - use libc; - use sys; - use vec; - - #[abi = "cdecl"] - #[link_name = "rustrt"] - pub extern { - pub unsafe fn vec_reserve_shared_actual(t: *sys::TypeDesc, - v: **vec::raw::VecRepr, - n: libc::size_t); - } -} - /// Returns the number of elements the vector can hold without reallocating #[inline] pub fn capacity(v: @[T]) -> uint { @@ -189,7 +175,7 @@ pub mod traits { pub mod traits {} pub mod raw { - use at_vec::{capacity, rustrt}; + use at_vec::capacity; use cast::{transmute, transmute_copy}; use libc; use ptr; @@ -197,6 +183,8 @@ pub mod raw { use uint; use unstable::intrinsics::{move_val_init}; use vec; + use vec::UnboxedVecRepr; + use sys::TypeDesc; pub type VecRepr = vec::raw::VecRepr; pub type SliceRepr = vec::raw::SliceRepr; @@ -257,9 +245,47 @@ pub mod raw { pub unsafe fn reserve(v: &mut @[T], n: uint) { // Only make the (slow) call into the runtime if we have to if capacity(*v) < n { - let ptr: **VecRepr = transmute(v); - rustrt::vec_reserve_shared_actual(sys::get_type_desc::(), - ptr, n as libc::size_t); + let ptr: *mut *mut VecRepr = transmute(v); + let ty = sys::get_type_desc::(); + return reserve_raw(ty, ptr, n); + } + } + + // Implementation detail. 
Shouldn't be public + #[allow(missing_doc)] + pub fn reserve_raw(ty: *TypeDesc, ptr: *mut *mut VecRepr, n: uint) { + + unsafe { + let size_in_bytes = n * (*ty).size; + if size_in_bytes > (**ptr).unboxed.alloc { + let total_size = size_in_bytes + sys::size_of::(); + // XXX: UnboxedVecRepr has an extra u8 at the end + let total_size = total_size - sys::size_of::(); + (*ptr) = local_realloc(*ptr as *(), total_size) as *mut VecRepr; + (**ptr).unboxed.alloc = size_in_bytes; + } + } + + fn local_realloc(ptr: *(), size: uint) -> *() { + use rt; + use rt::OldTaskContext; + use rt::local::Local; + use rt::task::Task; + + if rt::context() == OldTaskContext { + unsafe { + return rust_local_realloc(ptr, size as libc::size_t); + } + + extern { + #[fast_ffi] + fn rust_local_realloc(ptr: *(), size: libc::size_t) -> *(); + } + } else { + do Local::borrow:: |task| { + task.heap.realloc(ptr as *libc::c_void, size) as *() + } + } } } diff --git a/src/libstd/rt/local_heap.rs b/src/libstd/rt/local_heap.rs index 6bf228a1b2201..38cd25f9da5d0 100644 --- a/src/libstd/rt/local_heap.rs +++ b/src/libstd/rt/local_heap.rs @@ -49,6 +49,12 @@ impl LocalHeap { } } + pub fn realloc(&mut self, ptr: *OpaqueBox, size: uint) -> *OpaqueBox { + unsafe { + return rust_boxed_region_realloc(self.boxed_region, ptr, size as size_t); + } + } + pub fn free(&mut self, box: *OpaqueBox) { unsafe { return rust_boxed_region_free(self.boxed_region, box); @@ -76,5 +82,8 @@ extern { fn rust_boxed_region_malloc(region: *BoxedRegion, td: *TypeDesc, size: size_t) -> *OpaqueBox; + fn rust_boxed_region_realloc(region: *BoxedRegion, + ptr: *OpaqueBox, + size: size_t) -> *OpaqueBox; fn rust_boxed_region_free(region: *BoxedRegion, box: *OpaqueBox); } diff --git a/src/libstd/vec.rs b/src/libstd/vec.rs index 7f683d0070fb3..4339153c43ee4 100644 --- a/src/libstd/vec.rs +++ b/src/libstd/vec.rs @@ -48,12 +48,8 @@ pub mod rustrt { // to ~[] and reserve_shared_actual applies to @[]. #[fast_ffi] unsafe fn vec_reserve_shared(t: *sys::TypeDesc, - v: **raw::VecRepr, + v: *mut *mut raw::VecRepr, n: libc::size_t); - #[fast_ffi] - unsafe fn vec_reserve_shared_actual(t: *sys::TypeDesc, - v: **raw::VecRepr, - n: libc::size_t); } } @@ -79,11 +75,11 @@ pub fn reserve(v: &mut ~[T], n: uint) { use managed; if capacity(v) < n { unsafe { - let ptr: **raw::VecRepr = cast::transmute(v); + let ptr: *mut *mut raw::VecRepr = cast::transmute(v); let td = sys::get_type_desc::(); if ((**ptr).box_header.ref_count == managed::raw::RC_MANAGED_UNIQUE) { - rustrt::vec_reserve_shared_actual(td, ptr, n as libc::size_t); + ::at_vec::raw::reserve_raw(td, ptr, n); } else { rustrt::vec_reserve_shared(td, ptr, n as libc::size_t); } diff --git a/src/rt/rust_builtin.cpp b/src/rt/rust_builtin.cpp index f0b68d4a1569a..9ed389b178aed 100644 --- a/src/rt/rust_builtin.cpp +++ b/src/rt/rust_builtin.cpp @@ -68,11 +68,10 @@ rust_env_pairs() { } #endif -extern "C" CDECL void -vec_reserve_shared_actual(type_desc* ty, rust_vec_box** vp, - size_t n_elts) { +extern "C" CDECL void * +rust_local_realloc(rust_opaque_box *ptr, size_t size) { rust_task *task = rust_get_current_task(); - reserve_vec_exact_shared(task, vp, n_elts * ty->size); + return task->boxed.realloc(ptr, size); } // This is completely misnamed. 
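To make the header arithmetic in `reserve_raw` above concrete: the new allocation is the element bytes plus the size of the boxed vector header, minus the single byte of inline data that the header type already declares (the "extra u8" the XXX comment refers to). A minimal sketch of that calculation; the helper name and the `header_size` parameter are illustrative stand-ins, not items defined by this patch:

    // Hypothetical helper mirroring the size calculation in reserve_raw.
    // `header_size` stands in for the size of the boxed vector header type.
    fn realloc_size(n_elts: uint, elt_size: uint, header_size: uint) -> uint {
        let size_in_bytes = n_elts * elt_size;
        // Drop the header's one-byte data placeholder so that byte is not
        // counted twice when the element bytes are added on.
        size_in_bytes + header_size - 1
    }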
@@ -899,6 +898,11 @@ rust_boxed_region_malloc(boxed_region *region, type_desc *td, size_t size) { return region->malloc(td, size); } +extern "C" CDECL rust_opaque_box* +rust_boxed_region_realloc(boxed_region *region, rust_opaque_box *ptr, size_t size) { + return region->realloc(ptr, size); +} + extern "C" CDECL void rust_boxed_region_free(boxed_region *region, rust_opaque_box *box) { region->free(box); diff --git a/src/rt/rust_util.h b/src/rt/rust_util.h index 5b8378b0ad35f..242c2ef0a81ad 100644 --- a/src/rt/rust_util.h +++ b/src/rt/rust_util.h @@ -57,16 +57,6 @@ vec_data(rust_vec *v) { return reinterpret_cast(v->data); } -inline void reserve_vec_exact_shared(rust_task* task, rust_vec_box** vpp, - size_t size) { - rust_opaque_box** ovpp = (rust_opaque_box**)vpp; - if (size > (*vpp)->body.alloc) { - *vpp = (rust_vec_box*)task->boxed.realloc( - *ovpp, size + sizeof(rust_vec)); - (*vpp)->body.alloc = size; - } -} - inline void reserve_vec_exact(rust_vec_box** vpp, size_t size) { if (size > (*vpp)->body.alloc) { diff --git a/src/rt/rustrt.def.in b/src/rt/rustrt.def.in index 950662b91f8e1..9add8d537af0c 100644 --- a/src/rt/rustrt.def.in +++ b/src/rt/rustrt.def.in @@ -53,7 +53,7 @@ rust_get_stack_segment rust_get_c_stack rust_log_str start_task -vec_reserve_shared_actual +rust_local_realloc vec_reserve_shared task_clear_event_reject task_wait_event @@ -231,6 +231,7 @@ rust_delete_memory_region rust_new_boxed_region rust_delete_boxed_region rust_boxed_region_malloc +rust_boxed_region_realloc rust_boxed_region_free rust_try rust_begin_unwind diff --git a/src/test/run-pass/extern-pub.rs b/src/test/run-pass/extern-pub.rs index 29b0457fc0507..e722c4f5c6a90 100644 --- a/src/test/run-pass/extern-pub.rs +++ b/src/test/run-pass/extern-pub.rs @@ -1,11 +1,7 @@ use std::libc; -use std::sys; -use std::vec; extern { - pub unsafe fn vec_reserve_shared_actual(t: *sys::TypeDesc, - v: **vec::raw::VecRepr, - n: libc::size_t); + pub unsafe fn debug_get_stk_seg() -> *libc::c_void; } pub fn main() { From a09972db3545344048b90e90d1f1821b621a38b9 Mon Sep 17 00:00:00 2001 From: Brian Anderson Date: Sat, 22 Jun 2013 16:52:40 -0700 Subject: [PATCH 076/111] std: Move dynamic borrowck code from unstable::lang to rt::borrowck --- src/libstd/cleanup.rs | 2 +- src/libstd/rt/borrowck.rs | 283 ++++++++++++++++++++++++++++++++++++ src/libstd/rt/mod.rs | 3 + src/libstd/unstable/lang.rs | 235 +----------------------------- 4 files changed, 294 insertions(+), 229 deletions(-) create mode 100644 src/libstd/rt/borrowck.rs diff --git a/src/libstd/cleanup.rs b/src/libstd/cleanup.rs index d1460b7a3c96b..dd7e85ce7e110 100644 --- a/src/libstd/cleanup.rs +++ b/src/libstd/cleanup.rs @@ -15,7 +15,7 @@ use ptr::mut_null; use repr::BoxRepr; use sys::TypeDesc; use cast::transmute; -#[cfg(not(test))] use unstable::lang::clear_task_borrow_list; +#[cfg(not(test))] use rt::borrowck::clear_task_borrow_list; #[cfg(not(test))] use ptr::to_unsafe_ptr; diff --git a/src/libstd/rt/borrowck.rs b/src/libstd/rt/borrowck.rs new file mode 100644 index 0000000000000..e057f6e963714 --- /dev/null +++ b/src/libstd/rt/borrowck.rs @@ -0,0 +1,283 @@ +// Copyright 2012 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use cast::transmute; +use libc::{c_char, c_void, size_t, STDERR_FILENO}; +use io; +use io::{Writer, WriterUtil}; +use managed::raw::BoxRepr; +use option::{Option, None, Some}; +use uint; +use str; +use str::OwnedStr; +use sys; +use vec::ImmutableVector; + +#[allow(non_camel_case_types)] +type rust_task = c_void; + +pub static FROZEN_BIT: uint = 1 << (uint::bits - 1); +pub static MUT_BIT: uint = 1 << (uint::bits - 2); +static ALL_BITS: uint = FROZEN_BIT | MUT_BIT; + +#[deriving(Eq)] +struct BorrowRecord { + box: *mut BoxRepr, + file: *c_char, + line: size_t +} + +fn try_take_task_borrow_list() -> Option<~[BorrowRecord]> { + unsafe { + let cur_task: *rust_task = rust_try_get_task(); + if cur_task.is_not_null() { + let ptr = rust_take_task_borrow_list(cur_task); + if ptr.is_null() { + None + } else { + let v: ~[BorrowRecord] = transmute(ptr); + Some(v) + } + } else { + None + } + } +} + +fn swap_task_borrow_list(f: &fn(~[BorrowRecord]) -> ~[BorrowRecord]) { + unsafe { + let cur_task: *rust_task = rust_try_get_task(); + if cur_task.is_not_null() { + let mut borrow_list: ~[BorrowRecord] = { + let ptr = rust_take_task_borrow_list(cur_task); + if ptr.is_null() { ~[] } else { transmute(ptr) } + }; + borrow_list = f(borrow_list); + rust_set_task_borrow_list(cur_task, transmute(borrow_list)); + } + } +} + +pub unsafe fn clear_task_borrow_list() { + // pub because it is used by the box annihilator. + let _ = try_take_task_borrow_list(); +} + +unsafe fn fail_borrowed(box: *mut BoxRepr, file: *c_char, line: size_t) { + debug_borrow("fail_borrowed: ", box, 0, 0, file, line); + + match try_take_task_borrow_list() { + None => { // not recording borrows + let msg = "borrowed"; + do str::as_buf(msg) |msg_p, _| { + sys::begin_unwind_(msg_p as *c_char, file, line); + } + } + Some(borrow_list) => { // recording borrows + let mut msg = ~"borrowed"; + let mut sep = " at "; + for borrow_list.rev_iter().advance |entry| { + if entry.box == box { + msg.push_str(sep); + let filename = str::raw::from_c_str(entry.file); + msg.push_str(filename); + msg.push_str(fmt!(":%u", entry.line as uint)); + sep = " and at "; + } + } + do str::as_buf(msg) |msg_p, _| { + sys::begin_unwind_(msg_p as *c_char, file, line) + } + } + } +} + +/// Because this code is so perf. sensitive, use a static constant so that +/// debug printouts are compiled out most of the time. +static ENABLE_DEBUG: bool = false; + +#[inline] +unsafe fn debug_borrow(tag: &'static str, + p: *const T, + old_bits: uint, + new_bits: uint, + filename: *c_char, + line: size_t) { + //! A useful debugging function that prints a pointer + tag + newline + //! without allocating memory. 
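    //! (The write_hex/write_cstr helpers below format into fixed-size stack
    //! buffers and write straight to STDERR_FILENO, which is what keeps this
    //! path free of allocation; the slow printing itself only happens when
    //! ENABLE_DEBUG and the runtime debug_borrow flag are both set.)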
+ + if ENABLE_DEBUG && ::rt::env::get().debug_borrow { + debug_borrow_slow(tag, p, old_bits, new_bits, filename, line); + } + + unsafe fn debug_borrow_slow(tag: &'static str, + p: *const T, + old_bits: uint, + new_bits: uint, + filename: *c_char, + line: size_t) { + let dbg = STDERR_FILENO as io::fd_t; + dbg.write_str(tag); + dbg.write_hex(p as uint); + dbg.write_str(" "); + dbg.write_hex(old_bits); + dbg.write_str(" "); + dbg.write_hex(new_bits); + dbg.write_str(" "); + dbg.write_cstr(filename); + dbg.write_str(":"); + dbg.write_hex(line as uint); + dbg.write_str("\n"); + } +} + +trait DebugPrints { + fn write_hex(&self, val: uint); + unsafe fn write_cstr(&self, str: *c_char); +} + +impl DebugPrints for io::fd_t { + fn write_hex(&self, mut i: uint) { + let letters = ['0', '1', '2', '3', '4', '5', '6', '7', '8', + '9', 'a', 'b', 'c', 'd', 'e', 'f']; + static uint_nibbles: uint = ::uint::bytes << 1; + let mut buffer = [0_u8, ..uint_nibbles+1]; + let mut c = uint_nibbles; + while c > 0 { + c -= 1; + buffer[c] = letters[i & 0xF] as u8; + i >>= 4; + } + self.write(buffer.slice(0, uint_nibbles)); + } + + unsafe fn write_cstr(&self, p: *c_char) { + use libc::strlen; + use vec; + + let len = strlen(p); + let p: *u8 = transmute(p); + do vec::raw::buf_as_slice(p, len as uint) |s| { + self.write(s); + } + } +} + +#[inline] +pub unsafe fn borrow_as_imm(a: *u8, file: *c_char, line: size_t) -> uint { + let a: *mut BoxRepr = transmute(a); + let old_ref_count = (*a).header.ref_count; + let new_ref_count = old_ref_count | FROZEN_BIT; + + debug_borrow("borrow_as_imm:", a, old_ref_count, new_ref_count, file, line); + + if (old_ref_count & MUT_BIT) != 0 { + fail_borrowed(a, file, line); + } + + (*a).header.ref_count = new_ref_count; + + old_ref_count +} + +#[inline] +pub unsafe fn borrow_as_mut(a: *u8, file: *c_char, line: size_t) -> uint { + let a: *mut BoxRepr = transmute(a); + let old_ref_count = (*a).header.ref_count; + let new_ref_count = old_ref_count | MUT_BIT | FROZEN_BIT; + + debug_borrow("borrow_as_mut:", a, old_ref_count, new_ref_count, file, line); + + if (old_ref_count & (MUT_BIT|FROZEN_BIT)) != 0 { + fail_borrowed(a, file, line); + } + + (*a).header.ref_count = new_ref_count; + + old_ref_count +} + +pub unsafe fn record_borrow(a: *u8, old_ref_count: uint, + file: *c_char, line: size_t) { + if (old_ref_count & ALL_BITS) == 0 { + // was not borrowed before + let a: *mut BoxRepr = transmute(a); + debug_borrow("record_borrow:", a, old_ref_count, 0, file, line); + do swap_task_borrow_list |borrow_list| { + let mut borrow_list = borrow_list; + borrow_list.push(BorrowRecord {box: a, file: file, line: line}); + borrow_list + } + } +} + +pub unsafe fn unrecord_borrow(a: *u8, old_ref_count: uint, + file: *c_char, line: size_t) { + if (old_ref_count & ALL_BITS) == 0 { + // was not borrowed before, so we should find the record at + // the end of the list + let a: *mut BoxRepr = transmute(a); + debug_borrow("unrecord_borrow:", a, old_ref_count, 0, file, line); + do swap_task_borrow_list |borrow_list| { + let mut borrow_list = borrow_list; + assert!(!borrow_list.is_empty()); + let br = borrow_list.pop(); + if br.box != a || br.file != file || br.line != line { + let err = fmt!("wrong borrow found, br=%?", br); + do str::as_buf(err) |msg_p, _| { + sys::begin_unwind_(msg_p as *c_char, file, line) + } + } + borrow_list + } + } +} + +#[inline] +pub unsafe fn return_to_mut(a: *u8, orig_ref_count: uint, + file: *c_char, line: size_t) { + // Sometimes the box is null, if it is conditionally frozen. + // See e.g. 
#4904. + if !a.is_null() { + let a: *mut BoxRepr = transmute(a); + let old_ref_count = (*a).header.ref_count; + let new_ref_count = + (old_ref_count & !ALL_BITS) | (orig_ref_count & ALL_BITS); + + debug_borrow("return_to_mut:", + a, old_ref_count, new_ref_count, file, line); + + (*a).header.ref_count = new_ref_count; + } +} + +#[inline] +pub unsafe fn check_not_borrowed(a: *u8, + file: *c_char, + line: size_t) { + let a: *mut BoxRepr = transmute(a); + let ref_count = (*a).header.ref_count; + debug_borrow("check_not_borrowed:", a, ref_count, 0, file, line); + if (ref_count & FROZEN_BIT) != 0 { + fail_borrowed(a, file, line); + } +} + + +extern { + #[rust_stack] + pub fn rust_take_task_borrow_list(task: *rust_task) -> *c_void; + + #[rust_stack] + pub fn rust_set_task_borrow_list(task: *rust_task, map: *c_void); + + #[rust_stack] + pub fn rust_try_get_task() -> *rust_task; +} diff --git a/src/libstd/rt/mod.rs b/src/libstd/rt/mod.rs index 7e1d18f954860..6c4e5742eb333 100644 --- a/src/libstd/rt/mod.rs +++ b/src/libstd/rt/mod.rs @@ -162,6 +162,9 @@ pub mod util; // Global command line argument storage pub mod args; +// Support for dynamic borrowck +pub mod borrowck; + /// Set up a default runtime configuration, given compiler-supplied arguments. /// /// This is invoked by the `start` _language item_ (unstable::lang) to diff --git a/src/libstd/unstable/lang.rs b/src/libstd/unstable/lang.rs index fa067da2ebc76..d71a672eb3552 100644 --- a/src/libstd/unstable/lang.rs +++ b/src/libstd/unstable/lang.rs @@ -14,7 +14,6 @@ use iterator::IteratorUtil; use uint; use cast::transmute; use libc::{c_char, c_uchar, c_void, size_t, uintptr_t, c_int, STDERR_FILENO}; -use managed::raw::BoxRepr; use str; use sys; use rt::{context, OldTaskContext}; @@ -23,14 +22,11 @@ use rt::local::Local; use option::{Option, Some, None}; use io; use rt::global_heap; +use rt::borrowck; #[allow(non_camel_case_types)] pub type rust_task = c_void; -pub static FROZEN_BIT: uint = 1 << (uint::bits - 1); -pub static MUT_BIT: uint = 1 << (uint::bits - 2); -static ALL_BITS: uint = FROZEN_BIT | MUT_BIT; - pub mod rustrt { use unstable::lang::rust_task; use libc::{c_void, c_char, uintptr_t}; @@ -50,12 +46,6 @@ pub mod rustrt { #[fast_ffi] unsafe fn rust_upcall_free_noswitch(ptr: *c_char); - #[rust_stack] - fn rust_take_task_borrow_list(task: *rust_task) -> *c_void; - - #[rust_stack] - fn rust_set_task_borrow_list(task: *rust_task, map: *c_void); - #[rust_stack] fn rust_try_get_task() -> *rust_task; @@ -78,78 +68,6 @@ pub fn fail_bounds_check(file: *c_char, line: size_t, } } -#[deriving(Eq)] -struct BorrowRecord { - box: *mut BoxRepr, - file: *c_char, - line: size_t -} - -fn try_take_task_borrow_list() -> Option<~[BorrowRecord]> { - unsafe { - let cur_task: *rust_task = rustrt::rust_try_get_task(); - if cur_task.is_not_null() { - let ptr = rustrt::rust_take_task_borrow_list(cur_task); - if ptr.is_null() { - None - } else { - let v: ~[BorrowRecord] = transmute(ptr); - Some(v) - } - } else { - None - } - } -} - -fn swap_task_borrow_list(f: &fn(~[BorrowRecord]) -> ~[BorrowRecord]) { - unsafe { - let cur_task: *rust_task = rustrt::rust_try_get_task(); - if cur_task.is_not_null() { - let mut borrow_list: ~[BorrowRecord] = { - let ptr = rustrt::rust_take_task_borrow_list(cur_task); - if ptr.is_null() { ~[] } else { transmute(ptr) } - }; - borrow_list = f(borrow_list); - rustrt::rust_set_task_borrow_list(cur_task, transmute(borrow_list)); - } - } -} - -pub unsafe fn clear_task_borrow_list() { - // pub because it is used by the box annihilator. 
- let _ = try_take_task_borrow_list(); -} - -unsafe fn fail_borrowed(box: *mut BoxRepr, file: *c_char, line: size_t) { - debug_borrow("fail_borrowed: ", box, 0, 0, file, line); - - match try_take_task_borrow_list() { - None => { // not recording borrows - let msg = "borrowed"; - do str::as_buf(msg) |msg_p, _| { - fail_(msg_p as *c_char, file, line); - } - } - Some(borrow_list) => { // recording borrows - let mut msg = ~"borrowed"; - let mut sep = " at "; - for borrow_list.rev_iter().advance |entry| { - if entry.box == box { - msg.push_str(sep); - let filename = str::raw::from_c_str(entry.file); - msg.push_str(filename); - msg.push_str(fmt!(":%u", entry.line as uint)); - sep = " and at "; - } - } - do str::as_buf(msg) |msg_p, _| { - fail_(msg_p as *c_char, file, line) - } - } - } -} - // FIXME #4942: Make these signatures agree with exchange_alloc's signatures #[lang="exchange_malloc"] #[inline] @@ -157,77 +75,6 @@ pub unsafe fn exchange_malloc(td: *c_char, size: uintptr_t) -> *c_char { transmute(global_heap::malloc(transmute(td), transmute(size))) } -/// Because this code is so perf. sensitive, use a static constant so that -/// debug printouts are compiled out most of the time. -static ENABLE_DEBUG: bool = false; - -#[inline] -unsafe fn debug_borrow(tag: &'static str, - p: *const T, - old_bits: uint, - new_bits: uint, - filename: *c_char, - line: size_t) { - //! A useful debugging function that prints a pointer + tag + newline - //! without allocating memory. - - if ENABLE_DEBUG && ::rt::env::get().debug_borrow { - debug_borrow_slow(tag, p, old_bits, new_bits, filename, line); - } - - unsafe fn debug_borrow_slow(tag: &'static str, - p: *const T, - old_bits: uint, - new_bits: uint, - filename: *c_char, - line: size_t) { - let dbg = STDERR_FILENO as io::fd_t; - dbg.write_str(tag); - dbg.write_hex(p as uint); - dbg.write_str(" "); - dbg.write_hex(old_bits); - dbg.write_str(" "); - dbg.write_hex(new_bits); - dbg.write_str(" "); - dbg.write_cstr(filename); - dbg.write_str(":"); - dbg.write_hex(line as uint); - dbg.write_str("\n"); - } -} - -trait DebugPrints { - fn write_hex(&self, val: uint); - unsafe fn write_cstr(&self, str: *c_char); -} - -impl DebugPrints for io::fd_t { - fn write_hex(&self, mut i: uint) { - let letters = ['0', '1', '2', '3', '4', '5', '6', '7', '8', - '9', 'a', 'b', 'c', 'd', 'e', 'f']; - static uint_nibbles: uint = ::uint::bytes << 1; - let mut buffer = [0_u8, ..uint_nibbles+1]; - let mut c = uint_nibbles; - while c > 0 { - c -= 1; - buffer[c] = letters[i & 0xF] as u8; - i >>= 4; - } - self.write(buffer.slice(0, uint_nibbles)); - } - - unsafe fn write_cstr(&self, p: *c_char) { - use libc::strlen; - use vec; - - let len = strlen(p); - let p: *u8 = transmute(p); - do vec::raw::buf_as_slice(p, len as uint) |s| { - self.write(s); - } - } -} - // NB: Calls to free CANNOT be allowed to fail, as throwing an exception from // inside a landing pad may corrupt the state of the exception handler. If a // problem occurs, call exit instead. 
@@ -273,95 +120,32 @@ pub unsafe fn local_free(ptr: *c_char) { #[lang="borrow_as_imm"] #[inline] pub unsafe fn borrow_as_imm(a: *u8, file: *c_char, line: size_t) -> uint { - let a: *mut BoxRepr = transmute(a); - let old_ref_count = (*a).header.ref_count; - let new_ref_count = old_ref_count | FROZEN_BIT; - - debug_borrow("borrow_as_imm:", a, old_ref_count, new_ref_count, file, line); - - if (old_ref_count & MUT_BIT) != 0 { - fail_borrowed(a, file, line); - } - - (*a).header.ref_count = new_ref_count; - - old_ref_count + borrowck::borrow_as_imm(a, file, line) } #[lang="borrow_as_mut"] #[inline] pub unsafe fn borrow_as_mut(a: *u8, file: *c_char, line: size_t) -> uint { - let a: *mut BoxRepr = transmute(a); - let old_ref_count = (*a).header.ref_count; - let new_ref_count = old_ref_count | MUT_BIT | FROZEN_BIT; - - debug_borrow("borrow_as_mut:", a, old_ref_count, new_ref_count, file, line); - - if (old_ref_count & (MUT_BIT|FROZEN_BIT)) != 0 { - fail_borrowed(a, file, line); - } - - (*a).header.ref_count = new_ref_count; - - old_ref_count + borrowck::borrow_as_mut(a, file, line) } - #[lang="record_borrow"] pub unsafe fn record_borrow(a: *u8, old_ref_count: uint, file: *c_char, line: size_t) { - if (old_ref_count & ALL_BITS) == 0 { - // was not borrowed before - let a: *mut BoxRepr = transmute(a); - debug_borrow("record_borrow:", a, old_ref_count, 0, file, line); - do swap_task_borrow_list |borrow_list| { - let mut borrow_list = borrow_list; - borrow_list.push(BorrowRecord {box: a, file: file, line: line}); - borrow_list - } - } + borrowck::record_borrow(a, old_ref_count, file, line) } #[lang="unrecord_borrow"] pub unsafe fn unrecord_borrow(a: *u8, old_ref_count: uint, file: *c_char, line: size_t) { - if (old_ref_count & ALL_BITS) == 0 { - // was not borrowed before, so we should find the record at - // the end of the list - let a: *mut BoxRepr = transmute(a); - debug_borrow("unrecord_borrow:", a, old_ref_count, 0, file, line); - do swap_task_borrow_list |borrow_list| { - let mut borrow_list = borrow_list; - assert!(!borrow_list.is_empty()); - let br = borrow_list.pop(); - if br.box != a || br.file != file || br.line != line { - let err = fmt!("wrong borrow found, br=%?", br); - do str::as_buf(err) |msg_p, _| { - fail_(msg_p as *c_char, file, line) - } - } - borrow_list - } - } + borrowck::unrecord_borrow(a, old_ref_count, file, line) } #[lang="return_to_mut"] #[inline] pub unsafe fn return_to_mut(a: *u8, orig_ref_count: uint, file: *c_char, line: size_t) { - // Sometimes the box is null, if it is conditionally frozen. - // See e.g. #4904. 
- if !a.is_null() { - let a: *mut BoxRepr = transmute(a); - let old_ref_count = (*a).header.ref_count; - let new_ref_count = - (old_ref_count & !ALL_BITS) | (orig_ref_count & ALL_BITS); - - debug_borrow("return_to_mut:", - a, old_ref_count, new_ref_count, file, line); - - (*a).header.ref_count = new_ref_count; - } + borrowck::return_to_mut(a, orig_ref_count, file, line) } #[lang="check_not_borrowed"] @@ -369,12 +153,7 @@ pub unsafe fn return_to_mut(a: *u8, orig_ref_count: uint, pub unsafe fn check_not_borrowed(a: *u8, file: *c_char, line: size_t) { - let a: *mut BoxRepr = transmute(a); - let ref_count = (*a).header.ref_count; - debug_borrow("check_not_borrowed:", a, ref_count, 0, file, line); - if (ref_count & FROZEN_BIT) != 0 { - fail_borrowed(a, file, line); - } + borrowck::check_not_borrowed(a, file, line) } #[lang="strdup_uniq"] From 5e7c5d6c3d532e7b536b76044cd47b72b8eadaad Mon Sep 17 00:00:00 2001 From: Brian Anderson Date: Sat, 22 Jun 2013 01:09:06 -0700 Subject: [PATCH 077/111] std: Make box annihilator work with newsched --- src/libstd/cleanup.rs | 116 ++++-------------------------------- src/libstd/rt/comm.rs | 12 ---- src/libstd/rt/local_heap.rs | 50 +++++++++++++++- src/libstd/rt/mod.rs | 2 +- src/libstd/rt/task.rs | 21 +++++++ src/libstd/sys.rs | 5 +- src/libstd/unstable/lang.rs | 19 ++---- src/rt/rust_builtin.cpp | 6 ++ src/rt/rustrt.def.in | 3 +- 9 files changed, 102 insertions(+), 132 deletions(-) diff --git a/src/libstd/cleanup.rs b/src/libstd/cleanup.rs index dd7e85ce7e110..36c1fdf781b25 100644 --- a/src/libstd/cleanup.rs +++ b/src/libstd/cleanup.rs @@ -13,107 +13,14 @@ use libc::{c_char, c_void, intptr_t, uintptr_t}; use ptr::mut_null; use repr::BoxRepr; +use rt; +use rt::OldTaskContext; use sys::TypeDesc; use cast::transmute; -#[cfg(not(test))] use rt::borrowck::clear_task_borrow_list; #[cfg(not(test))] use ptr::to_unsafe_ptr; -/** - * Runtime structures - * - * NB: These must match the representation in the C++ runtime. 
- */ - type DropGlue<'self> = &'self fn(**TypeDesc, *c_void); -type FreeGlue<'self> = &'self fn(**TypeDesc, *c_void); - -type TaskID = uintptr_t; - -struct StackSegment { priv opaque: () } -struct Scheduler { priv opaque: () } -struct SchedulerLoop { priv opaque: () } -struct Kernel { priv opaque: () } -struct Env { priv opaque: () } -struct AllocHeader { priv opaque: () } -struct MemoryRegion { priv opaque: () } - -#[cfg(target_arch="x86")] -struct Registers { - data: [u32, ..16] -} - -#[cfg(target_arch="arm")] -#[cfg(target_arch="mips")] -struct Registers { - data: [u32, ..32] -} - -#[cfg(target_arch="x86")] -#[cfg(target_arch="arm")] -#[cfg(target_arch="mips")] -struct Context { - regs: Registers, - next: *Context, - pad: [u32, ..3] -} - -#[cfg(target_arch="x86_64")] -struct Registers { - data: [u64, ..22] -} - -#[cfg(target_arch="x86_64")] -struct Context { - regs: Registers, - next: *Context, - pad: uintptr_t -} - -struct BoxedRegion { - env: *Env, - backing_region: *MemoryRegion, - live_allocs: *BoxRepr -} - -#[cfg(target_arch="x86")] -#[cfg(target_arch="arm")] -#[cfg(target_arch="mips")] -struct Task { - // Public fields - refcount: intptr_t, // 0 - id: TaskID, // 4 - pad: [u32, ..2], // 8 - ctx: Context, // 16 - stack_segment: *StackSegment, // 96 - runtime_sp: uintptr_t, // 100 - scheduler: *Scheduler, // 104 - scheduler_loop: *SchedulerLoop, // 108 - - // Fields known only to the runtime - kernel: *Kernel, // 112 - name: *c_char, // 116 - list_index: i32, // 120 - boxed_region: BoxedRegion // 128 -} - -#[cfg(target_arch="x86_64")] -struct Task { - // Public fields - refcount: intptr_t, - id: TaskID, - ctx: Context, - stack_segment: *StackSegment, - runtime_sp: uintptr_t, - scheduler: *Scheduler, - scheduler_loop: *SchedulerLoop, - - // Fields known only to the runtime - kernel: *Kernel, - name: *c_char, - list_index: i32, - boxed_region: BoxedRegion -} /* * Box annihilation @@ -132,9 +39,9 @@ unsafe fn each_live_alloc(read_next_before: bool, //! Walks the internal list of allocations use managed; + use rt::local_heap; - let task: *Task = transmute(rustrt::rust_get_task()); - let box = (*task).boxed_region.live_allocs; + let box = local_heap::live_allocs(); let mut box: *mut BoxRepr = transmute(copy box); while box != mut_null() { let next_before = transmute(copy (*box).header.next); @@ -156,7 +63,11 @@ unsafe fn each_live_alloc(read_next_before: bool, #[cfg(unix)] fn debug_mem() -> bool { - ::rt::env::get().debug_mem + // XXX: Need to port the environment struct to newsched + match rt::context() { + OldTaskContext => ::rt::env::get().debug_mem, + _ => false + } } #[cfg(windows)] @@ -165,13 +76,12 @@ fn debug_mem() -> bool { } /// Destroys all managed memory (i.e. @ boxes) held by the current task. -#[cfg(not(test))] -#[lang="annihilate"] pub unsafe fn annihilate() { - use unstable::lang::local_free; + use rt::local_heap::local_free; use io::WriterUtil; use io; use libc; + use rt::borrowck; use sys; use managed; @@ -183,7 +93,7 @@ pub unsafe fn annihilate() { // Quick hack: we need to free this list upon task exit, and this // is a convenient place to do it. - clear_task_borrow_list(); + borrowck::clear_task_borrow_list(); // Pass 1: Make all boxes immortal. 
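    // (Presumably the boxes are pinned at an immortal ref count first so that
    //  the drop glue run by the later pass cannot cause any box to be freed
    //  out from under this traversal; actual freeing is deferred to the end.)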
// @@ -207,7 +117,7 @@ pub unsafe fn annihilate() { if !uniq { let tydesc: *TypeDesc = transmute(copy (*box).header.type_desc); let drop_glue: DropGlue = transmute(((*tydesc).drop_glue, 0)); - drop_glue(to_unsafe_ptr(&tydesc), transmute(&(*box).data)); + drop_glue(&tydesc, transmute(&(*box).data)); } } diff --git a/src/libstd/rt/comm.rs b/src/libstd/rt/comm.rs index 82e6d44fe6253..dd27c03ff5164 100644 --- a/src/libstd/rt/comm.rs +++ b/src/libstd/rt/comm.rs @@ -399,12 +399,6 @@ impl GenericChan for SharedChan { } impl GenericSmartChan for SharedChan { - #[cfg(stage0)] // odd type checking errors - fn try_send(&self, _val: T) -> bool { - fail!() - } - - #[cfg(not(stage0))] fn try_send(&self, val: T) -> bool { unsafe { let (next_pone, next_cone) = oneshot(); @@ -448,12 +442,6 @@ impl GenericPort for SharedPort { } } - #[cfg(stage0)] // odd type checking errors - fn try_recv(&self) -> Option { - fail!() - } - - #[cfg(not(stage0))] fn try_recv(&self) -> Option { unsafe { let (next_link_port, next_link_chan) = oneshot(); diff --git a/src/libstd/rt/local_heap.rs b/src/libstd/rt/local_heap.rs index 38cd25f9da5d0..f62c9fb2c660c 100644 --- a/src/libstd/rt/local_heap.rs +++ b/src/libstd/rt/local_heap.rs @@ -10,11 +10,24 @@ //! The local, garbage collected heap +use libc; use libc::{c_void, uintptr_t, size_t}; use ops::Drop; +use repr::BoxRepr; +use rt; +use rt::OldTaskContext; +use rt::local::Local; +use rt::task::Task; type MemoryRegion = c_void; -type BoxedRegion = c_void; + +struct Env { priv opaque: () } + +struct BoxedRegion { + env: *Env, + backing_region: *MemoryRegion, + live_allocs: *BoxRepr +} pub type OpaqueBox = c_void; pub type TypeDesc = c_void; @@ -71,6 +84,40 @@ impl Drop for LocalHeap { } } +// A little compatibility function +pub unsafe fn local_free(ptr: *libc::c_char) { + match rt::context() { + OldTaskContext => { + rust_upcall_free_noswitch(ptr); + + extern { + #[fast_ffi] + unsafe fn rust_upcall_free_noswitch(ptr: *libc::c_char); + } + } + _ => { + do Local::borrow:: |task| { + task.heap.free(ptr as *libc::c_void); + } + } + } +} + +pub fn live_allocs() -> *BoxRepr { + let region = match rt::context() { + OldTaskContext => { + unsafe { rust_current_boxed_region() } + } + _ => { + do Local::borrow:: |task| { + task.heap.boxed_region + } + } + }; + + return unsafe { (*region).live_allocs }; +} + extern { fn rust_new_memory_region(synchronized: uintptr_t, detailed_leaks: uintptr_t, @@ -86,4 +133,5 @@ extern { ptr: *OpaqueBox, size: size_t) -> *OpaqueBox; fn rust_boxed_region_free(region: *BoxedRegion, box: *OpaqueBox); + fn rust_current_boxed_region() -> *BoxedRegion; } diff --git a/src/libstd/rt/mod.rs b/src/libstd/rt/mod.rs index 6c4e5742eb333..fec555e8afd4d 100644 --- a/src/libstd/rt/mod.rs +++ b/src/libstd/rt/mod.rs @@ -124,7 +124,7 @@ mod thread; pub mod env; /// The local, managed heap -mod local_heap; +pub mod local_heap; /// The Logger trait and implementations pub mod logging; diff --git a/src/libstd/rt/task.rs b/src/libstd/rt/task.rs index 833f25b253c05..68f7eb659b013 100644 --- a/src/libstd/rt/task.rs +++ b/src/libstd/rt/task.rs @@ -15,6 +15,7 @@ use borrow; use cast::transmute; +use cleanup; use libc::{c_void, uintptr_t}; use ptr; use prelude::*; @@ -118,6 +119,10 @@ impl Task { } _ => () } + + // Destroy remaining boxes + unsafe { cleanup::annihilate(); } + self.destroyed = true; } } @@ -269,4 +274,20 @@ mod test { assert!(res.is_err()); } } + + #[test] + fn heap_cycles() { + use option::{Option, Some, None}; + + do run_in_newsched_task { + struct List { + next: 
Option<@mut List>, + } + + let a = @mut List { next: None }; + let b = @mut List { next: Some(a) }; + + a.next = Some(b); + } + } } diff --git a/src/libstd/sys.rs b/src/libstd/sys.rs index 8ac60ffb979c6..523c5d633cf44 100644 --- a/src/libstd/sys.rs +++ b/src/libstd/sys.rs @@ -216,12 +216,15 @@ pub fn begin_unwind_(msg: *c_char, file: *c_char, line: size_t) -> ! { task.logger.log(Left(outmsg.take())); } } else { - rtdebug!("%s", outmsg); + rterrln!("%s", outmsg); } gc::cleanup_stack_for_failure(); let task = Local::unsafe_borrow::(); + if (*task).unwinder.unwinding { + rtabort!("unwinding again"); + } (*task).unwinder.begin_unwind(); } } diff --git a/src/libstd/unstable/lang.rs b/src/libstd/unstable/lang.rs index d71a672eb3552..f750b31a466b9 100644 --- a/src/libstd/unstable/lang.rs +++ b/src/libstd/unstable/lang.rs @@ -43,9 +43,6 @@ pub mod rustrt { size: uintptr_t) -> *c_char; - #[fast_ffi] - unsafe fn rust_upcall_free_noswitch(ptr: *c_char); - #[rust_stack] fn rust_try_get_task() -> *rust_task; @@ -105,16 +102,7 @@ pub unsafe fn local_malloc(td: *c_char, size: uintptr_t) -> *c_char { // problem occurs, call exit instead. #[lang="free"] pub unsafe fn local_free(ptr: *c_char) { - match context() { - OldTaskContext => { - rustrt::rust_upcall_free_noswitch(ptr); - } - _ => { - do Local::borrow:: |task| { - task.heap.free(ptr as *c_void); - } - } - } + ::rt::local_heap::local_free(ptr); } #[lang="borrow_as_imm"] @@ -162,6 +150,11 @@ pub unsafe fn strdup_uniq(ptr: *c_uchar, len: uint) -> ~str { str::raw::from_buf_len(ptr, len) } +#[lang="annihilate"] +pub unsafe fn annihilate() { + ::cleanup::annihilate() +} + #[lang="start"] pub fn start(main: *u8, argc: int, argv: **c_char, crate_map: *u8) -> int { diff --git a/src/rt/rust_builtin.cpp b/src/rt/rust_builtin.cpp index 9ed389b178aed..51e2849eb5480 100644 --- a/src/rt/rust_builtin.cpp +++ b/src/rt/rust_builtin.cpp @@ -882,6 +882,12 @@ rust_delete_memory_region(memory_region *region) { delete region; } +extern "C" CDECL boxed_region* +rust_current_boxed_region() { + rust_task *task = rust_get_current_task(); + return &task->boxed; +} + extern "C" CDECL boxed_region* rust_new_boxed_region(memory_region *region, uintptr_t poison_on_free) { diff --git a/src/rt/rustrt.def.in b/src/rt/rustrt.def.in index 9add8d537af0c..d85700435e008 100644 --- a/src/rt/rustrt.def.in +++ b/src/rt/rustrt.def.in @@ -244,4 +244,5 @@ rust_drop_env_lock rust_update_log_settings rust_running_on_valgrind rust_get_num_cpus -rust_get_global_args_ptr \ No newline at end of file +rust_get_global_args_ptr +rust_current_boxed_region \ No newline at end of file From b530ca103388c99e774868645758785d6ad6b9a9 Mon Sep 17 00:00:00 2001 From: Brian Anderson Date: Sun, 23 Jun 2013 14:01:59 -0700 Subject: [PATCH 078/111] std: Make unlinking and task notification work with newsched --- src/libstd/rt/task.rs | 25 +++++++++++++++++++++++++ src/libstd/task/spawn.rs | 22 +++++++++++++++++++--- 2 files changed, 44 insertions(+), 3 deletions(-) diff --git a/src/libstd/rt/task.rs b/src/libstd/rt/task.rs index 68f7eb659b013..97c3b6a749bc9 100644 --- a/src/libstd/rt/task.rs +++ b/src/libstd/rt/task.rs @@ -290,4 +290,29 @@ mod test { a.next = Some(b); } } + + // XXX: This is a copy of test_future_result in std::task. + // It can be removed once the scheduler is turned on by default. 
+ #[test] + fn future_result() { + do run_in_newsched_task { + use option::{Some, None}; + use task::*; + + let mut result = None; + let mut builder = task(); + builder.future_result(|r| result = Some(r)); + do builder.spawn {} + assert_eq!(result.unwrap().recv(), Success); + + result = None; + let mut builder = task(); + builder.future_result(|r| result = Some(r)); + builder.unlinked(); + do builder.spawn { + fail!(); + } + assert_eq!(result.unwrap().recv(), Failure); + } + } } diff --git a/src/libstd/task/spawn.rs b/src/libstd/task/spawn.rs index 344a58a877fc1..63eb768d1c9cd 100644 --- a/src/libstd/task/spawn.rs +++ b/src/libstd/task/spawn.rs @@ -578,13 +578,29 @@ pub fn spawn_raw(opts: TaskOpts, f: ~fn()) { } } -fn spawn_raw_newsched(_opts: TaskOpts, f: ~fn()) { +fn spawn_raw_newsched(mut opts: TaskOpts, f: ~fn()) { use rt::sched::*; - let task = do Local::borrow::() |running_task| { - ~running_task.new_child() + let mut task = if opts.linked { + do Local::borrow::() |running_task| { + ~running_task.new_child() + } + } else { + // An unlinked task is a new root in the task tree + ~Task::new_root() }; + if opts.notify_chan.is_some() { + let notify_chan = opts.notify_chan.swap_unwrap(); + let notify_chan = Cell::new(notify_chan); + let on_exit: ~fn(bool) = |success| { + notify_chan.take().send( + if success { Success } else { Failure } + ) + }; + task.on_exit = Some(on_exit); + } + let mut sched = Local::take::(); let task = ~Coroutine::with_task(&mut sched.stack_pool, task, f); From d071f51cdc7c3492ae2bc4180ffbf13bcdb31439 Mon Sep 17 00:00:00 2001 From: Brian Anderson Date: Sun, 23 Jun 2013 14:15:39 -0700 Subject: [PATCH 079/111] std::rt: deny(unused_unsafe) --- src/libstd/rt/mod.rs | 3 ++- src/libstd/rt/test.rs | 2 +- src/libstd/rt/uv/timer.rs | 4 ++-- 3 files changed, 5 insertions(+), 4 deletions(-) diff --git a/src/libstd/rt/mod.rs b/src/libstd/rt/mod.rs index fec555e8afd4d..8713cf05a4772 100644 --- a/src/libstd/rt/mod.rs +++ b/src/libstd/rt/mod.rs @@ -58,6 +58,7 @@ Several modules in `core` are clients of `rt`: #[deny(unused_imports)]; #[deny(unused_mut)]; #[deny(unused_variable)]; +#[deny(unused_unsafe)]; use cell::Cell; use clone::Clone; @@ -224,7 +225,7 @@ pub fn run(main: ~fn()) -> int { let nthreads = match os::getenv("RUST_THREADS") { Some(nstr) => FromStr::from_str(nstr).get(), - None => unsafe { util::num_cpus() } + None => util::num_cpus() }; // The shared list of sleeping schedulers. Schedulers wake each other diff --git a/src/libstd/rt/test.rs b/src/libstd/rt/test.rs index 36efcd91834b8..b0e4968401474 100644 --- a/src/libstd/rt/test.rs +++ b/src/libstd/rt/test.rs @@ -74,7 +74,7 @@ pub fn run_in_mt_newsched_task(f: ~fn()) { do run_in_bare_thread { let nthreads = match os::getenv("RUST_TEST_THREADS") { Some(nstr) => FromStr::from_str(nstr).get(), - None => unsafe { + None => { // Using more threads than cores in test code // to force the OS to preempt them frequently. // Assuming that this help stress test concurrent types. 
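Patch 079 adds `#[deny(unused_unsafe)]` to `std::rt`, which is why redundant `unsafe` blocks are being stripped here and in the uv timer tests that follow: once the lint is denied, an `unsafe` block wrapping code that needs no unsafety becomes a compile error. A minimal sketch of the kind of block the lint rejects (module and function names are made up for illustration):

    #[deny(unused_unsafe)]
    mod demo {
        fn already_safe() -> uint { 42 }

        pub fn caller() -> uint {
            // The lint flags this block: nothing inside it requires `unsafe`.
            unsafe { already_safe() }
        }
    }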
diff --git a/src/libstd/rt/uv/timer.rs b/src/libstd/rt/uv/timer.rs index cd6fc5c0a250e..14465eb7dfd3a 100644 --- a/src/libstd/rt/uv/timer.rs +++ b/src/libstd/rt/uv/timer.rs @@ -160,14 +160,14 @@ mod test { let mut timer2 = TimerWatcher::new(&mut loop_); do timer2.start(10, 0) |timer2, _| { - unsafe { *count_ptr += 1; } + *count_ptr += 1; timer2.close(||()); // Restart the original timer let mut timer = timer; do timer.start(1, 0) |timer, _| { - unsafe { *count_ptr += 1; } + *count_ptr += 1; timer.close(||()); } } From e65d0cbabebc73f2c9733a7ed158576c9702e71e Mon Sep 17 00:00:00 2001 From: Brian Anderson Date: Sun, 23 Jun 2013 18:22:57 -0700 Subject: [PATCH 080/111] extra: Make test runner compatible with newsched --- src/libextra/test.rs | 17 ++++------------- src/libstd/rt/mod.rs | 9 ++------- src/libstd/rt/util.rs | 12 ++++++++++++ src/test/run-pass/morestack6.rs | 3 --- 4 files changed, 18 insertions(+), 23 deletions(-) diff --git a/src/libextra/test.rs b/src/libextra/test.rs index 72837cb4ae1f7..64c6a822a86e7 100644 --- a/src/libextra/test.rs +++ b/src/libextra/test.rs @@ -31,14 +31,6 @@ use core::to_str::ToStr; use core::uint; use core::vec; -pub mod rustrt { - use core::libc::size_t; - - #[abi = "cdecl"] - pub extern { - pub unsafe fn rust_sched_threads() -> size_t; - } -} // The name of a test. By convention this follows the rules for rust // paths; i.e. it should be a series of identifiers separated by double @@ -488,11 +480,10 @@ static sched_overcommit : uint = 1; static sched_overcommit : uint = 4u; fn get_concurrency() -> uint { - unsafe { - let threads = rustrt::rust_sched_threads() as uint; - if threads == 1 { 1 } - else { threads * sched_overcommit } - } + use core::rt; + let threads = rt::util::default_sched_threads(); + if threads == 1 { 1 } + else { threads * sched_overcommit } } #[allow(non_implicitly_copyable_typarams)] diff --git a/src/libstd/rt/mod.rs b/src/libstd/rt/mod.rs index 8713cf05a4772..bbf1cf0d9b797 100644 --- a/src/libstd/rt/mod.rs +++ b/src/libstd/rt/mod.rs @@ -63,11 +63,9 @@ Several modules in `core` are clients of `rt`: use cell::Cell; use clone::Clone; use container::Container; -use from_str::FromStr; use iter::Times; use iterator::IteratorUtil; -use option::{Some, None}; -use os; +use option::Some; use ptr::RawPtr; use rt::sched::{Scheduler, Coroutine, Shutdown}; use rt::sleeper_list::SleeperList; @@ -223,10 +221,7 @@ pub fn run(main: ~fn()) -> int { static DEFAULT_ERROR_CODE: int = 101; - let nthreads = match os::getenv("RUST_THREADS") { - Some(nstr) => FromStr::from_str(nstr).get(), - None => util::num_cpus() - }; + let nthreads = util::default_sched_threads(); // The shared list of sleeping schedulers. Schedulers wake each other // occassionally to do new work. diff --git a/src/libstd/rt/util.rs b/src/libstd/rt/util.rs index 904b2f8bbb932..5219ae1d5406d 100644 --- a/src/libstd/rt/util.rs +++ b/src/libstd/rt/util.rs @@ -9,8 +9,11 @@ // except according to those terms. use container::Container; +use from_str::FromStr; use iterator::IteratorUtil; use libc; +use option::{Some, None}; +use os; use str::StrSlice; /// Get the number of cores available @@ -24,6 +27,15 @@ pub fn num_cpus() -> uint { } } +/// Get's the number of scheduler threads requested by the environment +/// either `RUST_THREADS` or `num_cpus`. 
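/// For example, running a program with `RUST_THREADS=8` in the environment
/// requests eight scheduler threads; when the variable is unset the count
/// falls back to the number of CPUs reported by `num_cpus()`.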
+pub fn default_sched_threads() -> uint { + match os::getenv("RUST_THREADS") { + Some(nstr) => FromStr::from_str(nstr).get(), + None => num_cpus() + } +} + pub fn dumb_println(s: &str) { use io::WriterUtil; let dbg = ::libc::STDERR_FILENO as ::io::fd_t; diff --git a/src/test/run-pass/morestack6.rs b/src/test/run-pass/morestack6.rs index 1dc8503aeb22a..0e0bf2a13e171 100644 --- a/src/test/run-pass/morestack6.rs +++ b/src/test/run-pass/morestack6.rs @@ -23,7 +23,6 @@ mod rustrt { pub fn rust_get_sched_id() -> libc::intptr_t; pub fn rust_get_argc() -> libc::c_int; pub fn get_task_id() -> libc::intptr_t; - pub fn rust_sched_threads(); pub fn rust_get_task(); } } @@ -31,7 +30,6 @@ mod rustrt { fn calllink01() { unsafe { rustrt::rust_get_sched_id(); } } fn calllink02() { unsafe { rustrt::rust_get_argc(); } } fn calllink08() { unsafe { rustrt::get_task_id(); } } -fn calllink09() { unsafe { rustrt::rust_sched_threads(); } } fn calllink10() { unsafe { rustrt::rust_get_task(); } } fn runtest(f: extern fn(), frame_backoff: u32) { @@ -64,7 +62,6 @@ pub fn main() { calllink01, calllink02, calllink08, - calllink09, calllink10 ]; let mut rng = rand::rng(); From 794923c99511398bc90400e380dd11770ec8e614 Mon Sep 17 00:00:00 2001 From: Eric Reed Date: Tue, 25 Jun 2013 11:43:40 -0700 Subject: [PATCH 081/111] UDP networking with tests --- src/libstd/rt/io/net/udp.rs | 103 +++++++++++++++++++++- src/libstd/rt/uv/mod.rs | 6 +- src/libstd/rt/uv/net.rs | 146 ++++++++++++++++++++++++------- src/libstd/rt/uv/uvio.rs | 170 +++++++++++++++++++++++++++++++----- src/libstd/rt/uv/uvll.rs | 2 +- 5 files changed, 366 insertions(+), 61 deletions(-) diff --git a/src/libstd/rt/io/net/udp.rs b/src/libstd/rt/io/net/udp.rs index ac5a118f22a59..97c09525d35f1 100644 --- a/src/libstd/rt/io/net/udp.rs +++ b/src/libstd/rt/io/net/udp.rs @@ -44,7 +44,6 @@ impl UdpSocket { Ok((nread, src)) => Some((nread, src)), Err(ioerr) => { // EOF is indicated by returning None - // XXX do we ever find EOF reading UDP packets? 
if ioerr.kind != EndOfFile { read_error::cond.raise(ioerr); } @@ -86,10 +85,12 @@ impl UdpStream { impl Reader for UdpStream { fn read(&mut self, buf: &mut [u8]) -> Option { - let conn = self.connectedTo; do self.as_socket |sock| { - sock.recvfrom(buf) - .map_consume(|(nread,src)| if src == conn {nread} else {0}) + match sock.recvfrom(buf) { + Some((_nread, src)) if src != self.connectedTo => Some(0), + Some((nread, _src)) => Some(nread), + None => None, + } } } @@ -105,3 +106,97 @@ impl Writer for UdpStream { fn flush(&mut self) { fail!() } } + +#[cfg(test)] +mod test { + use super::*; + use rt::test::*; + use rt::io::net::ip::Ipv4; + use rt::io::*; + use option::{Some, None}; + + #[test] #[ignore] + fn bind_error() { + do run_in_newsched_task { + let mut called = false; + do io_error::cond.trap(|e| { + assert!(e.kind == PermissionDenied); + called = true; + }).in { + let addr = Ipv4(0, 0, 0, 0, 1); + let socket = UdpSocket::bind(addr); + assert!(socket.is_none()); + } + assert!(called); + } + } + + #[test] + fn socket_smoke_test() { + do run_in_newsched_task { + let server_ip = next_test_ip4(); + let client_ip = next_test_ip4(); + + do spawntask_immediately { + match UdpSocket::bind(server_ip) { + Some(server) => { + let mut buf = [0]; + match server.recvfrom(buf) { + Some((nread, src)) => { + assert_eq!(nread, 1); + assert_eq!(buf[0], 99); + assert_eq!(src, client_ip); + } + None => fail!() + } + } + None => fail!() + } + } + + do spawntask_immediately { + match UdpSocket::bind(client_ip) { + Some(client) => client.sendto([99], server_ip), + None => fail!() + } + } + } + } + + #[test] + fn stream_smoke_test() { + do run_in_newsched_task { + let server_ip = next_test_ip4(); + let client_ip = next_test_ip4(); + + do spawntask_immediately { + match UdpSocket::bind(server_ip) { + Some(server) => { + let server = ~server; + let mut stream = server.connect(client_ip); + let mut buf = [0]; + match stream.read(buf) { + Some(nread) => { + assert_eq!(nread, 1); + assert_eq!(buf[0], 99); + } + None => fail!() + } + } + None => fail!() + } + } + + do spawntask_immediately { + match UdpSocket::bind(client_ip) { + Some(client) => { + let client = ~client; + let mut stream = client.connect(server_ip); + stream.write([99]); + } + None => fail!() + } + } + } + } +} diff --git a/src/libstd/rt/uv/mod.rs b/src/libstd/rt/uv/mod.rs index a6a8edafe60f2..e39a6384bc63b 100644 --- a/src/libstd/rt/uv/mod.rs +++ b/src/libstd/rt/uv/mod.rs @@ -46,6 +46,7 @@ use libc::{c_void, c_int, size_t, malloc, free}; use cast::transmute; use ptr::null; use unstable::finally::Finally; +use rt::io::net::ip::IpAddr; use rt::io::IoError; @@ -126,7 +127,7 @@ pub type ConnectionCallback = ~fn(StreamWatcher, Option); pub type FsCallback = ~fn(FsRequest, Option); pub type TimerCallback = ~fn(TimerWatcher, Option); pub type AsyncCallback = ~fn(AsyncWatcher, Option); -pub type UdpReceiveCallback = ~fn(UdpWatcher, int, Buf, Ipv4, uint, Option); +pub type UdpReceiveCallback = ~fn(UdpWatcher, int, Buf, IpAddr, uint, Option); pub type UdpSendCallback = ~fn(UdpWatcher, Option); @@ -298,9 +299,6 @@ pub fn status_to_maybe_uv_error(handle: *T, status: c_int) -> Option /// The uv buffer type pub type Buf = uvll::uv_buf_t; -/// The uv IPv4 type -pub type Ipv4 = uvll::sockaddr_in; - /// Borrow a slice to a Buf pub fn slice_to_uv_buf(v: &[u8]) -> Buf { let data = vec::raw::to_ptr(v); diff --git a/src/libstd/rt/uv/net.rs b/src/libstd/rt/uv/net.rs index c88d96bd2306e..6a0f6f156b798 100644 --- a/src/libstd/rt/uv/net.rs +++ b/src/libstd/rt/uv/net.rs @@ 
-18,8 +18,12 @@ use rt::uv::{Loop, Watcher, Request, UvError, Buf, NativeHandle, NullCallback, use rt::io::net::ip::{IpAddr, Ipv4, Ipv6}; use rt::uv::last_uv_error; use vec; +use str; +use from_str::{FromStr}; -fn ip4_as_uv_ip4(addr: IpAddr, f: &fn(*sockaddr_in) -> T) -> T { +//#[cfg(test)] use rt::test::*; + +pub fn ip4_as_uv_ip4(addr: IpAddr, f: &fn(*sockaddr_in) -> T) -> T { match addr { Ipv4(a, b, c, d, p) => { unsafe { @@ -41,12 +45,21 @@ fn ip4_as_uv_ip4(addr: IpAddr, f: &fn(*sockaddr_in) -> T) -> T { pub fn uv_ip4_to_ip4(addr: *sockaddr_in) -> IpAddr { let ip4_size = 16; - let buf = vec::from_elem(ip4_size, 0u8); - unsafe { ip4_name(addr, &buf[0], ip4_size as u64) }; + let buf = vec::from_elem(ip4_size + 1 /*null terminated*/, 0u8); + unsafe { ip4_name(addr, vec::raw::to_ptr(buf), ip4_size as u64) }; let port = unsafe { ip4_port(addr) }; - Ipv4(buf[0], buf[1], buf[2], buf[3], port as u16) + let ip_str = str::from_bytes_slice(buf).trim_right_chars(&'\x00'); + let ip: ~[u8] = ip_str.split_iter('.') + .transform(|s: &str| -> u8 { + let x = FromStr::from_str(s); + assert!(x.is_some()); + x.unwrap() }) + .collect(); + assert!(ip.len() >= 4); + Ipv4(ip[0], ip[1], ip[2], ip[3], port as u16) } + // uv_stream t is the parent class of uv_tcp_t, uv_pipe_t, uv_tty_t // and uv_file_t pub struct StreamWatcher(*uvll::uv_stream_t); @@ -266,7 +279,7 @@ pub struct UdpWatcher(*uvll::uv_udp_t); impl Watcher for UdpWatcher { } impl UdpWatcher { - pub fn new(loop_: &mut Loop) -> UdpWatcher { + pub fn new(loop_: &Loop) -> UdpWatcher { unsafe { let handle = malloc_handle(UV_UDP); assert!(handle.is_not_null()); @@ -277,7 +290,7 @@ impl UdpWatcher { } } - pub fn bind(&mut self, address: IpAddr) -> Result<(), UvError> { + pub fn bind(&self, address: IpAddr) -> Result<(), UvError> { match address { Ipv4(*) => { do ip4_as_uv_ip4(address) |addr| { @@ -295,58 +308,59 @@ impl UdpWatcher { } } - pub fn recv_start(&mut self, alloc: AllocCallback, cb: UdpReceiveCallback) { + pub fn recv_start(&self, alloc: AllocCallback, cb: UdpReceiveCallback) { { - let data = self.get_watcher_data(); + let mut this = *self; + let data = this.get_watcher_data(); data.alloc_cb = Some(alloc); data.udp_recv_cb = Some(cb); } let handle = self.native_handle(); - unsafe { uvll::read_start(handle, alloc_cb, recv_cb); } + unsafe { uvll::udp_recv_start(handle, alloc_cb, recv_cb); } extern fn alloc_cb(handle: *uvll::uv_udp_t, suggested_size: size_t) -> Buf { let mut udp_watcher: UdpWatcher = NativeHandle::from_native_handle(handle); - let data = udp_watcher.get_watcher_data(); - let alloc_cb = data.alloc_cb.get_ref(); + let alloc_cb = udp_watcher.get_watcher_data().alloc_cb.get_ref(); return (*alloc_cb)(suggested_size as uint); } /* TODO the socket address should actually be a pointer to either a sockaddr_in or sockaddr_in6. 
In libuv, the udp_recv callback takes a struct *sockaddr */ extern fn recv_cb(handle: *uvll::uv_udp_t, nread: ssize_t, buf: Buf, - address: *uvll::sockaddr_in, flags: c_uint) { + addr: *uvll::sockaddr_in, flags: c_uint) { rtdebug!("buf addr: %x", buf.base as uint); rtdebug!("buf len: %d", buf.len as int); let mut udp_watcher: UdpWatcher = NativeHandle::from_native_handle(handle); let data = udp_watcher.get_watcher_data(); let cb = data.udp_recv_cb.get_ref(); let status = status_to_maybe_uv_error(handle, nread as c_int); - unsafe { (*cb)(udp_watcher, nread as int, buf, *address, flags as uint, status) }; + let address = uv_ip4_to_ip4(addr); + unsafe { (*cb)(udp_watcher, nread as int, buf, address, flags as uint, status) }; } } - pub fn recv_stop(&mut self) { + pub fn recv_stop(&self) { let handle = self.native_handle(); unsafe { uvll::udp_recv_stop(handle); } } - pub fn send(&mut self, buf: Buf, address: IpAddr, cb: UdpSendCallback) { + pub fn send(&self, buf: Buf, address: IpAddr, cb: UdpSendCallback) { { - let data = self.get_watcher_data(); + let mut this = *self; + let data = this.get_watcher_data(); assert!(data.udp_send_cb.is_none()); data.udp_send_cb = Some(cb); } let req = UdpSendRequest::new(); - let bufs = [buf]; match address { Ipv4(*) => { do ip4_as_uv_ip4(address) |addr| { unsafe { assert!(0 == uvll::udp_send(req.native_handle(), self.native_handle(), - bufs, addr, send_cb)); + [buf], addr, send_cb)); } } } @@ -357,11 +371,7 @@ impl UdpWatcher { let send_request: UdpSendRequest = NativeHandle::from_native_handle(req); let mut udp_watcher = send_request.handle(); send_request.delete(); - let cb = { - let data = udp_watcher.get_watcher_data(); - let cb = data.udp_send_cb.swap_unwrap(); - cb - }; + let cb = udp_watcher.get_watcher_data().udp_send_cb.swap_unwrap(); let status = status_to_maybe_uv_error(udp_watcher.native_handle(), status); cb(udp_watcher, status); } @@ -379,10 +389,7 @@ impl UdpWatcher { extern fn close_cb(handle: *uvll::uv_udp_t) { let mut udp_watcher: UdpWatcher = NativeHandle::from_native_handle(handle); - { - let data = udp_watcher.get_watcher_data(); - data.close_cb.swap_unwrap()(); - } + udp_watcher.get_watcher_data().close_cb.swap_unwrap()(); udp_watcher.drop_watcher_data(); unsafe { free_handle(handle as *c_void) } } @@ -475,9 +482,7 @@ impl Request for UdpSendRequest { } impl UdpSendRequest { pub fn new() -> UdpSendRequest { - let send_handle = unsafe { - malloc_req(UV_UDP_SEND) - }; + let send_handle = unsafe { malloc_req(UV_UDP_SEND) }; assert!(send_handle.is_not_null()); let send_handle = send_handle as *uvll::uv_udp_send_t; UdpSendRequest(send_handle) @@ -485,8 +490,7 @@ impl UdpSendRequest { pub fn handle(&self) -> UdpWatcher { unsafe { - let udp_handle = uvll::get_udp_handle_from_send_req(self.native_handle()); - NativeHandle::from_native_handle(udp_handle) + NativeHandle::from_native_handle(uvll::get_udp_handle_from_send_req(self.native_handle())) } } @@ -516,6 +520,12 @@ mod test { use rt::uv::{Loop, AllocCallback}; use rt::uv::{vec_from_uv_buf, vec_to_uv_buf, slice_to_uv_buf}; + #[test] + fn test_ip4_conversion() { + let ip4 = next_test_ip4(); + assert_eq!(ip4, ip4_as_uv_ip4(ip4, uv_ip4_to_ip4)); + } + #[test] fn connect_close() { do run_in_bare_thread() { @@ -534,6 +544,19 @@ mod test { } } + #[test] + fn udp_bind_close() { + do run_in_bare_thread() { + let mut loop_ = Loop::new(); + let udp_watcher = { UdpWatcher::new(&mut loop_) }; + let addr = next_test_ip4(); + udp_watcher.bind(addr); + udp_watcher.close(||()); + loop_.run(); + loop_.close(); 
+ } + } + #[test] fn listen() { do run_in_bare_thread() { @@ -609,4 +632,63 @@ mod test { loop_.close(); } } + + #[test] + fn udp_recv() { + do run_in_bare_thread() { + static MAX: int = 10; + let mut loop_ = Loop::new(); + let server_addr = next_test_ip4(); + let client_addr = next_test_ip4(); + + let server = UdpWatcher::new(&loop_); + assert!(server.bind(server_addr).is_ok()); + + rtdebug!("starting read"); + let alloc: AllocCallback = |size| { + vec_to_uv_buf(vec::from_elem(size, 0)) + }; + + do server.recv_start(alloc) |server, nread, buf, src, flags, status| { + server.recv_stop(); + rtdebug!("i'm reading!"); + assert!(status.is_none()); + assert_eq!(flags, 0); + assert_eq!(src, client_addr); + + let buf = vec_from_uv_buf(buf); + let mut count = 0; + rtdebug!("got %d bytes", nread); + + let buf = buf.unwrap(); + for buf.slice(0, nread as uint).iter().advance() |&byte| { + assert!(byte == count as u8); + rtdebug!("%u", byte as uint); + count += 1; + } + assert_eq!(count, MAX); + + server.close(||{}); + } + + do Thread::start { + let mut loop_ = Loop::new(); + let client = UdpWatcher::new(&loop_); + assert!(client.bind(client_addr).is_ok()); + let msg = ~[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; + let buf = slice_to_uv_buf(msg); + do client.send(buf, server_addr) |client, status| { + rtdebug!("writing"); + assert!(status.is_none()); + client.close(||{}); + } + + loop_.run(); + loop_.close(); + }; + + loop_.run(); + loop_.close(); + } + } } diff --git a/src/libstd/rt/uv/uvio.rs b/src/libstd/rt/uv/uvio.rs index 828078f48654e..127fac6244e38 100644 --- a/src/libstd/rt/uv/uvio.rs +++ b/src/libstd/rt/uv/uvio.rs @@ -25,7 +25,6 @@ use rt::io::{standard_error, OtherIoError}; use rt::tube::Tube; use rt::local::Local; use unstable::sync::{Exclusive, exclusive}; -use rt::uv::net::uv_ip4_to_ip4; #[cfg(test)] use container::Container; #[cfg(test)] use uint; @@ -263,7 +262,7 @@ impl IoFactory for UvIoFactory { } fn udp_bind(&mut self, addr: IpAddr) -> Result<~RtioUdpSocketObject, IoError> { - let mut watcher = UdpWatcher::new(self.uv_loop()); + let /*mut*/ watcher = UdpWatcher::new(self.uv_loop()); match watcher.bind(addr) { Ok(_) => Ok(~UvUdpSocket { watcher: watcher }), Err(uverr) => { @@ -487,21 +486,19 @@ impl RtioUdpSocket for UvUdpSocket { do scheduler.deschedule_running_task_and_then |sched, task| { rtdebug!("recvfrom: entered scheduler context"); assert!(!sched.in_task_context()); - let mut watcher = watcher; let task_cell = Cell::new(task); let alloc: AllocCallback = |_| unsafe { slice_to_uv_buf(*buf_ptr) }; - do watcher.recv_start(alloc) |watcher, nread, buf, addr, flags, status| { - let _ = flags; // TODO - let _ = buf; // TODO + do watcher.recv_start(alloc) |watcher, nread, _buf, addr, flags, status| { + let _ = flags; // TODO add handling for partials? 
- let mut watcher = watcher; watcher.recv_stop(); - let result = if status.is_none() { - assert!(nread >= 0); - Ok((nread as uint, uv_ip4_to_ip4(&addr))) - } else { - Err(uv_error_to_io_error(status.unwrap())) + let result = match status { + None => { + assert!(nread >= 0); + Ok((nread as uint, addr)) + } + Some(err) => Err(uv_error_to_io_error(err)) }; unsafe { (*result_cell_ptr).put_back(result); } @@ -513,8 +510,8 @@ impl RtioUdpSocket for UvUdpSocket { assert!(!result_cell.is_empty()); return result_cell.take(); - } + fn sendto(&self, buf: &[u8], dst: IpAddr) -> Result<(), IoError> { let result_cell = Cell::new_empty(); let result_cell_ptr: *Cell> = &result_cell; @@ -523,16 +520,13 @@ impl RtioUdpSocket for UvUdpSocket { let watcher = self.watcher(); let buf_ptr: *&[u8] = &buf; do scheduler.deschedule_running_task_and_then |_, task| { - let mut watcher = watcher; let task_cell = Cell::new(task); let buf = unsafe { slice_to_uv_buf(*buf_ptr) }; - do watcher.send(buf, dst) |watcher, status| { - let _ = watcher; // TODO + do watcher.send(buf, dst) |_watcher, status| { - let result = if status.is_none() { - Ok(()) - } else { - Err(uv_error_to_io_error(status.unwrap())) + let result = match status { + None => Ok(()), + Some(err) => Err(uv_error_to_io_error(err)), }; unsafe { (*result_cell_ptr).put_back(result); } @@ -559,6 +553,18 @@ fn test_simple_io_no_connect() { } } +#[test] +fn test_simple_udp_io_bind_only() { + do run_in_newsched_task { + unsafe { + let io = Local::unsafe_borrow::(); + let addr = next_test_ip4(); + let maybe_socket = (*io).udp_bind(addr); + assert!(maybe_socket.is_ok()); + } + } +} + #[test] fn test_simple_tcp_server_and_client() { do run_in_newsched_task { @@ -590,6 +596,37 @@ fn test_simple_tcp_server_and_client() { } } +#[test] +fn test_simple_udp_server_and_client() { + do run_in_newsched_task { + let server_addr = next_test_ip4(); + let client_addr = next_test_ip4(); + + do spawntask_immediately { + unsafe { + let io = Local::unsafe_borrow::(); + let server_socket = (*io).udp_bind(server_addr).unwrap(); + let mut buf = [0, .. 
2048]; + let (nread,src) = server_socket.recvfrom(buf).unwrap(); + assert_eq!(nread, 8); + for uint::range(0, nread) |i| { + rtdebug!("%u", buf[i] as uint); + assert_eq!(buf[i], i as u8); + } + assert_eq!(src, client_addr); + } + } + + do spawntask_immediately { + unsafe { + let io = Local::unsafe_borrow::(); + let client_socket = (*io).udp_bind(client_addr).unwrap(); + client_socket.sendto([0, 1, 2, 3, 4, 5, 6, 7], server_addr); + } + } + } +} + #[test] #[ignore(reason = "busted")] fn test_read_and_block() { do run_in_newsched_task { @@ -681,3 +718,96 @@ fn test_read_read_read() { } } } + +#[test] +fn test_udp_twice() { + do run_in_newsched_task { + let server_addr = next_test_ip4(); + let client_addr = next_test_ip4(); + + do spawntask_immediately { + unsafe { + let io = Local::unsafe_borrow::(); + let client = (*io).udp_bind(client_addr).unwrap(); + assert!(client.sendto([1], server_addr).is_ok()); + assert!(client.sendto([2], server_addr).is_ok()); + } + } + + do spawntask_immediately { + unsafe { + let io = Local::unsafe_borrow::(); + let server = (*io).udp_bind(server_addr).unwrap(); + let mut buf1 = [0]; + let mut buf2 = [0]; + let (nread1, src1) = server.recvfrom(buf1).unwrap(); + let (nread2, src2) = server.recvfrom(buf2).unwrap(); + assert_eq!(nread1, 1); + assert_eq!(nread2, 1); + assert_eq!(src1, client_addr); + assert_eq!(src2, client_addr); + assert_eq!(buf1[0], 1); + assert_eq!(buf2[0], 2); + } + } + } +} + +#[test] +fn test_udp_many_read() { + do run_in_newsched_task { + let server_out_addr = next_test_ip4(); + let server_in_addr = next_test_ip4(); + let client_out_addr = next_test_ip4(); + let client_in_addr = next_test_ip4(); + static MAX: uint = 500_000; + + do spawntask_immediately { + unsafe { + let io = Local::unsafe_borrow::(); + let server_out = (*io).udp_bind(server_out_addr).unwrap(); + let server_in = (*io).udp_bind(server_in_addr).unwrap(); + let msg = [1, .. 2048]; + let mut total_bytes_sent = 0; + let mut buf = [1]; + while buf[0] == 1 { + // send more data + assert!(server_out.sendto(msg, client_in_addr).is_ok()); + total_bytes_sent += msg.len(); + // check if the client has received enough + let res = server_in.recvfrom(buf); + assert!(res.is_ok()); + let (nread, src) = res.unwrap(); + assert_eq!(nread, 1); + assert_eq!(src, client_out_addr); + } + assert!(total_bytes_sent >= MAX); + } + } + + do spawntask_immediately { + unsafe { + let io = Local::unsafe_borrow::(); + let client_out = (*io).udp_bind(client_out_addr).unwrap(); + let client_in = (*io).udp_bind(client_in_addr).unwrap(); + let mut total_bytes_recv = 0; + let mut buf = [0, .. 
2048]; + while total_bytes_recv < MAX { + // ask for more + assert!(client_out.sendto([1], server_in_addr).is_ok()); + // wait for data + let res = client_in.recvfrom(buf); + assert!(res.is_ok()); + let (nread, src) = res.unwrap(); + assert_eq!(src, server_out_addr); + total_bytes_recv += nread; + for uint::range(0, nread) |i| { + assert_eq!(buf[i], 1); + } + } + // tell the server we're done + assert!(client_out.sendto([0], server_in_addr).is_ok()); + } + } + } +} diff --git a/src/libstd/rt/uv/uvll.rs b/src/libstd/rt/uv/uvll.rs index b73db77f3bb56..3bfc123dc8524 100644 --- a/src/libstd/rt/uv/uvll.rs +++ b/src/libstd/rt/uv/uvll.rs @@ -215,7 +215,7 @@ pub unsafe fn udp_send6(req: *uv_udp_send_t, handle: *T, buf_in: &[uv_buf_t], addr: *sockaddr_in6, cb: uv_udp_send_cb) -> c_int { let buf_ptr = vec::raw::to_ptr(buf_in); let buf_cnt = buf_in.len() as i32; - return rust_uv_udp_send(req, handle as *c_void, buf_ptr, buf_cnt, addr, cb); + return rust_uv_udp_send6(req, handle as *c_void, buf_ptr, buf_cnt, addr, cb); } pub unsafe fn udp_recv_start(server: *uv_udp_t, on_alloc: uv_alloc_cb, on_recv: uv_udp_recv_cb) -> c_int { From 1af20163586d13f505492865f70fe9767f35a306 Mon Sep 17 00:00:00 2001 From: Eric Reed Date: Tue, 25 Jun 2013 11:59:47 -0700 Subject: [PATCH 082/111] removed unncessary unsafe block that was stopping compliation. --- src/libstd/rt/uv/net.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/libstd/rt/uv/net.rs b/src/libstd/rt/uv/net.rs index 6a0f6f156b798..34388b3dc1560 100644 --- a/src/libstd/rt/uv/net.rs +++ b/src/libstd/rt/uv/net.rs @@ -336,7 +336,7 @@ impl UdpWatcher { let cb = data.udp_recv_cb.get_ref(); let status = status_to_maybe_uv_error(handle, nread as c_int); let address = uv_ip4_to_ip4(addr); - unsafe { (*cb)(udp_watcher, nread as int, buf, address, flags as uint, status) }; + (*cb)(udp_watcher, nread as int, buf, address, flags as uint, status); } } From f202713b73559941ad64c539dc10b71655879403 Mon Sep 17 00:00:00 2001 From: Eric Reed Date: Tue, 25 Jun 2013 14:40:36 -0700 Subject: [PATCH 083/111] satisfy the formatting check --- src/libstd/rt/io/net/udp.rs | 4 ++-- src/libstd/rt/uv/net.rs | 23 +++++++++++++---------- src/libstd/rt/uv/uvio.rs | 4 ++-- src/libstd/rt/uv/uvll.rs | 3 ++- src/rt/rust_uv.cpp | 4 ++-- 5 files changed, 21 insertions(+), 17 deletions(-) diff --git a/src/libstd/rt/io/net/udp.rs b/src/libstd/rt/io/net/udp.rs index 97c09525d35f1..d85fb06243066 100644 --- a/src/libstd/rt/io/net/udp.rs +++ b/src/libstd/rt/io/net/udp.rs @@ -84,7 +84,7 @@ impl UdpStream { } impl Reader for UdpStream { - fn read(&mut self, buf: &mut [u8]) -> Option { + fn read(&mut self, buf: &mut [u8]) -> Option { do self.as_socket |sock| { match sock.recvfrom(buf) { Some((_nread, src)) if src != self.connectedTo => Some(0), @@ -131,7 +131,7 @@ mod test { } } - #[test] + #[test] fn socket_smoke_test() { do run_in_newsched_task { let server_ip = next_test_ip4(); diff --git a/src/libstd/rt/uv/net.rs b/src/libstd/rt/uv/net.rs index 34388b3dc1560..eac70d5219a3a 100644 --- a/src/libstd/rt/uv/net.rs +++ b/src/libstd/rt/uv/net.rs @@ -50,12 +50,12 @@ pub fn uv_ip4_to_ip4(addr: *sockaddr_in) -> IpAddr { let port = unsafe { ip4_port(addr) }; let ip_str = str::from_bytes_slice(buf).trim_right_chars(&'\x00'); let ip: ~[u8] = ip_str.split_iter('.') - .transform(|s: &str| -> u8 { - let x = FromStr::from_str(s); + .transform(|s: &str| -> u8 { + let x = FromStr::from_str(s); assert!(x.is_some()); x.unwrap() }) .collect(); - assert!(ip.len() >= 4); + assert!(ip.len() >= 4); 
Ipv4(ip[0], ip[1], ip[2], ip[3], port as u16) } @@ -304,7 +304,7 @@ impl UdpWatcher { } } } - _ => fail!() // TODO ipv6 + _ => fail!() // NOTE ipv6 } } @@ -325,9 +325,10 @@ impl UdpWatcher { return (*alloc_cb)(suggested_size as uint); } - /* TODO the socket address should actually be a pointer to either a sockaddr_in or sockaddr_in6. + /* NOTE the socket address should actually be a pointer to + either a sockaddr_in or sockaddr_in6. In libuv, the udp_recv callback takes a struct *sockaddr */ - extern fn recv_cb(handle: *uvll::uv_udp_t, nread: ssize_t, buf: Buf, + extern fn recv_cb(handle: *uvll::uv_udp_t, nread: ssize_t, buf: Buf, addr: *uvll::sockaddr_in, flags: c_uint) { rtdebug!("buf addr: %x", buf.base as uint); rtdebug!("buf len: %d", buf.len as int); @@ -364,7 +365,7 @@ impl UdpWatcher { } } } - _ => fail!() // TODO ipv6 + _ => fail!() // NOTE ipv6 } extern fn send_cb(req: *uvll::uv_udp_send_t, status: c_int) { @@ -490,7 +491,9 @@ impl UdpSendRequest { pub fn handle(&self) -> UdpWatcher { unsafe { - NativeHandle::from_native_handle(uvll::get_udp_handle_from_send_req(self.native_handle())) + NativeHandle::from_native_handle( + uvll::get_udp_handle_from_send_req( + self.native_handle())) } } @@ -544,7 +547,7 @@ mod test { } } - #[test] + #[test] fn udp_bind_close() { do run_in_bare_thread() { let mut loop_ = Loop::new(); @@ -633,7 +636,7 @@ mod test { } } - #[test] + #[test] fn udp_recv() { do run_in_bare_thread() { static MAX: int = 10; diff --git a/src/libstd/rt/uv/uvio.rs b/src/libstd/rt/uv/uvio.rs index 127fac6244e38..6b4cb66ed9998 100644 --- a/src/libstd/rt/uv/uvio.rs +++ b/src/libstd/rt/uv/uvio.rs @@ -489,7 +489,7 @@ impl RtioUdpSocket for UvUdpSocket { let task_cell = Cell::new(task); let alloc: AllocCallback = |_| unsafe { slice_to_uv_buf(*buf_ptr) }; do watcher.recv_start(alloc) |watcher, nread, _buf, addr, flags, status| { - let _ = flags; // TODO add handling for partials? + let _ = flags; // NOTE add handling for partials? 
watcher.recv_stop(); @@ -596,7 +596,7 @@ fn test_simple_tcp_server_and_client() { } } -#[test] +#[test] fn test_simple_udp_server_and_client() { do run_in_newsched_task { let server_addr = next_test_ip4(); diff --git a/src/libstd/rt/uv/uvll.rs b/src/libstd/rt/uv/uvll.rs index 3bfc123dc8524..841a9bf8ef7c3 100644 --- a/src/libstd/rt/uv/uvll.rs +++ b/src/libstd/rt/uv/uvll.rs @@ -218,7 +218,8 @@ pub unsafe fn udp_send6(req: *uv_udp_send_t, handle: *T, buf_in: &[uv_buf_t], return rust_uv_udp_send6(req, handle as *c_void, buf_ptr, buf_cnt, addr, cb); } -pub unsafe fn udp_recv_start(server: *uv_udp_t, on_alloc: uv_alloc_cb, on_recv: uv_udp_recv_cb) -> c_int { +pub unsafe fn udp_recv_start(server: *uv_udp_t, on_alloc: uv_alloc_cb, + on_recv: uv_udp_recv_cb) -> c_int { return rust_uv_udp_recv_start(server, on_alloc, on_recv); } diff --git a/src/rt/rust_uv.cpp b/src/rt/rust_uv.cpp index 2fb9dc2f1a25c..6032ed1a6bdba 100644 --- a/src/rt/rust_uv.cpp +++ b/src/rt/rust_uv.cpp @@ -309,13 +309,13 @@ rust_uv_udp_bind6(uv_udp_t* server, sockaddr_in6* addr_ptr, unsigned flags) { } extern "C" int -rust_uv_udp_send(uv_udp_send_t* req, uv_udp_t* handle, uv_buf_t* buf_in, +rust_uv_udp_send(uv_udp_send_t* req, uv_udp_t* handle, uv_buf_t* buf_in, int buf_cnt, sockaddr_in* addr_ptr, uv_udp_send_cb cb) { return uv_udp_send(req, handle, buf_in, buf_cnt, *addr_ptr, cb); } extern "C" int -rust_uv_udp_send6(uv_udp_send_t* req, uv_udp_t* handle, uv_buf_t* buf_in, +rust_uv_udp_send6(uv_udp_send_t* req, uv_udp_t* handle, uv_buf_t* buf_in, int buf_cnt, sockaddr_in6* addr_ptr, uv_udp_send_cb cb) { return uv_udp_send6(req, handle, buf_in, buf_cnt, *addr_ptr, cb); } From 2c5cfe1037d618153b39597e0e1e62d95c8e4760 Mon Sep 17 00:00:00 2001 From: Eric Reed Date: Tue, 25 Jun 2013 16:02:51 -0700 Subject: [PATCH 084/111] removed obsolete FIXMEs. formatting changes. 
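For orientation, the functions touched here are the middle layer of the uv binding: the
watcher types in rt::uv::net convert addresses and buffers, call into the thin unsafe
wrappers in rt::uv::uvll, and those forward to the C glue in src/rt/rust_uv.cpp, which
finally calls libuv. A condensed sketch of the UDP send path, assuming the udp_send
wrapper mirrors the udp_send6 one shown above (abridged, not copied verbatim):

    // rt::uv::net -- UdpWatcher::send converts the IpAddr and hands off a Buf:
    //     do ip4_as_uv_ip4(address) |addr| {
    //         unsafe {
    //             assert_eq!(0, uvll::udp_send(req.native_handle(),
    //                                          self.native_handle(),
    //                                          [buf], addr, send_cb));
    //         }
    //     }

    // rt::uv::uvll -- thin unsafe wrapper over the C shim
    // (assumed to mirror the udp_send6 wrapper above):
    pub unsafe fn udp_send<T>(req: *uv_udp_send_t, handle: *T, buf_in: &[uv_buf_t],
                              addr: *sockaddr_in, cb: uv_udp_send_cb) -> c_int {
        let buf_ptr = vec::raw::to_ptr(buf_in);
        let buf_cnt = buf_in.len() as i32;
        return rust_uv_udp_send(req, handle as *c_void, buf_ptr, buf_cnt, addr, cb);
    }

    // src/rt/rust_uv.cpp -- rust_uv_udp_send dereferences the sockaddr_in and
    // calls uv_udp_send(req, handle, buf_in, buf_cnt, *addr_ptr, cb).
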
--- src/libstd/rt/uv/uvll.rs | 78 +++++++++++++--------------------------- 1 file changed, 24 insertions(+), 54 deletions(-) diff --git a/src/libstd/rt/uv/uvll.rs b/src/libstd/rt/uv/uvll.rs index 841a9bf8ef7c3..7035cb6a5e80f 100644 --- a/src/libstd/rt/uv/uvll.rs +++ b/src/libstd/rt/uv/uvll.rs @@ -235,27 +235,20 @@ pub unsafe fn tcp_init(loop_handle: *c_void, handle: *uv_tcp_t) -> c_int { return rust_uv_tcp_init(loop_handle, handle); } -// FIXME ref #2064 -pub unsafe fn tcp_connect(connect_ptr: *uv_connect_t, - tcp_handle_ptr: *uv_tcp_t, - addr_ptr: *sockaddr_in, - after_connect_cb: *u8) -> c_int { - return rust_uv_tcp_connect(connect_ptr, tcp_handle_ptr, - after_connect_cb, addr_ptr); -} -// FIXME ref #2064 -pub unsafe fn tcp_connect6(connect_ptr: *uv_connect_t, - tcp_handle_ptr: *uv_tcp_t, - addr_ptr: *sockaddr_in6, - after_connect_cb: *u8) -> c_int { - return rust_uv_tcp_connect6(connect_ptr, tcp_handle_ptr, - after_connect_cb, addr_ptr); -} -// FIXME ref #2064 +pub unsafe fn tcp_connect(connect_ptr: *uv_connect_t, tcp_handle_ptr: *uv_tcp_t, + addr_ptr: *sockaddr_in, after_connect_cb: *u8) -> c_int { + return rust_uv_tcp_connect(connect_ptr, tcp_handle_ptr, after_connect_cb, addr_ptr); +} + +pub unsafe fn tcp_connect6(connect_ptr: *uv_connect_t, tcp_handle_ptr: *uv_tcp_t, + addr_ptr: *sockaddr_in6, after_connect_cb: *u8) -> c_int { + return rust_uv_tcp_connect6(connect_ptr, tcp_handle_ptr, after_connect_cb, addr_ptr); +} + pub unsafe fn tcp_bind(tcp_server_ptr: *uv_tcp_t, addr_ptr: *sockaddr_in) -> c_int { return rust_uv_tcp_bind(tcp_server_ptr, addr_ptr); } -// FIXME ref #2064 + pub unsafe fn tcp_bind6(tcp_server_ptr: *uv_tcp_t, addr_ptr: *sockaddr_in6) -> c_int { return rust_uv_tcp_bind6(tcp_server_ptr, addr_ptr); } @@ -444,16 +437,11 @@ extern { fn rust_uv_idle_stop(handle: *uv_idle_t) -> c_int; fn rust_uv_async_send(handle: *uv_async_t); - fn rust_uv_async_init(loop_handle: *c_void, - async_handle: *uv_async_t, - cb: *u8) -> c_int; + fn rust_uv_async_init(loop_handle: *c_void, async_handle: *uv_async_t, cb: *u8) -> c_int; fn rust_uv_tcp_init(loop_handle: *c_void, handle_ptr: *uv_tcp_t) -> c_int; - // FIXME ref #2604 .. ? 
fn rust_uv_buf_init(out_buf: *uv_buf_t, base: *u8, len: size_t); fn rust_uv_last_error(loop_handle: *c_void) -> uv_err_t; - // FIXME ref #2064 fn rust_uv_strerror(err: *uv_err_t) -> *c_char; - // FIXME ref #2064 fn rust_uv_err_name(err: *uv_err_t) -> *c_char; fn rust_uv_ip4_addrp(ip: *u8, port: c_int) -> *sockaddr_in; fn rust_uv_ip6_addrp(ip: *u8, port: c_int) -> *sockaddr_in6; @@ -463,52 +451,34 @@ extern { fn rust_uv_ip6_name(src: *sockaddr_in6, dst: *u8, size: size_t) -> c_int; fn rust_uv_ip4_port(src: *sockaddr_in) -> c_uint; fn rust_uv_ip6_port(src: *sockaddr_in6) -> c_uint; - // FIXME ref #2064 - fn rust_uv_tcp_connect(connect_ptr: *uv_connect_t, - tcp_handle_ptr: *uv_tcp_t, - after_cb: *u8, + fn rust_uv_tcp_connect(connect_ptr: *uv_connect_t, tcp_handle_ptr: *uv_tcp_t, after_cb: *u8, addr: *sockaddr_in) -> c_int; - // FIXME ref #2064 fn rust_uv_tcp_bind(tcp_server: *uv_tcp_t, addr: *sockaddr_in) -> c_int; - // FIXME ref #2064 - fn rust_uv_tcp_connect6(connect_ptr: *uv_connect_t, - tcp_handle_ptr: *uv_tcp_t, - after_cb: *u8, + fn rust_uv_tcp_connect6(connect_ptr: *uv_connect_t, tcp_handle_ptr: *uv_tcp_t, after_cb: *u8, addr: *sockaddr_in6) -> c_int; - // FIXME ref #2064 fn rust_uv_tcp_bind6(tcp_server: *uv_tcp_t, addr: *sockaddr_in6) -> c_int; - fn rust_uv_tcp_getpeername(tcp_handle_ptr: *uv_tcp_t, - name: *sockaddr_in) -> c_int; - fn rust_uv_tcp_getpeername6(tcp_handle_ptr: *uv_tcp_t, - name: *sockaddr_in6) ->c_int; + fn rust_uv_tcp_getpeername(tcp_handle_ptr: *uv_tcp_t, name: *sockaddr_in) -> c_int; + fn rust_uv_tcp_getpeername6(tcp_handle_ptr: *uv_tcp_t, name: *sockaddr_in6) ->c_int; fn rust_uv_udp_init(loop_handle: *uv_loop_t, handle_ptr: *uv_udp_t) -> c_int; fn rust_uv_udp_bind(server: *uv_udp_t, addr: *sockaddr_in, flags: c_uint) -> c_int; fn rust_uv_udp_bind6(server: *uv_udp_t, addr: *sockaddr_in6, flags: c_uint) -> c_int; - fn rust_uv_udp_send(req: *uv_udp_send_t, handle: *uv_udp_t, buf_in: *uv_buf_t, - buf_cnt: c_int, addr: *sockaddr_in, cb: *u8) -> c_int; - fn rust_uv_udp_send6(req: *uv_udp_send_t, handle: *uv_udp_t, buf_in: *uv_buf_t, - buf_cnt: c_int, addr: *sockaddr_in6, cb: *u8) -> c_int; + fn rust_uv_udp_send(req: *uv_udp_send_t, handle: *uv_udp_t, buf_in: *uv_buf_t, buf_cnt: c_int, + addr: *sockaddr_in, cb: *u8) -> c_int; + fn rust_uv_udp_send6(req: *uv_udp_send_t, handle: *uv_udp_t, buf_in: *uv_buf_t, buf_cnt: c_int, + addr: *sockaddr_in6, cb: *u8) -> c_int; fn rust_uv_udp_recv_start(server: *uv_udp_t, on_alloc: *u8, on_recv: *u8) -> c_int; fn rust_uv_udp_recv_stop(server: *uv_udp_t) -> c_int; fn rust_uv_get_udp_handle_from_send_req(req: *uv_udp_send_t) -> *uv_udp_t; fn rust_uv_listen(stream: *c_void, backlog: c_int, cb: *u8) -> c_int; fn rust_uv_accept(server: *c_void, client: *c_void) -> c_int; - fn rust_uv_write(req: *c_void, - stream: *c_void, - buf_in: *uv_buf_t, - buf_cnt: c_int, + fn rust_uv_write(req: *c_void, stream: *c_void, buf_in: *uv_buf_t, buf_cnt: c_int, cb: *u8) -> c_int; - fn rust_uv_read_start(stream: *c_void, - on_alloc: *u8, - on_read: *u8) -> c_int; + fn rust_uv_read_start(stream: *c_void, on_alloc: *u8, on_read: *u8) -> c_int; fn rust_uv_read_stop(stream: *c_void) -> c_int; - fn rust_uv_timer_init(loop_handle: *c_void, - timer_handle: *uv_timer_t) -> c_int; - fn rust_uv_timer_start(timer_handle: *uv_timer_t, - cb: *u8, - timeout: libc::uint64_t, + fn rust_uv_timer_init(loop_handle: *c_void, timer_handle: *uv_timer_t) -> c_int; + fn rust_uv_timer_start(timer_handle: *uv_timer_t, cb: *u8, timeout: libc::uint64_t, repeat: libc::uint64_t) -> c_int; 
fn rust_uv_timer_stop(handle: *uv_timer_t) -> c_int; From d0c812f2a8064a9ea1b6a309343c00a5c18c9ce4 Mon Sep 17 00:00:00 2001 From: Eric Reed Date: Tue, 25 Jun 2013 16:03:24 -0700 Subject: [PATCH 085/111] IPv6 struct --- src/libstd/rt/io/net/ip.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/libstd/rt/io/net/ip.rs b/src/libstd/rt/io/net/ip.rs index d71b891350ecb..3a93fd705436e 100644 --- a/src/libstd/rt/io/net/ip.rs +++ b/src/libstd/rt/io/net/ip.rs @@ -8,8 +8,10 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +type Port = u16; + #[deriving(Eq, TotalEq)] pub enum IpAddr { - Ipv4(u8, u8, u8, u8, u16), - Ipv6 + Ipv4(u8, u8, u8, u8, Port), + Ipv6(u16, u16, u16, u16, u16, u16, u16, u16, Port) } From c5b19f0bf9c2f32f368b0f2565a06ae0271a96c1 Mon Sep 17 00:00:00 2001 From: Eric Reed Date: Tue, 25 Jun 2013 16:04:09 -0700 Subject: [PATCH 086/111] changed outdated match on IpAddr --- src/libstd/rt/uv/net.rs | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/src/libstd/rt/uv/net.rs b/src/libstd/rt/uv/net.rs index eac70d5219a3a..9ddc99af69996 100644 --- a/src/libstd/rt/uv/net.rs +++ b/src/libstd/rt/uv/net.rs @@ -15,14 +15,12 @@ use rt::uv::uvll::*; use rt::uv::{AllocCallback, ConnectionCallback, ReadCallback, UdpReceiveCallback, UdpSendCallback}; use rt::uv::{Loop, Watcher, Request, UvError, Buf, NativeHandle, NullCallback, status_to_maybe_uv_error}; -use rt::io::net::ip::{IpAddr, Ipv4, Ipv6}; +use rt::io::net::ip::{IpAddr, Ipv4}; use rt::uv::last_uv_error; use vec; use str; use from_str::{FromStr}; -//#[cfg(test)] use rt::test::*; - pub fn ip4_as_uv_ip4(addr: IpAddr, f: &fn(*sockaddr_in) -> T) -> T { match addr { Ipv4(a, b, c, d, p) => { @@ -39,7 +37,7 @@ pub fn ip4_as_uv_ip4(addr: IpAddr, f: &fn(*sockaddr_in) -> T) -> T { } } } - Ipv6 => fail!() + _ => fail!() // NOTE ipv6 } } From f60468629566ae896e90039f89ef5bb63f920aef Mon Sep 17 00:00:00 2001 From: Eric Reed Date: Tue, 25 Jun 2013 17:04:28 -0700 Subject: [PATCH 087/111] converted UvUdpSocket into a newtype struct --- src/libstd/rt/uv/uvio.rs | 19 +++++-------------- 1 file changed, 5 insertions(+), 14 deletions(-) diff --git a/src/libstd/rt/uv/uvio.rs b/src/libstd/rt/uv/uvio.rs index 6b4cb66ed9998..905087103fc08 100644 --- a/src/libstd/rt/uv/uvio.rs +++ b/src/libstd/rt/uv/uvio.rs @@ -264,7 +264,7 @@ impl IoFactory for UvIoFactory { fn udp_bind(&mut self, addr: IpAddr) -> Result<~RtioUdpSocketObject, IoError> { let /*mut*/ watcher = UdpWatcher::new(self.uv_loop()); match watcher.bind(addr) { - Ok(_) => Ok(~UvUdpSocket { watcher: watcher }), + Ok(_) => Ok(~UvUdpSocket(watcher)), Err(uverr) => { let scheduler = Local::take::(); do scheduler.deschedule_running_task_and_then |_, task| { @@ -451,22 +451,15 @@ impl RtioTcpStream for UvTcpStream { } } -pub struct UvUdpSocket { - watcher: UdpWatcher -} - -impl UvUdpSocket { - fn watcher(&self) -> UdpWatcher { self.watcher } -} +pub struct UvUdpSocket(UdpWatcher); impl Drop for UvUdpSocket { fn finalize(&self) { rtdebug!("closing udp socket"); - let watcher = self.watcher(); let scheduler = Local::take::(); do scheduler.deschedule_running_task_and_then |_, task| { let task_cell = Cell::new(task); - do watcher.close { + do self.close { let scheduler = Local::take::(); scheduler.resume_task_immediately(task_cell.take()); } @@ -481,14 +474,13 @@ impl RtioUdpSocket for UvUdpSocket { let scheduler = Local::take::(); assert!(scheduler.in_task_context()); - let watcher = self.watcher(); let buf_ptr: *&mut 
[u8] = &buf; do scheduler.deschedule_running_task_and_then |sched, task| { rtdebug!("recvfrom: entered scheduler context"); assert!(!sched.in_task_context()); let task_cell = Cell::new(task); let alloc: AllocCallback = |_| unsafe { slice_to_uv_buf(*buf_ptr) }; - do watcher.recv_start(alloc) |watcher, nread, _buf, addr, flags, status| { + do self.recv_start(alloc) |watcher, nread, _buf, addr, flags, status| { let _ = flags; // NOTE add handling for partials? watcher.recv_stop(); @@ -517,12 +509,11 @@ impl RtioUdpSocket for UvUdpSocket { let result_cell_ptr: *Cell> = &result_cell; let scheduler = Local::take::(); assert!(scheduler.in_task_context()); - let watcher = self.watcher(); let buf_ptr: *&[u8] = &buf; do scheduler.deschedule_running_task_and_then |_, task| { let task_cell = Cell::new(task); let buf = unsafe { slice_to_uv_buf(*buf_ptr) }; - do watcher.send(buf, dst) |_watcher, status| { + do self.send(buf, dst) |_watcher, status| { let result = match status { None => Ok(()), From 34b1135b59b15d66a2c5db77563f5b6a13deec15 Mon Sep 17 00:00:00 2001 From: Eric Reed Date: Tue, 25 Jun 2013 17:05:59 -0700 Subject: [PATCH 088/111] Converted UdpSocket into a newtype struct and (dis)connecting uses move semantics rather than ~. --- src/libstd/rt/io/net/udp.rs | 39 ++++++++++++------------------------- 1 file changed, 12 insertions(+), 27 deletions(-) diff --git a/src/libstd/rt/io/net/udp.rs b/src/libstd/rt/io/net/udp.rs index d85fb06243066..2452441b96573 100644 --- a/src/libstd/rt/io/net/udp.rs +++ b/src/libstd/rt/io/net/udp.rs @@ -16,31 +16,24 @@ use rt::io::{io_error, read_error, EndOfFile}; use rt::rtio::{RtioUdpSocketObject, RtioUdpSocket, IoFactory, IoFactoryObject}; use rt::local::Local; -pub struct UdpSocket { - rtsocket: ~RtioUdpSocketObject -} +pub struct UdpSocket(~RtioUdpSocketObject); impl UdpSocket { - fn new(s: ~RtioUdpSocketObject) -> UdpSocket { - UdpSocket { rtsocket: s } - } + fn new(s: ~RtioUdpSocketObject) -> UdpSocket { UdpSocket(s) } pub fn bind(addr: IpAddr) -> Option { - let socket = unsafe { - let io = Local::unsafe_borrow::(); - (*io).udp_bind(addr) - }; + let socket = unsafe { (*Local::unsafe_borrow::()).udp_bind(addr) }; match socket { - Ok(s) => { Some(UdpSocket { rtsocket: s }) } + Ok(s) => Some(UdpSocket::new(s)), Err(ioerr) => { io_error::cond.raise(ioerr); - return None; + None } } } pub fn recvfrom(&self, buf: &mut [u8]) -> Option<(uint, IpAddr)> { - match (*self.rtsocket).recvfrom(buf) { + match (**self).recvfrom(buf) { Ok((nread, src)) => Some((nread, src)), Err(ioerr) => { // EOF is indicated by returning None @@ -53,34 +46,26 @@ impl UdpSocket { } pub fn sendto(&self, buf: &[u8], dst: IpAddr) { - match (*self.rtsocket).sendto(buf, dst) { + match (**self).sendto(buf, dst) { Ok(_) => (), - Err(ioerr) => { - io_error::cond.raise(ioerr); - } + Err(ioerr) => io_error::cond.raise(ioerr), } } - // XXX convert ~self to self eventually - pub fn connect(~self, other: IpAddr) -> UdpStream { + pub fn connect(self, other: IpAddr) -> UdpStream { UdpStream { socket: self, connectedTo: other } } } pub struct UdpStream { - socket: ~UdpSocket, + socket: UdpSocket, connectedTo: IpAddr } impl UdpStream { - pub fn as_socket(&self, f: &fn(&UdpSocket) -> T) -> T { - f(self.socket) - } + pub fn as_socket(&self, f: &fn(&UdpSocket) -> T) -> T { f(&self.socket) } - pub fn disconnect(self) -> ~UdpSocket { - let UdpStream { socket: s, _ } = self; - s - } + pub fn disconnect(self) -> UdpSocket { self.socket } } impl Reader for UdpStream { From d0dc6970d8b8bb0e6cc358ec169daa70d99e1d15 Mon 
Sep 17 00:00:00 2001 From: Eric Reed Date: Wed, 26 Jun 2013 09:37:16 -0700 Subject: [PATCH 089/111] removed unecessary method --- src/libstd/rt/io/net/udp.rs | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/src/libstd/rt/io/net/udp.rs b/src/libstd/rt/io/net/udp.rs index 2452441b96573..c66f7d8ce0616 100644 --- a/src/libstd/rt/io/net/udp.rs +++ b/src/libstd/rt/io/net/udp.rs @@ -19,12 +19,10 @@ use rt::local::Local; pub struct UdpSocket(~RtioUdpSocketObject); impl UdpSocket { - fn new(s: ~RtioUdpSocketObject) -> UdpSocket { UdpSocket(s) } - pub fn bind(addr: IpAddr) -> Option { let socket = unsafe { (*Local::unsafe_borrow::()).udp_bind(addr) }; match socket { - Ok(s) => Some(UdpSocket::new(s)), + Ok(s) => Some(UdpSocket(s)), Err(ioerr) => { io_error::cond.raise(ioerr); None From 87ecfb74357b669308a6e337ebc766af8a03b554 Mon Sep 17 00:00:00 2001 From: Eric Reed Date: Wed, 26 Jun 2013 09:37:48 -0700 Subject: [PATCH 090/111] converted TCP interface to newtype structs --- src/libstd/rt/io/net/tcp.rs | 33 +++++++++------------------------ 1 file changed, 9 insertions(+), 24 deletions(-) diff --git a/src/libstd/rt/io/net/tcp.rs b/src/libstd/rt/io/net/tcp.rs index 3607f781da3ff..947fade096b92 100644 --- a/src/libstd/rt/io/net/tcp.rs +++ b/src/libstd/rt/io/net/tcp.rs @@ -18,15 +18,11 @@ use rt::rtio::{IoFactory, IoFactoryObject, RtioTcpStream, RtioTcpStreamObject}; use rt::local::Local; -pub struct TcpStream { - rtstream: ~RtioTcpStreamObject -} +pub struct TcpStream(~RtioTcpStreamObject); impl TcpStream { fn new(s: ~RtioTcpStreamObject) -> TcpStream { - TcpStream { - rtstream: s - } + TcpStream(s) } pub fn connect(addr: IpAddr) -> Option { @@ -38,13 +34,11 @@ impl TcpStream { }; match stream { - Ok(s) => { - Some(TcpStream::new(s)) - } + Ok(s) => Some(TcpStream::new(s)), Err(ioerr) => { rtdebug!("failed to connect: %?", ioerr); io_error::cond.raise(ioerr); - return None; + None } } } @@ -52,8 +46,7 @@ impl TcpStream { impl Reader for TcpStream { fn read(&mut self, buf: &mut [u8]) -> Option { - let bytes_read = self.rtstream.read(buf); - match bytes_read { + match (**self).read(buf) { Ok(read) => Some(read), Err(ioerr) => { // EOF is indicated by returning None @@ -70,8 +63,7 @@ impl Reader for TcpStream { impl Writer for TcpStream { fn write(&mut self, buf: &[u8]) { - let res = self.rtstream.write(buf); - match res { + match (**self).write(buf) { Ok(_) => (), Err(ioerr) => { io_error::cond.raise(ioerr); @@ -82,9 +74,7 @@ impl Writer for TcpStream { fn flush(&mut self) { fail!() } } -pub struct TcpListener { - rtlistener: ~RtioTcpListenerObject, -} +pub struct TcpListener(~RtioTcpListenerObject); impl TcpListener { pub fn bind(addr: IpAddr) -> Option { @@ -93,11 +83,7 @@ impl TcpListener { (*io).tcp_bind(addr) }; match listener { - Ok(l) => { - Some(TcpListener { - rtlistener: l - }) - } + Ok(l) => Some(TcpListener(l)), Err(ioerr) => { io_error::cond.raise(ioerr); return None; @@ -108,8 +94,7 @@ impl TcpListener { impl Listener for TcpListener { fn accept(&mut self) -> Option { - let rtstream = self.rtlistener.accept(); - match rtstream { + match (**self).accept() { Ok(s) => { Some(TcpStream::new(s)) } From ce97bd4c8b841165bb22cb0be566a9b66931165a Mon Sep 17 00:00:00 2001 From: Eric Reed Date: Wed, 26 Jun 2013 10:17:10 -0700 Subject: [PATCH 091/111] cleaned up uv/net --- src/libstd/rt/uv/net.rs | 100 +++++++++++++--------------------------- 1 file changed, 32 insertions(+), 68 deletions(-) diff --git a/src/libstd/rt/uv/net.rs b/src/libstd/rt/uv/net.rs index 
9ddc99af69996..8c5f9fdbd4a52 100644 --- a/src/libstd/rt/uv/net.rs +++ b/src/libstd/rt/uv/net.rs @@ -44,8 +44,8 @@ pub fn ip4_as_uv_ip4(addr: IpAddr, f: &fn(*sockaddr_in) -> T) -> T { pub fn uv_ip4_to_ip4(addr: *sockaddr_in) -> IpAddr { let ip4_size = 16; let buf = vec::from_elem(ip4_size + 1 /*null terminated*/, 0u8); - unsafe { ip4_name(addr, vec::raw::to_ptr(buf), ip4_size as u64) }; - let port = unsafe { ip4_port(addr) }; + unsafe { uvll::ip4_name(addr, vec::raw::to_ptr(buf), ip4_size as u64) }; + let port = unsafe { uvll::ip4_port(addr) }; let ip_str = str::from_bytes_slice(buf).trim_right_chars(&'\x00'); let ip: ~[u8] = ip_str.split_iter('.') .transform(|s: &str| -> u8 { @@ -71,13 +71,11 @@ impl StreamWatcher { data.read_cb = Some(cb); } - let handle = self.native_handle(); - unsafe { uvll::read_start(handle, alloc_cb, read_cb); } + unsafe { uvll::read_start(self.native_handle(), alloc_cb, read_cb); } extern fn alloc_cb(stream: *uvll::uv_stream_t, suggested_size: size_t) -> Buf { let mut stream_watcher: StreamWatcher = NativeHandle::from_native_handle(stream); - let data = stream_watcher.get_watcher_data(); - let alloc_cb = data.alloc_cb.get_ref(); + let alloc_cb = stream_watcher.get_watcher_data().alloc_cb.get_ref(); return (*alloc_cb)(suggested_size as uint); } @@ -85,8 +83,7 @@ impl StreamWatcher { rtdebug!("buf addr: %x", buf.base as uint); rtdebug!("buf len: %d", buf.len as int); let mut stream_watcher: StreamWatcher = NativeHandle::from_native_handle(stream); - let data = stream_watcher.get_watcher_data(); - let cb = data.read_cb.get_ref(); + let cb = stream_watcher.get_watcher_data().read_cb.get_ref(); let status = status_to_maybe_uv_error(stream, nread as c_int); (*cb)(stream_watcher, nread as int, buf, status); } @@ -108,22 +105,15 @@ impl StreamWatcher { } let req = WriteRequest::new(); - let bufs = [buf]; unsafe { - assert!(0 == uvll::write(req.native_handle(), - self.native_handle(), - bufs, write_cb)); + assert_eq!(0, uvll::write(req.native_handle(), self.native_handle(), [buf], write_cb)); } extern fn write_cb(req: *uvll::uv_write_t, status: c_int) { let write_request: WriteRequest = NativeHandle::from_native_handle(req); let mut stream_watcher = write_request.stream(); write_request.delete(); - let cb = { - let data = stream_watcher.get_watcher_data(); - let cb = data.write_cb.swap_unwrap(); - cb - }; + let cb = stream_watcher.get_watcher_data().write_cb.swap_unwrap(); let status = status_to_maybe_uv_error(stream_watcher.native_handle(), status); cb(stream_watcher, status); } @@ -132,9 +122,7 @@ impl StreamWatcher { pub fn accept(&mut self, stream: StreamWatcher) { let self_handle = self.native_handle() as *c_void; let stream_handle = stream.native_handle() as *c_void; - unsafe { - assert_eq!(0, uvll::accept(self_handle, stream_handle)); - } + assert_eq!(0, unsafe { uvll::accept(self_handle, stream_handle) } ); } pub fn close(self, cb: NullCallback) { @@ -149,10 +137,7 @@ impl StreamWatcher { extern fn close_cb(handle: *uvll::uv_stream_t) { let mut stream_watcher: StreamWatcher = NativeHandle::from_native_handle(handle); - { - let data = stream_watcher.get_watcher_data(); - data.close_cb.swap_unwrap()(); - } + stream_watcher.get_watcher_data().close_cb.swap_unwrap()(); stream_watcher.drop_watcher_data(); unsafe { free_handle(handle as *c_void) } } @@ -160,8 +145,7 @@ impl StreamWatcher { } impl NativeHandle<*uvll::uv_stream_t> for StreamWatcher { - fn from_native_handle( - handle: *uvll::uv_stream_t) -> StreamWatcher { + fn from_native_handle(handle: *uvll::uv_stream_t) 
-> StreamWatcher { StreamWatcher(handle) } fn native_handle(&self) -> *uvll::uv_stream_t { @@ -188,9 +172,7 @@ impl TcpWatcher { match address { Ipv4(*) => { do ip4_as_uv_ip4(address) |addr| { - let result = unsafe { - uvll::tcp_bind(self.native_handle(), addr) - }; + let result = unsafe { uvll::tcp_bind(self.native_handle(), addr) }; if result == 0 { Ok(()) } else { @@ -212,9 +194,9 @@ impl TcpWatcher { Ipv4(*) => { do ip4_as_uv_ip4(address) |addr| { rtdebug!("connect_t: %x", connect_handle as uint); - assert!(0 == uvll::tcp_connect(connect_handle, - self.native_handle(), - addr, connect_cb)); + assert_eq!(0, + uvll::tcp_connect(connect_handle, self.native_handle(), + addr, connect_cb)); } } _ => fail!() @@ -225,10 +207,7 @@ impl TcpWatcher { let connect_request: ConnectRequest = NativeHandle::from_native_handle(req); let mut stream_watcher = connect_request.stream(); connect_request.delete(); - let cb: ConnectionCallback = { - let data = stream_watcher.get_watcher_data(); - data.connect_cb.swap_unwrap() - }; + let cb = stream_watcher.get_watcher_data().connect_cb.swap_unwrap(); let status = status_to_maybe_uv_error(stream_watcher.native_handle(), status); cb(stream_watcher, status); } @@ -245,15 +224,13 @@ impl TcpWatcher { unsafe { static BACKLOG: c_int = 128; // XXX should be configurable // XXX: This can probably fail - assert!(0 == uvll::listen(self.native_handle(), - BACKLOG, connection_cb)); + assert_eq!(0, uvll::listen(self.native_handle(), BACKLOG, connection_cb)); } extern fn connection_cb(handle: *uvll::uv_stream_t, status: c_int) { rtdebug!("connection_cb"); let mut stream_watcher: StreamWatcher = NativeHandle::from_native_handle(handle); - let data = stream_watcher.get_watcher_data(); - let cb = data.connect_cb.get_ref(); + let cb = stream_watcher.get_watcher_data().connect_cb.get_ref(); let status = status_to_maybe_uv_error(handle, status); (*cb)(stream_watcher, status); } @@ -314,8 +291,7 @@ impl UdpWatcher { data.udp_recv_cb = Some(cb); } - let handle = self.native_handle(); - unsafe { uvll::udp_recv_start(handle, alloc_cb, recv_cb); } + unsafe { uvll::udp_recv_start(self.native_handle(), alloc_cb, recv_cb); } extern fn alloc_cb(handle: *uvll::uv_udp_t, suggested_size: size_t) -> Buf { let mut udp_watcher: UdpWatcher = NativeHandle::from_native_handle(handle); @@ -331,17 +307,14 @@ impl UdpWatcher { rtdebug!("buf addr: %x", buf.base as uint); rtdebug!("buf len: %d", buf.len as int); let mut udp_watcher: UdpWatcher = NativeHandle::from_native_handle(handle); - let data = udp_watcher.get_watcher_data(); - let cb = data.udp_recv_cb.get_ref(); + let cb = udp_watcher.get_watcher_data().udp_recv_cb.get_ref(); let status = status_to_maybe_uv_error(handle, nread as c_int); - let address = uv_ip4_to_ip4(addr); - (*cb)(udp_watcher, nread as int, buf, address, flags as uint, status); + (*cb)(udp_watcher, nread as int, buf, uv_ip4_to_ip4(addr), flags as uint, status); } } pub fn recv_stop(&self) { - let handle = self.native_handle(); - unsafe { uvll::udp_recv_stop(handle); } + unsafe { uvll::udp_recv_stop(self.native_handle()); } } pub fn send(&self, buf: Buf, address: IpAddr, cb: UdpSendCallback) { @@ -357,7 +330,7 @@ impl UdpWatcher { Ipv4(*) => { do ip4_as_uv_ip4(address) |addr| { unsafe { - assert!(0 == uvll::udp_send(req.native_handle(), + assert_eq!(0, uvll::udp_send(req.native_handle(), self.native_handle(), [buf], addr, send_cb)); } @@ -411,12 +384,9 @@ impl Request for ConnectRequest { } impl ConnectRequest { fn new() -> ConnectRequest { - let connect_handle = unsafe { - 
malloc_req(UV_CONNECT) - }; + let connect_handle = unsafe { malloc_req(UV_CONNECT) }; assert!(connect_handle.is_not_null()); - let connect_handle = connect_handle as *uvll::uv_connect_t; - ConnectRequest(connect_handle) + ConnectRequest(connect_handle as *uvll::uv_connect_t) } fn stream(&self) -> StreamWatcher { @@ -432,8 +402,7 @@ impl ConnectRequest { } impl NativeHandle<*uvll::uv_connect_t> for ConnectRequest { - fn from_native_handle( - handle: *uvll:: uv_connect_t) -> ConnectRequest { + fn from_native_handle(handle: *uvll:: uv_connect_t) -> ConnectRequest { ConnectRequest(handle) } fn native_handle(&self) -> *uvll::uv_connect_t { @@ -447,12 +416,9 @@ impl Request for WriteRequest { } impl WriteRequest { pub fn new() -> WriteRequest { - let write_handle = unsafe { - malloc_req(UV_WRITE) - }; + let write_handle = unsafe { malloc_req(UV_WRITE) }; assert!(write_handle.is_not_null()); - let write_handle = write_handle as *uvll::uv_write_t; - WriteRequest(write_handle) + WriteRequest(write_handle as *uvll::uv_write_t) } pub fn stream(&self) -> StreamWatcher { @@ -483,16 +449,14 @@ impl UdpSendRequest { pub fn new() -> UdpSendRequest { let send_handle = unsafe { malloc_req(UV_UDP_SEND) }; assert!(send_handle.is_not_null()); - let send_handle = send_handle as *uvll::uv_udp_send_t; - UdpSendRequest(send_handle) + UdpSendRequest(send_handle as *uvll::uv_udp_send_t) } pub fn handle(&self) -> UdpWatcher { - unsafe { - NativeHandle::from_native_handle( - uvll::get_udp_handle_from_send_req( - self.native_handle())) - } + let send_request_handle = unsafe { + uvll::get_udp_handle_from_send_req(self.native_handle()) + }; + NativeHandle::from_native_handle(send_request_handle) } pub fn delete(self) { From 42f3f069fa1963cdf19117e57a83089889a64f37 Mon Sep 17 00:00:00 2001 From: Eric Reed Date: Wed, 26 Jun 2013 13:48:49 -0700 Subject: [PATCH 092/111] changed NOTE to TODO --- src/libstd/rt/uv/net.rs | 8 ++++---- src/libstd/rt/uv/uvio.rs | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/src/libstd/rt/uv/net.rs b/src/libstd/rt/uv/net.rs index 8c5f9fdbd4a52..dc766b2d7f88b 100644 --- a/src/libstd/rt/uv/net.rs +++ b/src/libstd/rt/uv/net.rs @@ -37,7 +37,7 @@ pub fn ip4_as_uv_ip4(addr: IpAddr, f: &fn(*sockaddr_in) -> T) -> T { } } } - _ => fail!() // NOTE ipv6 + _ => fail!() // TODO ipv6 } } @@ -279,7 +279,7 @@ impl UdpWatcher { } } } - _ => fail!() // NOTE ipv6 + _ => fail!() // TODO ipv6 } } @@ -299,7 +299,7 @@ impl UdpWatcher { return (*alloc_cb)(suggested_size as uint); } - /* NOTE the socket address should actually be a pointer to + /* TODO the socket address should actually be a pointer to either a sockaddr_in or sockaddr_in6. In libuv, the udp_recv callback takes a struct *sockaddr */ extern fn recv_cb(handle: *uvll::uv_udp_t, nread: ssize_t, buf: Buf, @@ -336,7 +336,7 @@ impl UdpWatcher { } } } - _ => fail!() // NOTE ipv6 + _ => fail!() // TODO ipv6 } extern fn send_cb(req: *uvll::uv_udp_send_t, status: c_int) { diff --git a/src/libstd/rt/uv/uvio.rs b/src/libstd/rt/uv/uvio.rs index 905087103fc08..1ae6cd8b17bb7 100644 --- a/src/libstd/rt/uv/uvio.rs +++ b/src/libstd/rt/uv/uvio.rs @@ -481,7 +481,7 @@ impl RtioUdpSocket for UvUdpSocket { let task_cell = Cell::new(task); let alloc: AllocCallback = |_| unsafe { slice_to_uv_buf(*buf_ptr) }; do self.recv_start(alloc) |watcher, nread, _buf, addr, flags, status| { - let _ = flags; // NOTE add handling for partials? + let _ = flags; // TODO add handling for partials? 
watcher.recv_stop(); From ddbccecc27e7a6be144c9c55d25a5b56cac0a179 Mon Sep 17 00:00:00 2001 From: Brian Anderson Date: Wed, 26 Jun 2013 17:00:29 -0700 Subject: [PATCH 093/111] std::rt: Some cleanup --- src/libstd/rt/mod.rs | 3 +-- src/libstd/rt/sched.rs | 35 +++++++++++++++++------------------ 2 files changed, 18 insertions(+), 20 deletions(-) diff --git a/src/libstd/rt/mod.rs b/src/libstd/rt/mod.rs index bbf1cf0d9b797..97a0285ea196e 100644 --- a/src/libstd/rt/mod.rs +++ b/src/libstd/rt/mod.rs @@ -267,11 +267,10 @@ pub fn run(main: ~fn()) -> int { }; // Create and enqueue the main task. - let main_cell = Cell::new(main); let mut new_task = ~Task::new_root(); new_task.on_exit = Some(on_exit); let main_task = ~Coroutine::with_task(&mut scheds[0].stack_pool, - new_task, main_cell.take()); + new_task, main); scheds[0].enqueue_task(main_task); // Run each scheduler in a thread. diff --git a/src/libstd/rt/sched.rs b/src/libstd/rt/sched.rs index bbe4aa25e2967..26956c49a629a 100644 --- a/src/libstd/rt/sched.rs +++ b/src/libstd/rt/sched.rs @@ -177,7 +177,7 @@ impl Scheduler { rtdebug!("run taking sched"); let sched = Local::take::(); - // XXX: Reenable this once we're using a per-task queue. With a shared + // XXX: Reenable this once we're using a per-scheduler queue. With a shared // queue this is not true //assert!(sched.work_queue.is_empty()); rtdebug!("scheduler metrics: %s\n", { @@ -213,10 +213,10 @@ impl Scheduler { if sched.resume_task_from_queue() { // We performed a scheduling action. There may be other work // to do yet, so let's try again later. - let mut sched = Local::take::(); - sched.metrics.tasks_resumed_from_queue += 1; - sched.event_loop.callback(Scheduler::run_sched_once); - Local::put(sched); + do Local::borrow:: |sched| { + sched.metrics.tasks_resumed_from_queue += 1; + sched.event_loop.callback(Scheduler::run_sched_once); + } return; } @@ -224,18 +224,18 @@ impl Scheduler { // Generate a SchedHandle and push it to the sleeper list so // somebody can wake us up later. rtdebug!("no work to do"); - let mut sched = Local::take::(); - sched.metrics.wasted_turns += 1; - if !sched.sleepy && !sched.no_sleep { - rtdebug!("sleeping"); - sched.metrics.sleepy_times += 1; - sched.sleepy = true; - let handle = sched.make_handle(); - sched.sleeper_list.push(handle); - } else { - rtdebug!("not sleeping"); + do Local::borrow:: |sched| { + sched.metrics.wasted_turns += 1; + if !sched.sleepy && !sched.no_sleep { + rtdebug!("sleeping"); + sched.metrics.sleepy_times += 1; + sched.sleepy = true; + let handle = sched.make_handle(); + sched.sleeper_list.push(handle); + } else { + rtdebug!("not sleeping"); + } } - Local::put(sched); } pub fn make_handle(&mut self) -> SchedHandle { @@ -462,8 +462,7 @@ impl Scheduler { // here we know we are home, execute now OR we know we // aren't homed, and that this sched doesn't care do this.switch_running_tasks_and_then(task) |sched, last_task| { - let last_task = Cell::new(last_task); - sched.enqueue_task(last_task.take()); + sched.enqueue_task(last_task); } } else if !homed && !this.run_anything { // the task isn't homed, but it can't be run here From 5cfad4b6de3a9ab749c975338c23fc2e20b0beec Mon Sep 17 00:00:00 2001 From: toddaaro Date: Wed, 26 Jun 2013 16:41:00 -0700 Subject: [PATCH 094/111] Refactored the runtime to view coroutines as a component of tasks, instead of tasks as a component of coroutines. 
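In outline, this inverts the ownership between the two types. Roughly (the old Coroutine
definition is the one removed below; the new Task shape is inferred from its call sites
in this patch, not copied verbatim):

    // Before: the scheduler ran ~Coroutine values, each of which owned its Task.
    pub struct Coroutine {
        priv current_stack_segment: StackSegment,
        priv saved_context: Context,
        task: ~Task,                   // heap, GC, unwinding, local storage, logging
    }

    // After (shape inferred from this patch's call sites): the scheduler runs
    // ~Task values, and the execution state (stack segment plus saved context)
    // is an optional component of the task.
    pub struct Task {
        home: Option<SchedHome>,
        coroutine: Option<Coroutine>,  // None once the task has finished
        // ... heap, unwinder, on_exit, etc.
    }

Call sites change accordingly: task.task.home becomes task.home, a context switch
reaches the saved registers through task.coroutine.get_mut_ref().saved_context, and a
dead task's stack is reclaimed by swap_unwrap()-ing the coroutine out of the task and
calling recycle(&mut sched.stack_pool) on it.
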
--- src/libstd/rt/comm.rs | 12 +- src/libstd/rt/join_latch.rs | 1 + src/libstd/rt/local.rs | 9 +- src/libstd/rt/mod.rs | 13 +- src/libstd/rt/sched.rs | 406 ++++++------------------------------ src/libstd/rt/task.rs | 176 +++++++++++++++- src/libstd/rt/test.rs | 277 +++++++++--------------- src/libstd/rt/tube.rs | 5 +- src/libstd/rt/uv/uvio.rs | 5 +- src/libstd/task/mod.rs | 1 + src/libstd/task/spawn.rs | 27 ++- src/libstd/unstable/lang.rs | 4 + 12 files changed, 386 insertions(+), 550 deletions(-) diff --git a/src/libstd/rt/comm.rs b/src/libstd/rt/comm.rs index dd27c03ff5164..7608bc89e021f 100644 --- a/src/libstd/rt/comm.rs +++ b/src/libstd/rt/comm.rs @@ -20,7 +20,8 @@ use cast; use util; use ops::Drop; use kinds::Owned; -use rt::sched::{Scheduler, Coroutine}; +use rt::sched::{Scheduler}; +use rt::task::Task; use rt::local::Local; use unstable::atomics::{AtomicUint, AtomicOption, SeqCst}; use unstable::sync::UnsafeAtomicRcBox; @@ -136,7 +137,7 @@ impl ChanOne { } task_as_state => { // Port is blocked. Wake it up. - let recvr: ~Coroutine = cast::transmute(task_as_state); + let recvr: ~Task = cast::transmute(task_as_state); let mut sched = Local::take::(); rtdebug!("rendezvous send"); sched.metrics.rendezvous_sends += 1; @@ -192,7 +193,7 @@ impl PortOne { // NB: We have to drop back into the scheduler event loop here // instead of switching immediately back or we could end up // triggering infinite recursion on the scheduler's stack. - let task: ~Coroutine = cast::transmute(task_as_state); + let task: ~Task = cast::transmute(task_as_state); sched.enqueue_task(task); } _ => util::unreachable() @@ -257,7 +258,7 @@ impl Drop for ChanOneHack { task_as_state => { // The port is blocked waiting for a message we will never send. Wake it. assert!((*this.packet()).payload.is_none()); - let recvr: ~Coroutine = cast::transmute(task_as_state); + let recvr: ~Task = cast::transmute(task_as_state); let sched = Local::take::(); sched.schedule_task(recvr); } @@ -554,6 +555,8 @@ mod test { { let _c = chan; } port.recv(); }; + // What is our res? 
+ rtdebug!("res is: %?", res.is_err()); assert!(res.is_err()); } } @@ -905,4 +908,5 @@ mod test { } } } + } diff --git a/src/libstd/rt/join_latch.rs b/src/libstd/rt/join_latch.rs index ad5cf2eb378c5..79c0d5da9a4f5 100644 --- a/src/libstd/rt/join_latch.rs +++ b/src/libstd/rt/join_latch.rs @@ -643,3 +643,4 @@ mod test { } } } + diff --git a/src/libstd/rt/local.rs b/src/libstd/rt/local.rs index 6df1ffaa453f3..374933ab281b8 100644 --- a/src/libstd/rt/local.rs +++ b/src/libstd/rt/local.rs @@ -13,6 +13,7 @@ use rt::sched::Scheduler; use rt::task::Task; use rt::local_ptr; use rt::rtio::{EventLoop, IoFactoryObject}; +//use borrow::to_uint; pub trait Local { fn put(value: ~Self); @@ -32,6 +33,7 @@ impl Local for Scheduler { let res_ptr: *mut Option = &mut res; unsafe { do local_ptr::borrow |sched| { +// rtdebug!("successfully unsafe borrowed sched pointer"); let result = f(sched); *res_ptr = Some(result); } @@ -51,9 +53,12 @@ impl Local for Task { fn exists() -> bool { rtabort!("unimpl") } fn borrow(f: &fn(&mut Task) -> T) -> T { do Local::borrow:: |sched| { +// rtdebug!("sched about to grab current_task"); match sched.current_task { Some(~ref mut task) => { - f(&mut *task.task) +// rtdebug!("current task pointer: %x", to_uint(task)); +// rtdebug!("current task heap pointer: %x", to_uint(&task.heap)); + f(task) } None => { rtabort!("no scheduler") @@ -64,7 +69,7 @@ impl Local for Task { unsafe fn unsafe_borrow() -> *mut Task { match (*Local::unsafe_borrow::()).current_task { Some(~ref mut task) => { - let s: *mut Task = &mut *task.task; + let s: *mut Task = &mut *task; return s; } None => { diff --git a/src/libstd/rt/mod.rs b/src/libstd/rt/mod.rs index bbf1cf0d9b797..aae194ae54838 100644 --- a/src/libstd/rt/mod.rs +++ b/src/libstd/rt/mod.rs @@ -67,7 +67,7 @@ use iter::Times; use iterator::IteratorUtil; use option::Some; use ptr::RawPtr; -use rt::sched::{Scheduler, Coroutine, Shutdown}; +use rt::sched::{Scheduler, Shutdown}; use rt::sleeper_list::SleeperList; use rt::task::Task; use rt::thread::Thread; @@ -268,10 +268,9 @@ pub fn run(main: ~fn()) -> int { // Create and enqueue the main task. let main_cell = Cell::new(main); - let mut new_task = ~Task::new_root(); - new_task.on_exit = Some(on_exit); - let main_task = ~Coroutine::with_task(&mut scheds[0].stack_pool, - new_task, main_cell.take()); + let mut main_task = ~Task::new_root(&mut scheds[0].stack_pool, + main_cell.take()); + main_task.on_exit = Some(on_exit); scheds[0].enqueue_task(main_task); // Run each scheduler in a thread. 
@@ -348,7 +347,7 @@ pub fn context() -> RuntimeContext { #[test] fn test_context() { use unstable::run_in_bare_thread; - use self::sched::{Scheduler, Coroutine}; + use self::sched::{Scheduler}; use rt::local::Local; use rt::test::new_test_uv_sched; @@ -356,7 +355,7 @@ fn test_context() { do run_in_bare_thread { assert_eq!(context(), GlobalContext); let mut sched = ~new_test_uv_sched(); - let task = ~do Coroutine::new_root(&mut sched.stack_pool) { + let task = ~do Task::new_root(&mut sched.stack_pool) { assert_eq!(context(), TaskContext); let sched = Local::take::(); do sched.deschedule_running_task_and_then() |sched, task| { diff --git a/src/libstd/rt/sched.rs b/src/libstd/rt/sched.rs index bbe4aa25e2967..ed5cce4b35ca3 100644 --- a/src/libstd/rt/sched.rs +++ b/src/libstd/rt/sched.rs @@ -16,19 +16,15 @@ use clone::Clone; use super::sleeper_list::SleeperList; use super::work_queue::WorkQueue; -use super::stack::{StackPool, StackSegment}; +use super::stack::{StackPool}; use super::rtio::{EventLoop, EventLoopObject, RemoteCallbackObject}; use super::context::Context; -use super::task::Task; +use super::task::{Task, AnySched, Sched}; use super::message_queue::MessageQueue; use rt::local_ptr; use rt::local::Local; use rt::rtio::RemoteCallback; use rt::metrics::SchedMetrics; - -//use to_str::ToStr; - -/// To allow for using pointers as scheduler ids use borrow::{to_uint}; /// The Scheduler is responsible for coordinating execution of Coroutines @@ -41,7 +37,7 @@ use borrow::{to_uint}; pub struct Scheduler { /// A queue of available work. Under a work-stealing policy there /// is one per Scheduler. - priv work_queue: WorkQueue<~Coroutine>, + priv work_queue: WorkQueue<~Task>, /// The queue of incoming messages from other schedulers. /// These are enqueued by SchedHandles after which a remote callback /// is triggered to handle the message. @@ -66,7 +62,7 @@ pub struct Scheduler { /// Always valid when a task is executing, otherwise not priv saved_context: Context, /// The currently executing task - current_task: Option<~Coroutine>, + current_task: Option<~Task>, /// An action performed after a context switch on behalf of the /// code running before the context switch priv cleanup_job: Option, @@ -81,33 +77,15 @@ pub struct SchedHandle { sched_id: uint } -pub struct Coroutine { - /// The segment of stack on which the task is currently running or, - /// if the task is blocked, on which the task will resume execution - priv current_stack_segment: StackSegment, - /// These are always valid when the task is not running, unless - /// the task is dead - priv saved_context: Context, - /// The heap, GC, unwinding, local storage, logging - task: ~Task, -} - -// A scheduler home is either a handle to the home scheduler, or an -// explicit "AnySched". 
-pub enum SchedHome { - AnySched, - Sched(SchedHandle) -} - pub enum SchedMessage { Wake, Shutdown, - PinnedTask(~Coroutine) + PinnedTask(~Task) } enum CleanupJob { DoNothing, - GiveTask(~Coroutine, UnsafeTaskReceiver) + GiveTask(~Task, UnsafeTaskReceiver) } impl Scheduler { @@ -116,7 +94,7 @@ impl Scheduler { pub fn sched_id(&self) -> uint { to_uint(self) } pub fn new(event_loop: ~EventLoopObject, - work_queue: WorkQueue<~Coroutine>, + work_queue: WorkQueue<~Task>, sleeper_list: SleeperList) -> Scheduler { @@ -125,7 +103,7 @@ impl Scheduler { } pub fn new_special(event_loop: ~EventLoopObject, - work_queue: WorkQueue<~Coroutine>, + work_queue: WorkQueue<~Task>, sleeper_list: SleeperList, run_anything: bool) -> Scheduler { @@ -253,7 +231,7 @@ impl Scheduler { /// Pushes the task onto the work stealing queue and tells the /// event loop to run it later. Always use this instead of pushing /// to the work queue directly. - pub fn enqueue_task(&mut self, task: ~Coroutine) { + pub fn enqueue_task(&mut self, task: ~Task) { // We don't want to queue tasks that belong on other threads, // so we send them home at enqueue time. @@ -307,7 +285,7 @@ impl Scheduler { rtdebug!("recv BiasedTask message in sched: %u", this.sched_id()); let mut task = task; - task.task.home = Some(Sched(this.make_handle())); + task.home = Some(Sched(this.make_handle())); this.resume_task_immediately(task); return true; } @@ -349,9 +327,9 @@ impl Scheduler { } /// Given an input Coroutine sends it back to its home scheduler. - fn send_task_home(task: ~Coroutine) { + fn send_task_home(task: ~Task) { let mut task = task; - let mut home = task.task.home.swap_unwrap(); + let mut home = task.home.swap_unwrap(); match home { Sched(ref mut home_handle) => { home_handle.send(PinnedTask(task)); @@ -377,7 +355,7 @@ impl Scheduler { match this.work_queue.pop() { Some(task) => { let action_id = { - let home = &task.task.home; + let home = &task.home; match home { &Some(Sched(ref home_handle)) if home_handle.sched_id != this.sched_id() => { @@ -440,14 +418,15 @@ impl Scheduler { rtdebug!("ending running task"); do self.deschedule_running_task_and_then |sched, dead_task| { - let dead_task = Cell::new(dead_task); - dead_task.take().recycle(&mut sched.stack_pool); + let mut dead_task = dead_task; + let coroutine = dead_task.coroutine.swap_unwrap(); + coroutine.recycle(&mut sched.stack_pool); } rtabort!("control reached end of task"); } - pub fn schedule_task(~self, task: ~Coroutine) { + pub fn schedule_task(~self, task: ~Task) { assert!(self.in_task_context()); // is the task home? @@ -478,7 +457,7 @@ impl Scheduler { // Core scheduling ops - pub fn resume_task_immediately(~self, task: ~Coroutine) { + pub fn resume_task_immediately(~self, task: ~Task) { let mut this = self; assert!(!this.in_task_context()); @@ -521,7 +500,7 @@ impl Scheduler { /// This passes a Scheduler pointer to the fn after the context switch /// in order to prevent that fn from performing further scheduling operations. /// Doing further scheduling could easily result in infinite recursion. 
- pub fn deschedule_running_task_and_then(~self, f: &fn(&mut Scheduler, ~Coroutine)) { + pub fn deschedule_running_task_and_then(~self, f: &fn(&mut Scheduler, ~Task)) { let mut this = self; assert!(this.in_task_context()); @@ -530,8 +509,8 @@ impl Scheduler { unsafe { let blocked_task = this.current_task.swap_unwrap(); - let f_fake_region = transmute::<&fn(&mut Scheduler, ~Coroutine), - &fn(&mut Scheduler, ~Coroutine)>(f); + let f_fake_region = transmute::<&fn(&mut Scheduler, ~Task), + &fn(&mut Scheduler, ~Task)>(f); let f_opaque = ClosureConverter::from_fn(f_fake_region); this.enqueue_cleanup_job(GiveTask(blocked_task, f_opaque)); } @@ -553,8 +532,8 @@ impl Scheduler { /// Switch directly to another task, without going through the scheduler. /// You would want to think hard about doing this, e.g. if there are /// pending I/O events it would be a bad idea. - pub fn switch_running_tasks_and_then(~self, next_task: ~Coroutine, - f: &fn(&mut Scheduler, ~Coroutine)) { + pub fn switch_running_tasks_and_then(~self, next_task: ~Task, + f: &fn(&mut Scheduler, ~Task)) { let mut this = self; assert!(this.in_task_context()); @@ -563,8 +542,8 @@ impl Scheduler { let old_running_task = this.current_task.swap_unwrap(); let f_fake_region = unsafe { - transmute::<&fn(&mut Scheduler, ~Coroutine), - &fn(&mut Scheduler, ~Coroutine)>(f) + transmute::<&fn(&mut Scheduler, ~Task), + &fn(&mut Scheduler, ~Task)>(f) }; let f_opaque = ClosureConverter::from_fn(f_fake_region); this.enqueue_cleanup_job(GiveTask(old_running_task, f_opaque)); @@ -631,12 +610,22 @@ impl Scheduler { // because borrowck thinks the three patterns are conflicting // borrows unsafe { - let last_task = transmute::, Option<&mut Coroutine>>(last_task); + let last_task = transmute::, Option<&mut Task>>(last_task); let last_task_context = match last_task { - Some(t) => Some(&mut t.saved_context), None => None + Some(t) => { + Some(&mut t.coroutine.get_mut_ref().saved_context) + } + None => { + None + } }; let next_task_context = match self.current_task { - Some(ref mut t) => Some(&mut t.saved_context), None => None + Some(ref mut t) => { + Some(&mut t.coroutine.get_mut_ref().saved_context) + } + None => { + None + } }; // XXX: These transmutes can be removed after snapshot return (transmute(&mut self.saved_context), @@ -661,186 +650,34 @@ impl SchedHandle { } } -impl Coroutine { - - /// This function checks that a coroutine is running "home". - pub fn is_home(&self) -> bool { - rtdebug!("checking if coroutine is home"); - do Local::borrow:: |sched| { - match self.task.home { - Some(AnySched) => { false } - Some(Sched(SchedHandle { sched_id: ref id, _ })) => { - *id == sched.sched_id() - } - None => { rtabort!("error: homeless task!"); } - } - } - } - - /// Without access to self, but with access to the "expected home - /// id", see if we are home. - fn is_home_using_id(id: uint) -> bool { - rtdebug!("checking if coroutine is home using id"); - do Local::borrow:: |sched| { - if sched.sched_id() == id { - true - } else { - false - } - } - } - - /// Check if this coroutine has a home - fn homed(&self) -> bool { - rtdebug!("checking if this coroutine has a home"); - match self.task.home { - Some(AnySched) => { false } - Some(Sched(_)) => { true } - None => { rtabort!("error: homeless task!"); - } - } - } - - /// A version of is_home that does not need to use TLS, it instead - /// takes local scheduler as a parameter. 
- fn is_home_no_tls(&self, sched: &~Scheduler) -> bool { - rtdebug!("checking if coroutine is home without tls"); - match self.task.home { - Some(AnySched) => { true } - Some(Sched(SchedHandle { sched_id: ref id, _})) => { - *id == sched.sched_id() - } - None => { rtabort!("error: homeless task!"); } - } - } - - /// Check TLS for the scheduler to see if we are on a special - /// scheduler. - pub fn on_special() -> bool { - rtdebug!("checking if coroutine is executing on special sched"); - do Local::borrow::() |sched| { - !sched.run_anything - } - } - - // Created new variants of "new" that takes a home scheduler - // parameter. The original with_task now calls with_task_homed - // using the AnySched paramter. - - pub fn new_homed(stack_pool: &mut StackPool, home: SchedHome, start: ~fn()) -> Coroutine { - Coroutine::with_task_homed(stack_pool, ~Task::new_root(), start, home) - } - - pub fn new_root(stack_pool: &mut StackPool, start: ~fn()) -> Coroutine { - Coroutine::with_task(stack_pool, ~Task::new_root(), start) - } - - pub fn with_task_homed(stack_pool: &mut StackPool, - task: ~Task, - start: ~fn(), - home: SchedHome) -> Coroutine { - - static MIN_STACK_SIZE: uint = 1000000; // XXX: Too much stack - - let start = Coroutine::build_start_wrapper(start); - let mut stack = stack_pool.take_segment(MIN_STACK_SIZE); - // NB: Context holds a pointer to that ~fn - let initial_context = Context::new(start, &mut stack); - let mut crt = Coroutine { - current_stack_segment: stack, - saved_context: initial_context, - task: task, - }; - crt.task.home = Some(home); - return crt; - } - - pub fn with_task(stack_pool: &mut StackPool, - task: ~Task, - start: ~fn()) -> Coroutine { - Coroutine::with_task_homed(stack_pool, - task, - start, - AnySched) - } - - fn build_start_wrapper(start: ~fn()) -> ~fn() { - // XXX: The old code didn't have this extra allocation - let start_cell = Cell::new(start); - let wrapper: ~fn() = || { - // This is the first code to execute after the initial - // context switch to the task. The previous context may - // have asked us to do some cleanup. - unsafe { - let sched = Local::unsafe_borrow::(); - (*sched).run_cleanup_job(); - - let sched = Local::unsafe_borrow::(); - let task = (*sched).current_task.get_mut_ref(); - // FIXME #6141: shouldn't neet to put `start()` in - // another closure - let start_cell = Cell::new(start_cell.take()); - do task.task.run { - // N.B. Removing `start` from the start wrapper - // closure by emptying a cell is critical for - // correctness. The ~Task pointer, and in turn the - // closure used to initialize the first call - // frame, is destroyed in scheduler context, not - // task context. So any captured closures must - // not contain user-definable dtors that expect to - // be in task context. By moving `start` out of - // the closure, all the user code goes out of - // scope while the task is still running. 
- let start = start_cell.take(); - start(); - }; - } - - let sched = Local::take::(); - sched.terminate_current_task(); - }; - return wrapper; - } - - /// Destroy the task and try to reuse its components - pub fn recycle(~self, stack_pool: &mut StackPool) { - match self { - ~Coroutine {current_stack_segment, _} => { - stack_pool.give_segment(current_stack_segment); - } - } - } -} - // XXX: Some hacks to put a &fn in Scheduler without borrowck // complaining type UnsafeTaskReceiver = sys::Closure; trait ClosureConverter { - fn from_fn(&fn(&mut Scheduler, ~Coroutine)) -> Self; - fn to_fn(self) -> &fn(&mut Scheduler, ~Coroutine); + fn from_fn(&fn(&mut Scheduler, ~Task)) -> Self; + fn to_fn(self) -> &fn(&mut Scheduler, ~Task); } impl ClosureConverter for UnsafeTaskReceiver { - fn from_fn(f: &fn(&mut Scheduler, ~Coroutine)) -> UnsafeTaskReceiver { unsafe { transmute(f) } } - fn to_fn(self) -> &fn(&mut Scheduler, ~Coroutine) { unsafe { transmute(self) } } + fn from_fn(f: &fn(&mut Scheduler, ~Task)) -> UnsafeTaskReceiver { unsafe { transmute(f) } } + fn to_fn(self) -> &fn(&mut Scheduler, ~Task) { unsafe { transmute(self) } } } + #[cfg(test)] mod test { use int; use cell::Cell; - use iterator::IteratorUtil; use unstable::run_in_bare_thread; use task::spawn; use rt::local::Local; use rt::test::*; use super::*; use rt::thread::Thread; - use ptr::to_uint; - use vec::MutableVector; + use borrow::to_uint; + use rt::task::{Task,Sched}; // Confirm that a sched_id actually is the uint form of the // pointer to the scheduler struct. - #[test] fn simple_sched_id_test() { do run_in_bare_thread { @@ -851,7 +688,6 @@ mod test { // Compare two scheduler ids that are different, this should never // fail but may catch a mistake someday. - #[test] fn compare_sched_id_test() { do run_in_bare_thread { @@ -863,7 +699,6 @@ mod test { // A simple test to check if a homed task run on a single // scheduler ends up executing while home. 
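The ClosureConverter change above only swaps ~Coroutine for ~Task in its signature; the mechanism is unchanged, a one-shot cleanup job smuggled across a context switch as an opaque sys::Closure. A simplified standalone sketch of that handoff in modern Rust, using a boxed closure instead of the transmute (names mirror the patch, but run_cleanup_job's body is not part of this hunk, so the version below is only the obvious shape):

    struct Task;

    struct Scheduler {
        cleanup_job: Option<CleanupJob>,
    }

    // What the descheduling side leaves behind for the other side of the switch.
    enum CleanupJob {
        DoNothing,
        GiveTask(Box<Task>, Box<dyn FnOnce(&mut Scheduler, Box<Task>)>),
    }

    impl Scheduler {
        fn enqueue_cleanup_job(&mut self, job: CleanupJob) {
            assert!(self.cleanup_job.is_none(), "only one pending cleanup job at a time");
            self.cleanup_job = Some(job);
        }

        // Run as the first thing after resuming in the new context, e.g. at
        // the top of the start wrapper.
        fn run_cleanup_job(&mut self) {
            match self.cleanup_job.take() {
                Some(CleanupJob::GiveTask(task, f)) => f(self, task),
                Some(CleanupJob::DoNothing) | None => {}
            }
        }
    }

Both deschedule_running_task_and_then and switch_running_tasks_and_then enqueue a GiveTask job this way before swapping contexts.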
- #[test] fn test_home_sched() { do run_in_bare_thread { @@ -874,8 +709,8 @@ mod test { let sched_handle = sched.make_handle(); let sched_id = sched.sched_id(); - let task = ~do Coroutine::new_homed(&mut sched.stack_pool, - Sched(sched_handle)) { + let task = ~do Task::new_root_homed(&mut sched.stack_pool, + Sched(sched_handle)) { unsafe { *task_ran_ptr = true }; let sched = Local::take::(); assert!(sched.sched_id() == sched_id); @@ -888,7 +723,6 @@ mod test { } // A test for each state of schedule_task - #[test] fn test_schedule_home_states() { @@ -898,7 +732,6 @@ mod test { use rt::work_queue::WorkQueue; do run_in_bare_thread { -// let nthreads = 2; let sleepers = SleeperList::new(); let work_queue = WorkQueue::new(); @@ -924,33 +757,33 @@ mod test { let t1_handle = special_sched.make_handle(); let t4_handle = special_sched.make_handle(); - let t1f = ~do Coroutine::new_homed(&mut special_sched.stack_pool, - Sched(t1_handle)) { - let is_home = Coroutine::is_home_using_id(special_id); + let t1f = ~do Task::new_root_homed(&mut special_sched.stack_pool, + Sched(t1_handle)) || { + let is_home = Task::is_home_using_id(special_id); rtdebug!("t1 should be home: %b", is_home); assert!(is_home); }; let t1f = Cell::new(t1f); - let t2f = ~do Coroutine::new_root(&mut normal_sched.stack_pool) { - let on_special = Coroutine::on_special(); + let t2f = ~do Task::new_root(&mut normal_sched.stack_pool) { + let on_special = Task::on_special(); rtdebug!("t2 should not be on special: %b", on_special); assert!(!on_special); }; let t2f = Cell::new(t2f); - let t3f = ~do Coroutine::new_root(&mut normal_sched.stack_pool) { + let t3f = ~do Task::new_root(&mut normal_sched.stack_pool) { // not on special - let on_special = Coroutine::on_special(); + let on_special = Task::on_special(); rtdebug!("t3 should not be on special: %b", on_special); assert!(!on_special); }; let t3f = Cell::new(t3f); - let t4f = ~do Coroutine::new_homed(&mut special_sched.stack_pool, - Sched(t4_handle)) { + let t4f = ~do Task::new_root_homed(&mut special_sched.stack_pool, + Sched(t4_handle)) { // is home - let home = Coroutine::is_home_using_id(special_id); + let home = Task::is_home_using_id(special_id); rtdebug!("t4 should be home: %b", home); assert!(home); }; @@ -988,7 +821,7 @@ mod test { let t4 = Cell::new(t4); // build a main task that runs our four tests - let main_task = ~do Coroutine::new_root(&mut normal_sched.stack_pool) { + let main_task = ~do Task::new_root(&mut normal_sched.stack_pool) { // the two tasks that require a normal start location t2.take()(); t4.take()(); @@ -997,7 +830,7 @@ mod test { }; // task to run the two "special start" tests - let special_task = ~do Coroutine::new_homed( + let special_task = ~do Task::new_root_homed( &mut special_sched.stack_pool, Sched(special_handle2.take())) { t1.take()(); @@ -1027,91 +860,7 @@ mod test { } } - // The following test is a bit of a mess, but it trys to do - // something tricky so I'm not sure how to get around this in the - // short term. - - // A number of schedulers are created, and then a task is created - // and assigned a home scheduler. It is then "started" on a - // different scheduler. The scheduler it is started on should - // observe that the task is not home, and send it home. - - // This test is light in that it does very little. 
- - #[test] - fn test_transfer_task_home() { - - use rt::uv::uvio::UvEventLoop; - use rt::sched::Shutdown; - use rt::sleeper_list::SleeperList; - use rt::work_queue::WorkQueue; - use uint; - use container::Container; - use vec::OwnedVector; - - do run_in_bare_thread { - - static N: uint = 8; - - let sleepers = SleeperList::new(); - let work_queue = WorkQueue::new(); - - let mut handles = ~[]; - let mut scheds = ~[]; - - for uint::range(0, N) |_| { - let loop_ = ~UvEventLoop::new(); - let mut sched = ~Scheduler::new(loop_, - work_queue.clone(), - sleepers.clone()); - let handle = sched.make_handle(); - rtdebug!("sched id: %u", handle.sched_id); - handles.push(handle); - scheds.push(sched); - }; - - let handles = Cell::new(handles); - - let home_handle = scheds[6].make_handle(); - let home_id = home_handle.sched_id; - let home = Sched(home_handle); - - let main_task = ~do Coroutine::new_homed(&mut scheds[1].stack_pool, home) { - - // Here we check if the task is running on its home. - let sched = Local::take::(); - rtdebug!("run location scheduler id: %u, home: %u", - sched.sched_id(), - home_id); - assert!(sched.sched_id() == home_id); - Local::put::(sched); - - let mut handles = handles.take(); - for handles.mut_iter().advance |handle| { - handle.send(Shutdown); - } - }; - - scheds[0].enqueue_task(main_task); - - let mut threads = ~[]; - - while !scheds.is_empty() { - let sched = scheds.pop(); - let sched_cell = Cell::new(sched); - let thread = do Thread::start { - let sched = sched_cell.take(); - sched.run(); - }; - threads.push(thread); - } - - let _threads = threads; - } - } - // Do it a lot - #[test] fn test_stress_schedule_task_states() { let n = stress_factor() * 120; @@ -1120,21 +869,6 @@ mod test { } } - // The goal is that this is the high-stress test for making sure - // homing is working. It allocates RUST_RT_STRESS tasks that - // do nothing but assert that they are home at execution - // time. These tasks are queued to random schedulers, so sometimes - // they are home and sometimes not. It also runs RUST_RT_STRESS - // times. 
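The homing tests being deleted and rewritten around here all reduce to one small check: a Task optionally records a home scheduler, and the scheduler it actually lands on compares ids. A reduced sketch of that check in modern Rust syntax (SchedHome, SchedHandle and the match arms follow the patch; is_home_for is a hypothetical stand-in for is_home_no_tls that takes a bare id instead of a borrowed scheduler):

    struct SchedHandle {
        sched_id: usize,
    }

    enum SchedHome {
        AnySched,            // may run on any scheduler
        Sched(SchedHandle),  // pinned to one particular scheduler
    }

    struct Task {
        home: Option<SchedHome>,
    }

    impl Task {
        fn is_home_for(&self, running_sched_id: usize) -> bool {
            match &self.home {
                // An un-pinned task reports false here, exactly as in the
                // patch; homed() is what distinguishes pinned from unpinned.
                Some(SchedHome::AnySched) => false,
                Some(SchedHome::Sched(h)) => h.sched_id == running_sched_id,
                None => panic!("task home of None"),
            }
        }
    }

A pinned task that fails this check is what send_task_home ships back to its own scheduler as a PinnedTask message.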
- - #[test] - fn test_stress_homed_tasks() { - let n = stress_factor(); - for int::range(0,n as int) |_| { - run_in_mt_newsched_task_random_homed(); - } - } - #[test] fn test_simple_scheduling() { do run_in_bare_thread { @@ -1142,7 +876,7 @@ mod test { let task_ran_ptr: *mut bool = &mut task_ran; let mut sched = ~new_test_uv_sched(); - let task = ~do Coroutine::new_root(&mut sched.stack_pool) { + let task = ~do Task::new_root(&mut sched.stack_pool) { unsafe { *task_ran_ptr = true; } }; sched.enqueue_task(task); @@ -1160,7 +894,7 @@ mod test { let mut sched = ~new_test_uv_sched(); for int::range(0, total) |_| { - let task = ~do Coroutine::new_root(&mut sched.stack_pool) { + let task = ~do Task::new_root(&mut sched.stack_pool) { unsafe { *task_count_ptr = *task_count_ptr + 1; } }; sched.enqueue_task(task); @@ -1177,10 +911,10 @@ mod test { let count_ptr: *mut int = &mut count; let mut sched = ~new_test_uv_sched(); - let task1 = ~do Coroutine::new_root(&mut sched.stack_pool) { + let task1 = ~do Task::new_root(&mut sched.stack_pool) { unsafe { *count_ptr = *count_ptr + 1; } let mut sched = Local::take::(); - let task2 = ~do Coroutine::new_root(&mut sched.stack_pool) { + let task2 = ~do Task::new_root(&mut sched.stack_pool) { unsafe { *count_ptr = *count_ptr + 1; } }; // Context switch directly to the new task @@ -1205,7 +939,7 @@ mod test { let mut sched = ~new_test_uv_sched(); - let start_task = ~do Coroutine::new_root(&mut sched.stack_pool) { + let start_task = ~do Task::new_root(&mut sched.stack_pool) { run_task(count_ptr); }; sched.enqueue_task(start_task); @@ -1215,7 +949,7 @@ mod test { fn run_task(count_ptr: *mut int) { do Local::borrow:: |sched| { - let task = ~do Coroutine::new_root(&mut sched.stack_pool) { + let task = ~do Task::new_root(&mut sched.stack_pool) { unsafe { *count_ptr = *count_ptr + 1; if *count_ptr != MAX { @@ -1233,7 +967,7 @@ mod test { fn test_block_task() { do run_in_bare_thread { let mut sched = ~new_test_uv_sched(); - let task = ~do Coroutine::new_root(&mut sched.stack_pool) { + let task = ~do Task::new_root(&mut sched.stack_pool) { let sched = Local::take::(); assert!(sched.in_task_context()); do sched.deschedule_running_task_and_then() |sched, task| { @@ -1280,13 +1014,13 @@ mod test { let mut sched1 = ~new_test_uv_sched(); let handle1 = sched1.make_handle(); let handle1_cell = Cell::new(handle1); - let task1 = ~do Coroutine::new_root(&mut sched1.stack_pool) { + let task1 = ~do Task::new_root(&mut sched1.stack_pool) { chan_cell.take().send(()); }; sched1.enqueue_task(task1); let mut sched2 = ~new_test_uv_sched(); - let task2 = ~do Coroutine::new_root(&mut sched2.stack_pool) { + let task2 = ~do Task::new_root(&mut sched2.stack_pool) { port_cell.take().recv(); // Release the other scheduler's handle so it can exit handle1_cell.take(); @@ -1383,7 +1117,6 @@ mod test { } } } - } #[test] @@ -1408,5 +1141,4 @@ mod test { } } } - -} +} \ No newline at end of file diff --git a/src/libstd/rt/task.rs b/src/libstd/rt/task.rs index 97c3b6a749bc9..333eaa5391220 100644 --- a/src/libstd/rt/task.rs +++ b/src/libstd/rt/task.rs @@ -23,8 +23,11 @@ use option::{Option, Some, None}; use rt::local::Local; use rt::logging::StdErrLogger; use super::local_heap::LocalHeap; -use rt::sched::{SchedHome, AnySched}; +use rt::sched::{Scheduler, SchedHandle}; use rt::join_latch::JoinLatch; +use rt::stack::{StackSegment, StackPool}; +use rt::context::Context; +use cell::Cell; pub struct Task { heap: LocalHeap, @@ -35,7 +38,22 @@ pub struct Task { home: Option, join_latch: Option<~JoinLatch>, 
on_exit: Option<~fn(bool)>, - destroyed: bool + destroyed: bool, + coroutine: Option<~Coroutine> +} + +pub struct Coroutine { + /// The segment of stack on which the task is currently running or + /// if the task is blocked, on which the task will resume + /// execution. + priv current_stack_segment: StackSegment, + /// Always valid if the task is alive and not running. + saved_context: Context +} + +pub enum SchedHome { + AnySched, + Sched(SchedHandle) } pub struct GarbageCollector; @@ -46,31 +64,50 @@ pub struct Unwinder { } impl Task { - pub fn new_root() -> Task { + + pub fn new_root(stack_pool: &mut StackPool, + start: ~fn()) -> Task { + Task::new_root_homed(stack_pool, AnySched, start) + } + + pub fn new_child(&mut self, + stack_pool: &mut StackPool, + start: ~fn()) -> Task { + self.new_child_homed(stack_pool, AnySched, start) + } + + pub fn new_root_homed(stack_pool: &mut StackPool, + home: SchedHome, + start: ~fn()) -> Task { Task { heap: LocalHeap::new(), gc: GarbageCollector, storage: LocalStorage(ptr::null(), None), logger: StdErrLogger, unwinder: Unwinder { unwinding: false }, - home: Some(AnySched), + home: Some(home), join_latch: Some(JoinLatch::new_root()), on_exit: None, - destroyed: false + destroyed: false, + coroutine: Some(~Coroutine::new(stack_pool, start)) } } - pub fn new_child(&mut self) -> Task { + pub fn new_child_homed(&mut self, + stack_pool: &mut StackPool, + home: SchedHome, + start: ~fn()) -> Task { Task { heap: LocalHeap::new(), gc: GarbageCollector, storage: LocalStorage(ptr::null(), None), logger: StdErrLogger, - home: Some(AnySched), + home: Some(home), unwinder: Unwinder { unwinding: false }, join_latch: Some(self.join_latch.get_mut_ref().new_child()), on_exit: None, - destroyed: false + destroyed: false, + coroutine: Some(~Coroutine::new(stack_pool, start)) } } @@ -108,11 +145,11 @@ impl Task { /// called unsafely, without removing Task from /// thread-local-storage. fn destroy(&mut self) { - // This is just an assertion that `destroy` was called unsafely - // and this instance of Task is still accessible. + do Local::borrow:: |task| { assert!(borrow::ref_eq(task, self)); } + match self.storage { LocalStorage(ptr, Some(ref dtor)) => { (*dtor)(ptr) @@ -125,12 +162,129 @@ impl Task { self.destroyed = true; } + + /// Check if *task* is currently home. + pub fn is_home(&self) -> bool { + do Local::borrow:: |sched| { + match self.home { + Some(AnySched) => { false } + Some(Sched(SchedHandle { sched_id: ref id, _ })) => { + *id == sched.sched_id() + } + None => { rtabort!("task home of None") } + } + } + } + + pub fn is_home_no_tls(&self, sched: &~Scheduler) -> bool { + match self.home { + Some(AnySched) => { false } + Some(Sched(SchedHandle { sched_id: ref id, _ })) => { + *id == sched.sched_id() + } + None => {rtabort!("task home of None") } + } + } + + pub fn is_home_using_id(sched_id: uint) -> bool { + do Local::borrow:: |task| { + match task.home { + Some(Sched(SchedHandle { sched_id: ref id, _ })) => { + *id == sched_id + } + Some(AnySched) => { false } + None => { rtabort!("task home of None") } + } + } + } + + /// Check if this *task* has a home. + pub fn homed(&self) -> bool { + match self.home { + Some(AnySched) => { false } + Some(Sched(_)) => { true } + None => { + rtabort!("task home of None") + } + } + } + + /// On a special scheduler? 
+ pub fn on_special() -> bool { + do Local::borrow:: |sched| { + sched.run_anything + } + } + } impl Drop for Task { fn finalize(&self) { assert!(self.destroyed) } } +// Coroutines represent nothing more than a context and a stack +// segment. + +impl Coroutine { + + pub fn new(stack_pool: &mut StackPool, start: ~fn()) -> Coroutine { + static MIN_STACK_SIZE: uint = 100000; // XXX: Too much stack + + let start = Coroutine::build_start_wrapper(start); + let mut stack = stack_pool.take_segment(MIN_STACK_SIZE); + let initial_context = Context::new(start, &mut stack); + Coroutine { + current_stack_segment: stack, + saved_context: initial_context + } + } + + fn build_start_wrapper(start: ~fn()) -> ~fn() { + let start_cell = Cell::new(start); + let wrapper: ~fn() = || { + // First code after swap to this new context. Run our + // cleanup job. + unsafe { + let sched = Local::unsafe_borrow::(); + (*sched).run_cleanup_job(); + + let sched = Local::unsafe_borrow::(); + let task = (*sched).current_task.get_mut_ref(); + + do task.run { + // N.B. Removing `start` from the start wrapper + // closure by emptying a cell is critical for + // correctness. The ~Task pointer, and in turn the + // closure used to initialize the first call + // frame, is destroyed in the scheduler context, + // not task context. So any captured closures must + // not contain user-definable dtors that expect to + // be in task context. By moving `start` out of + // the closure, all the user code goes our of + // scope while the task is still running. + let start = start_cell.take(); + start(); + }; + } + + let sched = Local::take::(); + sched.terminate_current_task(); + }; + return wrapper; + } + + /// Destroy coroutine and try to reuse stack segment. + pub fn recycle(~self, stack_pool: &mut StackPool) { + match self { + ~Coroutine { current_stack_segment, _ } => { + stack_pool.give_segment(current_stack_segment); + } + } + } + +} + + // Just a sanity check to make sure we are catching a Rust-thrown exception static UNWIND_TOKEN: uintptr_t = 839147; @@ -209,8 +363,10 @@ mod test { fn unwind() { do run_in_newsched_task() { let result = spawntask_try(||()); + rtdebug!("trying first assert"); assert!(result.is_ok()); let result = spawntask_try(|| fail!()); + rtdebug!("trying second assert"); assert!(result.is_err()); } } diff --git a/src/libstd/rt/test.rs b/src/libstd/rt/test.rs index b0e4968401474..659c7eb498573 100644 --- a/src/libstd/rt/test.rs +++ b/src/libstd/rt/test.rs @@ -8,30 +8,27 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
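The N.B. comment in build_start_wrapper is the subtle point of the new task.rs: the wrapper closure is destroyed in scheduler context, so anything still captured in it would be dropped there. A tiny standalone illustration of the take-it-out-early pattern it relies on, in modern Rust with std::cell::Cell<Option<..>> playing the role of the 2013 Cell (Payload and the printouts are invented purely to show where the destructor runs):

    use std::cell::Cell;

    struct Payload(&'static str);

    impl Drop for Payload {
        fn drop(&mut self) {
            // With the take-early pattern this runs while `start` is still
            // executing, i.e. in task context, not when the wrapper dies.
            println!("dropping payload: {}", self.0);
        }
    }

    fn build_start_wrapper(start: Box<dyn FnOnce(Payload)>) -> impl FnOnce() {
        let start_cell = Cell::new(Some((start, Payload("user state"))));
        move || {
            // First code after the context switch: pull everything the user
            // gave us out of the wrapper before running it, so it goes out
            // of scope here rather than whenever the wrapper is dropped.
            let (start, payload) = start_cell.take().expect("start wrapper run twice");
            start(payload);
        }
    }

    fn main() {
        let wrapper = build_start_wrapper(Box::new(|p| println!("running with {}", p.0)));
        wrapper();
    }

In the patch the take happens inside the task.run closure for the same reason: the user closure and its captures must go out of scope while the task is still running.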
+use cell::Cell; use uint; use option::{Some, None}; -use cell::Cell; -use clone::Clone; -use container::Container; -use iterator::IteratorUtil; -use vec::{OwnedVector, MutableVector}; -use result::{Result, Ok, Err}; -use unstable::run_in_bare_thread; +use rt::sched::Scheduler; use super::io::net::ip::{IpAddr, Ipv4}; -use rt::comm::oneshot; -use rt::task::Task; -use rt::thread::Thread; use rt::local::Local; -use rt::sched::{Scheduler, Coroutine}; -use rt::sleeper_list::SleeperList; +use unstable::run_in_bare_thread; +use rt::thread::Thread; +use rt::task::Task; +use rt::uv::uvio::UvEventLoop; use rt::work_queue::WorkQueue; +use rt::sleeper_list::SleeperList; +use rt::task::{Sched}; +use rt::comm::oneshot; +use result::{Result, Ok, Err}; pub fn new_test_uv_sched() -> Scheduler { - use rt::uv::uvio::UvEventLoop; - use rt::work_queue::WorkQueue; - use rt::sleeper_list::SleeperList; - let mut sched = Scheduler::new(~UvEventLoop::new(), WorkQueue::new(), SleeperList::new()); + let mut sched = Scheduler::new(~UvEventLoop::new(), + WorkQueue::new(), + SleeperList::new()); // Don't wait for the Shutdown message sched.no_sleep = true; return sched; @@ -41,19 +38,15 @@ pub fn new_test_uv_sched() -> Scheduler { /// then waits for the scheduler to exit. Failure of the task /// will abort the process. pub fn run_in_newsched_task(f: ~fn()) { - use super::sched::*; - use unstable::run_in_bare_thread; - let f = Cell::new(f); do run_in_bare_thread { let mut sched = ~new_test_uv_sched(); - let mut new_task = ~Task::new_root(); let on_exit: ~fn(bool) = |exit_status| rtassert!(exit_status); - new_task.on_exit = Some(on_exit); - let task = ~Coroutine::with_task(&mut sched.stack_pool, - new_task, - f.take()); + let mut task = ~Task::new_root(&mut sched.stack_pool, + f.take()); + rtdebug!("newsched_task: %x", to_uint(task)); + task.on_exit = Some(on_exit); sched.enqueue_task(task); sched.run(); } @@ -65,7 +58,6 @@ pub fn run_in_newsched_task(f: ~fn()) { pub fn run_in_mt_newsched_task(f: ~fn()) { use os; use from_str::FromStr; - use rt::uv::uvio::UvEventLoop; use rt::sched::Shutdown; use rt::util; @@ -90,7 +82,9 @@ pub fn run_in_mt_newsched_task(f: ~fn()) { for uint::range(0, nthreads) |_| { let loop_ = ~UvEventLoop::new(); - let mut sched = ~Scheduler::new(loop_, work_queue.clone(), sleepers.clone()); + let mut sched = ~Scheduler::new(loop_, + work_queue.clone(), + sleepers.clone()); let handle = sched.make_handle(); handles.push(handle); @@ -99,9 +93,7 @@ pub fn run_in_mt_newsched_task(f: ~fn()) { let f_cell = Cell::new(f_cell.take()); let handles = Cell::new(handles); - let mut new_task = ~Task::new_root(); let on_exit: ~fn(bool) = |exit_status| { - let mut handles = handles.take(); // Tell schedulers to exit for handles.mut_iter().advance |handle| { @@ -110,9 +102,9 @@ pub fn run_in_mt_newsched_task(f: ~fn()) { rtassert!(exit_status); }; - new_task.on_exit = Some(on_exit); - let main_task = ~Coroutine::with_task(&mut scheds[0].stack_pool, - new_task, f_cell.take()); + let mut main_task = ~Task::new_root(&mut scheds[0].stack_pool, + f_cell.take()); + main_task.on_exit = Some(on_exit); scheds[0].enqueue_task(main_task); let mut threads = ~[]; @@ -134,144 +126,44 @@ pub fn run_in_mt_newsched_task(f: ~fn()) { } -// THIS IS AWFUL. Copy-pasted the above initialization function but -// with a number of hacks to make it spawn tasks on a variety of -// schedulers with a variety of homes using the new spawn. 
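run_in_mt_newsched_task above uses a pattern that recurs throughout these tests: one scheduler per thread, a main task whose on_exit hook fans a Shutdown message out to every SchedHandle, then a join on all the threads. A loose analogue with plain std threads and channels, just to show the fan-out shape (Msg::Shutdown here is an ordinary enum variant, not the scheduler's SchedMessage):

    use std::sync::mpsc;
    use std::thread;

    enum Msg { Work(u32), Shutdown }

    fn main() {
        let nthreads = 4;
        let mut handles = Vec::new(); // per-worker senders, like the SchedHandle vec
        let mut threads = Vec::new();

        for i in 0..nthreads {
            let (tx, rx) = mpsc::channel::<Msg>();
            handles.push(tx);
            threads.push(thread::spawn(move || {
                // Each worker loops until it is explicitly told to shut down,
                // mirroring a scheduler that only exits on Shutdown.
                while let Ok(msg) = rx.recv() {
                    match msg {
                        Msg::Work(n) => println!("worker {} got work item {}", i, n),
                        Msg::Shutdown => break,
                    }
                }
            }));
        }

        // "Main task": do some work, then fan the shutdown out to every
        // handle, the way the on_exit closure sends Shutdown to each handle.
        handles[0].send(Msg::Work(99)).unwrap();
        for h in &handles {
            h.send(Msg::Shutdown).unwrap();
        }
        for t in threads {
            t.join().unwrap();
        }
    }

The exit hook carries the fan-out because only the main task knows when the test body has finished, while each scheduler thread only knows how to react to a Shutdown message.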
- -pub fn run_in_mt_newsched_task_random_homed() { - use libc; - use os; - use from_str::FromStr; - use rt::uv::uvio::UvEventLoop; - use rt::sched::Shutdown; - - do run_in_bare_thread { - let nthreads = match os::getenv("RUST_TEST_THREADS") { - Some(nstr) => FromStr::from_str(nstr).get(), - None => unsafe { - // Using more threads than cores in test code to force - // the OS to preempt them frequently. Assuming that - // this help stress test concurrent types. - rust_get_num_cpus() * 2 - } - }; - - let sleepers = SleeperList::new(); - let work_queue = WorkQueue::new(); - - let mut handles = ~[]; - let mut scheds = ~[]; - - // create a few special schedulers, those with even indicies - // will be pinned-only - for uint::range(0, nthreads) |i| { - let special = (i % 2) == 0; - let loop_ = ~UvEventLoop::new(); - let mut sched = ~Scheduler::new_special( - loop_, work_queue.clone(), sleepers.clone(), special); - let handle = sched.make_handle(); - handles.push(handle); - scheds.push(sched); - } - - // Schedule a pile o tasks - let n = 5*stress_factor(); - for uint::range(0,n) |_i| { - rtdebug!("creating task: %u", _i); - let hf: ~fn() = || { assert!(true) }; - spawntask_homed(&mut scheds, hf); - } - - // Now we want another pile o tasks that do not ever run on a - // special scheduler, because they are normal tasks. Because - // we can we put these in the "main" task. - - let n = 5*stress_factor(); - - let f: ~fn() = || { - for uint::range(0,n) |_| { - let f: ~fn() = || { - // Borrow the scheduler we run on and check if it is - // privileged. - do Local::borrow:: |sched| { - assert!(sched.run_anything); - }; - }; - spawntask_random(f); - }; - }; - - let f_cell = Cell::new(f); - let handles = Cell::new(handles); - - rtdebug!("creating main task"); - - let main_task = ~do Coroutine::new_root(&mut scheds[0].stack_pool) { - f_cell.take()(); - let mut handles = handles.take(); - // Tell schedulers to exit - for handles.mut_iter().advance |handle| { - handle.send(Shutdown); - } - }; - - rtdebug!("queuing main task") - - scheds[0].enqueue_task(main_task); - - let mut threads = ~[]; - - while !scheds.is_empty() { - let sched = scheds.pop(); - let sched_cell = Cell::new(sched); - let thread = do Thread::start { - let sched = sched_cell.take(); - rtdebug!("running sched: %u", sched.sched_id()); - sched.run(); - }; - - threads.push(thread); - } - - rtdebug!("waiting on scheduler threads"); - - // Wait for schedulers - let _threads = threads; - } - - extern { - fn rust_get_num_cpus() -> libc::uintptr_t; - } -} - - /// Test tasks will abort on failure instead of unwinding pub fn spawntask(f: ~fn()) { use super::sched::*; + let f = Cell::new(f); - rtdebug!("spawntask taking the scheduler from TLS") - let task = do Local::borrow::() |running_task| { - ~running_task.new_child() + let task = unsafe { + let sched = Local::unsafe_borrow::(); + rtdebug!("spawntask taking the scheduler from TLS"); + + + do Local::borrow::() |running_task| { + ~running_task.new_child(&mut (*sched).stack_pool, f.take()) + } }; - let mut sched = Local::take::(); - let task = ~Coroutine::with_task(&mut sched.stack_pool, - task, f); + rtdebug!("new task pointer: %x", to_uint(task)); + + let sched = Local::take::(); rtdebug!("spawntask scheduling the new task"); sched.schedule_task(task); } + /// Create a new task and run it right now. 
Aborts on failure pub fn spawntask_immediately(f: ~fn()) { use super::sched::*; - let task = do Local::borrow::() |running_task| { - ~running_task.new_child() + let f = Cell::new(f); + + let task = unsafe { + let sched = Local::unsafe_borrow::(); + do Local::borrow::() |running_task| { + ~running_task.new_child(&mut (*sched).stack_pool, + f.take()) + } }; - let mut sched = Local::take::(); - let task = ~Coroutine::with_task(&mut sched.stack_pool, - task, f); + let sched = Local::take::(); do sched.switch_running_tasks_and_then(task) |sched, task| { sched.enqueue_task(task); } @@ -280,15 +172,16 @@ pub fn spawntask_immediately(f: ~fn()) { /// Create a new task and run it right now. Aborts on failure pub fn spawntask_later(f: ~fn()) { use super::sched::*; + let f = Cell::new(f); - let task = do Local::borrow::() |running_task| { - ~running_task.new_child() + let task = unsafe { + let sched = Local::unsafe_borrow::(); + do Local::borrow::() |running_task| { + ~running_task.new_child(&mut (*sched).stack_pool, f.take()) + } }; let mut sched = Local::take::(); - let task = ~Coroutine::with_task(&mut sched.stack_pool, - task, f); - sched.enqueue_task(task); Local::put(sched); } @@ -298,13 +191,18 @@ pub fn spawntask_random(f: ~fn()) { use super::sched::*; use rand::{Rand, rng}; - let task = do Local::borrow::() |running_task| { - ~running_task.new_child() + let f = Cell::new(f); + + let task = unsafe { + let sched = Local::unsafe_borrow::(); + do Local::borrow::() |running_task| { + ~running_task.new_child(&mut (*sched).stack_pool, + f.take()) + + } }; let mut sched = Local::take::(); - let task = ~Coroutine::with_task(&mut sched.stack_pool, - task, f); let mut rng = rng(); let run_now: bool = Rand::rand(&mut rng); @@ -343,33 +241,49 @@ pub fn spawntask_homed(scheds: &mut ~[~Scheduler], f: ~fn()) { f() }; - ~Coroutine::with_task_homed(&mut sched.stack_pool, - ~Task::new_root(), - af, - Sched(handle)) + ~Task::new_root_homed(&mut sched.stack_pool, + Sched(handle), + af) }; let dest_sched = &mut scheds[rng.gen_int_range(0,scheds.len() as int)]; // enqueue it for future execution dest_sched.enqueue_task(task); } -/// Spawn a task and wait for it to finish, returning whether it completed successfully or failed +/// Spawn a task and wait for it to finish, returning whether it +/// completed successfully or failed pub fn spawntask_try(f: ~fn()) -> Result<(), ()> { use cell::Cell; use super::sched::*; + let f = Cell::new(f); + let (port, chan) = oneshot(); let chan = Cell::new(chan); - let mut new_task = ~Task::new_root(); let on_exit: ~fn(bool) = |exit_status| chan.take().send(exit_status); + let mut new_task = unsafe { + let sched = Local::unsafe_borrow::(); + do Local::borrow:: |_running_task| { + + // I don't understand why using a child task here fails. I + // think the fail status is propogating back up the task + // tree and triggering a fail for the parent, which we + // aren't correctly expecting. 
+ + // ~running_task.new_child(&mut (*sched).stack_pool, + ~Task::new_root(&mut (*sched).stack_pool, + f.take()) + } + }; new_task.on_exit = Some(on_exit); - let mut sched = Local::take::(); - let new_task = ~Coroutine::with_task(&mut sched.stack_pool, - new_task, f); + + let sched = Local::take::(); do sched.switch_running_tasks_and_then(new_task) |sched, old_task| { sched.enqueue_task(old_task); } + rtdebug!("enqueued the new task, now waiting on exit_status"); + let exit_status = port.recv(); if exit_status { Ok(()) } else { Err(()) } } @@ -378,23 +292,27 @@ pub fn spawntask_try(f: ~fn()) -> Result<(), ()> { pub fn spawntask_thread(f: ~fn()) -> Thread { use rt::sched::*; - let task = do Local::borrow::() |running_task| { - ~running_task.new_child() + let f = Cell::new(f); + + let task = unsafe { + let sched = Local::unsafe_borrow::(); + do Local::borrow::() |running_task| { + ~running_task.new_child(&mut (*sched).stack_pool, + f.take()) + } }; let task = Cell::new(task); - let f = Cell::new(f); + let thread = do Thread::start { let mut sched = ~new_test_uv_sched(); - let task = ~Coroutine::with_task(&mut sched.stack_pool, - task.take(), - f.take()); - sched.enqueue_task(task); + sched.enqueue_task(task.take()); sched.run(); }; return thread; } + /// Get a port number, starting at 9600, for use in tests pub fn next_test_port() -> u16 { unsafe { @@ -410,7 +328,8 @@ pub fn next_test_ip4() -> IpAddr { Ipv4(127, 0, 0, 1, next_test_port()) } -/// Get a constant that represents the number of times to repeat stress tests. Default 1. +/// Get a constant that represents the number of times to repeat +/// stress tests. Default 1. pub fn stress_factor() -> uint { use os::getenv; diff --git a/src/libstd/rt/tube.rs b/src/libstd/rt/tube.rs index 89f3d10b5e4cf..013eb438c3657 100644 --- a/src/libstd/rt/tube.rs +++ b/src/libstd/rt/tube.rs @@ -16,14 +16,15 @@ use option::*; use clone::Clone; use super::rc::RC; -use rt::sched::{Scheduler, Coroutine}; +use rt::sched::Scheduler; use rt::{context, TaskContext, SchedulerContext}; use rt::local::Local; +use rt::task::Task; use vec::OwnedVector; use container::Container; struct TubeState { - blocked_task: Option<~Coroutine>, + blocked_task: Option<~Task>, buf: ~[T] } diff --git a/src/libstd/rt/uv/uvio.rs b/src/libstd/rt/uv/uvio.rs index 298277b3df05b..b1708e70733ea 100644 --- a/src/libstd/rt/uv/uvio.rs +++ b/src/libstd/rt/uv/uvio.rs @@ -29,7 +29,10 @@ use unstable::sync::{Exclusive, exclusive}; #[cfg(test)] use container::Container; #[cfg(test)] use uint; #[cfg(test)] use unstable::run_in_bare_thread; -#[cfg(test)] use rt::test::*; +#[cfg(test)] use rt::test::{spawntask_immediately, + next_test_ip4, + run_in_newsched_task}; + pub struct UvEventLoop { uvio: UvIoFactory diff --git a/src/libstd/task/mod.rs b/src/libstd/task/mod.rs index 99858feab224c..b0fc6b2884f01 100644 --- a/src/libstd/task/mod.rs +++ b/src/libstd/task/mod.rs @@ -1182,3 +1182,4 @@ fn test_simple_newsched_spawn() { spawn(||()) } } + diff --git a/src/libstd/task/spawn.rs b/src/libstd/task/spawn.rs index 63eb768d1c9cd..aea8cda6a2102 100644 --- a/src/libstd/task/spawn.rs +++ b/src/libstd/task/spawn.rs @@ -581,13 +581,20 @@ pub fn spawn_raw(opts: TaskOpts, f: ~fn()) { fn spawn_raw_newsched(mut opts: TaskOpts, f: ~fn()) { use rt::sched::*; - let mut task = if opts.linked { - do Local::borrow::() |running_task| { - ~running_task.new_child() + let f = Cell::new(f); + + let mut task = unsafe { + let sched = Local::unsafe_borrow::(); + rtdebug!("unsafe borrowed sched"); + + if opts.linked { + do 
Local::borrow::() |running_task| { + ~running_task.new_child(&mut (*sched).stack_pool, f.take()) + } + } else { + // An unlinked task is a new root in the task tree + ~Task::new_root(&mut (*sched).stack_pool, f.take()) } - } else { - // An unlinked task is a new root in the task tree - ~Task::new_root() }; if opts.notify_chan.is_some() { @@ -601,9 +608,13 @@ fn spawn_raw_newsched(mut opts: TaskOpts, f: ~fn()) { task.on_exit = Some(on_exit); } + rtdebug!("spawn about to take scheduler"); + let mut sched = Local::take::(); - let task = ~Coroutine::with_task(&mut sched.stack_pool, - task, f); + rtdebug!("took sched in spawn"); +// let task = ~Coroutine::with_task(&mut sched.stack_pool, +// task, f); +// let task = ~Task::new_root(&mut sched.stack_pool, f); sched.schedule_task(task); } diff --git a/src/libstd/unstable/lang.rs b/src/libstd/unstable/lang.rs index f750b31a466b9..3a071af5d4cfd 100644 --- a/src/libstd/unstable/lang.rs +++ b/src/libstd/unstable/lang.rs @@ -23,6 +23,7 @@ use option::{Option, Some, None}; use io; use rt::global_heap; use rt::borrowck; +use borrow::to_uint; #[allow(non_camel_case_types)] pub type rust_task = c_void; @@ -90,6 +91,9 @@ pub unsafe fn local_malloc(td: *c_char, size: uintptr_t) -> *c_char { _ => { let mut alloc = ::ptr::null(); do Local::borrow:: |task| { + rtdebug!("task pointer: %x, heap pointer: %x", + to_uint(task), + to_uint(&task.heap)); alloc = task.heap.alloc(td as *c_void, size as uint) as *c_char; } return alloc; From 0e07c8d249f0a67c1afbeb1379b8a6993f6a26f0 Mon Sep 17 00:00:00 2001 From: Brian Anderson Date: Mon, 1 Jul 2013 16:38:17 -0700 Subject: [PATCH 095/111] rt: Add global_args_lock functions to rustrt.def.in --- src/rt/rustrt.def.in | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/rt/rustrt.def.in b/src/rt/rustrt.def.in index d85700435e008..a08a142df7719 100644 --- a/src/rt/rustrt.def.in +++ b/src/rt/rustrt.def.in @@ -245,4 +245,6 @@ rust_update_log_settings rust_running_on_valgrind rust_get_num_cpus rust_get_global_args_ptr -rust_current_boxed_region \ No newline at end of file +rust_current_boxed_region +rust_take_global_args_lock +rust_drop_global_args_lock \ No newline at end of file From 27818ea7c453f6ecc39fb45dc0fef3cd107db4e8 Mon Sep 17 00:00:00 2001 From: toddaaro Date: Mon, 1 Jul 2013 16:51:59 -0700 Subject: [PATCH 096/111] removed unnecessary import that slipped in during merge --- src/libstd/rt/sched.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/src/libstd/rt/sched.rs b/src/libstd/rt/sched.rs index b849bb6903386..7d8c673636e1b 100644 --- a/src/libstd/rt/sched.rs +++ b/src/libstd/rt/sched.rs @@ -11,7 +11,6 @@ use option::*; use sys; use cast::transmute; -use cell::Cell; use clone::Clone; use super::sleeper_list::SleeperList; From 6fd15ffbf9b121ecd727f7c6c1b886a0e643c656 Mon Sep 17 00:00:00 2001 From: Brian Anderson Date: Tue, 2 Jul 2013 11:15:56 -0700 Subject: [PATCH 097/111] std::rt: Ignore homed task tests --- src/libstd/rt/sched.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/src/libstd/rt/sched.rs b/src/libstd/rt/sched.rs index 26956c49a629a..7813579666a94 100644 --- a/src/libstd/rt/sched.rs +++ b/src/libstd/rt/sched.rs @@ -1127,6 +1127,7 @@ mod test { // times. #[test] + #[ignore(reason = "iloopy")] fn test_stress_homed_tasks() { let n = stress_factor(); for int::range(0,n as int) |_| { From 060717828074f31902e74e557ade8eeb47e1813a Mon Sep 17 00:00:00 2001 From: toddaaro Date: Tue, 2 Jul 2013 11:44:51 -0700 Subject: [PATCH 098/111] A missing ! 
made it so that the testcase schedule_home_states was throwing spurious assert failures. Why this did not result in the test case failing previously is beyond me. --- src/libstd/rt/task.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/libstd/rt/task.rs b/src/libstd/rt/task.rs index 333eaa5391220..b2e4f0d4716ff 100644 --- a/src/libstd/rt/task.rs +++ b/src/libstd/rt/task.rs @@ -212,7 +212,7 @@ impl Task { /// On a special scheduler? pub fn on_special() -> bool { do Local::borrow:: |sched| { - sched.run_anything + !sched.run_anything } } From f8a4d09f7efb618ca3f8b70374e158504cb33cb0 Mon Sep 17 00:00:00 2001 From: Brian Anderson Date: Mon, 24 Jun 2013 23:02:03 -0700 Subject: [PATCH 099/111] std: Use the same task failure message as C++ rt --- src/libstd/sys.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/libstd/sys.rs b/src/libstd/sys.rs index 523c5d633cf44..ad6f1d23c10ac 100644 --- a/src/libstd/sys.rs +++ b/src/libstd/sys.rs @@ -203,8 +203,8 @@ pub fn begin_unwind_(msg: *c_char, file: *c_char, line: size_t) -> ! { let msg = str::raw::from_c_str(msg); let file = str::raw::from_c_str(file); - let outmsg = fmt!("task failed: '%s' at line %i of file %s", - msg, line as int, file); + let outmsg = fmt!("task failed at '%s', %s:%i", + msg, file, line as int); // XXX: Logging doesn't work correctly in non-task context because it // invokes the local heap From e6c57793be2cf7aabfa96aeada77935cc0351067 Mon Sep 17 00:00:00 2001 From: Eric Reed Date: Tue, 2 Jul 2013 16:40:57 -0700 Subject: [PATCH 100/111] IPv6 support for UDP and TCP. --- src/libstd/rt/io/net/tcp.rs | 207 +++++++++++++++++- src/libstd/rt/io/net/udp.rs | 73 ++++++- src/libstd/rt/rtio.rs | 30 ++- src/libstd/rt/test.rs | 9 +- src/libstd/rt/uv/net.rs | 419 ++++++++++++++++++++++++++++-------- src/libstd/rt/uv/uvio.rs | 69 ++++-- src/libstd/rt/uv/uvll.rs | 92 +++++++- src/rt/rust_uv.cpp | 94 ++++++++ src/rt/rustrt.def.in | 15 ++ 9 files changed, 877 insertions(+), 131 deletions(-) diff --git a/src/libstd/rt/io/net/tcp.rs b/src/libstd/rt/io/net/tcp.rs index 947fade096b92..2425c909bf3d8 100644 --- a/src/libstd/rt/io/net/tcp.rs +++ b/src/libstd/rt/io/net/tcp.rs @@ -148,7 +148,7 @@ mod test { } #[test] - fn smoke_test() { + fn smoke_test_ip4() { do run_in_newsched_task { let addr = next_test_ip4(); @@ -168,7 +168,27 @@ mod test { } #[test] - fn read_eof() { + fn smoke_test_ip6() { + do run_in_newsched_task { + let addr = next_test_ip6(); + + do spawntask_immediately { + let mut listener = TcpListener::bind(addr); + let mut stream = listener.accept(); + let mut buf = [0]; + stream.read(buf); + assert!(buf[0] == 99); + } + + do spawntask_immediately { + let mut stream = TcpStream::connect(addr); + stream.write([99]); + } + } + } + + #[test] + fn read_eof_ip4() { do run_in_newsched_task { let addr = next_test_ip4(); @@ -188,7 +208,27 @@ mod test { } #[test] - fn read_eof_twice() { + fn read_eof_ip6() { + do run_in_newsched_task { + let addr = next_test_ip6(); + + do spawntask_immediately { + let mut listener = TcpListener::bind(addr); + let mut stream = listener.accept(); + let mut buf = [0]; + let nread = stream.read(buf); + assert!(nread.is_none()); + } + + do spawntask_immediately { + let _stream = TcpStream::connect(addr); + // Close + } + } + } + + #[test] + fn read_eof_twice_ip4() { do run_in_newsched_task { let addr = next_test_ip4(); @@ -210,7 +250,29 @@ mod test { } #[test] - fn write_close() { + fn read_eof_twice_ip6() { + do run_in_newsched_task { + let addr = next_test_ip6(); + + do 
spawntask_immediately { + let mut listener = TcpListener::bind(addr); + let mut stream = listener.accept(); + let mut buf = [0]; + let nread = stream.read(buf); + assert!(nread.is_none()); + let nread = stream.read(buf); + assert!(nread.is_none()); + } + + do spawntask_immediately { + let _stream = TcpStream::connect(addr); + // Close + } + } + } + + #[test] + fn write_close_ip4() { do run_in_newsched_task { let addr = next_test_ip4(); @@ -239,7 +301,36 @@ mod test { } #[test] - fn multiple_connect_serial() { + fn write_close_ip6() { + do run_in_newsched_task { + let addr = next_test_ip6(); + + do spawntask_immediately { + let mut listener = TcpListener::bind(addr); + let mut stream = listener.accept(); + let buf = [0]; + loop { + let mut stop = false; + do io_error::cond.trap(|e| { + // NB: ECONNRESET on linux, EPIPE on mac + assert!(e.kind == ConnectionReset || e.kind == BrokenPipe); + stop = true; + }).in { + stream.write(buf); + } + if stop { break } + } + } + + do spawntask_immediately { + let _stream = TcpStream::connect(addr); + // Close + } + } + } + + #[test] + fn multiple_connect_serial_ip4() { do run_in_newsched_task { let addr = next_test_ip4(); let max = 10; @@ -264,7 +355,32 @@ mod test { } #[test] - fn multiple_connect_interleaved_greedy_schedule() { + fn multiple_connect_serial_ip6() { + do run_in_newsched_task { + let addr = next_test_ip6(); + let max = 10; + + do spawntask_immediately { + let mut listener = TcpListener::bind(addr); + for max.times { + let mut stream = listener.accept(); + let mut buf = [0]; + stream.read(buf); + assert_eq!(buf[0], 99); + } + } + + do spawntask_immediately { + for max.times { + let mut stream = TcpStream::connect(addr); + stream.write([99]); + } + } + } + } + + #[test] + fn multiple_connect_interleaved_greedy_schedule_ip4() { do run_in_newsched_task { let addr = next_test_ip4(); static MAX: int = 10; @@ -303,7 +419,46 @@ mod test { } #[test] - fn multiple_connect_interleaved_lazy_schedule() { + fn multiple_connect_interleaved_greedy_schedule_ip6() { + do run_in_newsched_task { + let addr = next_test_ip6(); + static MAX: int = 10; + + do spawntask_immediately { + let mut listener = TcpListener::bind(addr); + for int::range(0, MAX) |i| { + let stream = Cell::new(listener.accept()); + rtdebug!("accepted"); + // Start another task to handle the connection + do spawntask_immediately { + let mut stream = stream.take(); + let mut buf = [0]; + stream.read(buf); + assert!(buf[0] == i as u8); + rtdebug!("read"); + } + } + } + + connect(0, addr); + + fn connect(i: int, addr: IpAddr) { + if i == MAX { return } + + do spawntask_immediately { + rtdebug!("connecting"); + let mut stream = TcpStream::connect(addr); + // Connect again before writing + connect(i + 1, addr); + rtdebug!("writing"); + stream.write([i as u8]); + } + } + } + } + + #[test] + fn multiple_connect_interleaved_lazy_schedule_ip4() { do run_in_newsched_task { let addr = next_test_ip4(); static MAX: int = 10; @@ -340,5 +495,43 @@ mod test { } } } + #[test] + fn multiple_connect_interleaved_lazy_schedule_ip6() { + do run_in_newsched_task { + let addr = next_test_ip6(); + static MAX: int = 10; + + do spawntask_immediately { + let mut listener = TcpListener::bind(addr); + for int::range(0, MAX) |_| { + let stream = Cell::new(listener.accept()); + rtdebug!("accepted"); + // Start another task to handle the connection + do spawntask_later { + let mut stream = stream.take(); + let mut buf = [0]; + stream.read(buf); + assert!(buf[0] == 99); + rtdebug!("read"); + } + } + } + + connect(0, addr); 
+ + fn connect(i: int, addr: IpAddr) { + if i == MAX { return } + + do spawntask_later { + rtdebug!("connecting"); + let mut stream = TcpStream::connect(addr); + // Connect again before writing + connect(i + 1, addr); + rtdebug!("writing"); + stream.write([99]); + } + } + } + } } diff --git a/src/libstd/rt/io/net/udp.rs b/src/libstd/rt/io/net/udp.rs index c66f7d8ce0616..f3b5278357392 100644 --- a/src/libstd/rt/io/net/udp.rs +++ b/src/libstd/rt/io/net/udp.rs @@ -115,7 +115,7 @@ mod test { } #[test] - fn socket_smoke_test() { + fn socket_smoke_test_ip4() { do run_in_newsched_task { let server_ip = next_test_ip4(); let client_ip = next_test_ip4(); @@ -147,7 +147,39 @@ mod test { } #[test] - fn stream_smoke_test() { + fn socket_smoke_test_ip6() { + do run_in_newsched_task { + let server_ip = next_test_ip6(); + let client_ip = next_test_ip6(); + + do spawntask_immediately { + match UdpSocket::bind(server_ip) { + Some(server) => { + let mut buf = [0]; + match server.recvfrom(buf) { + Some((nread, src)) => { + assert_eq!(nread, 1); + assert_eq!(buf[0], 99); + assert_eq!(src, client_ip); + } + None => fail!() + } + } + None => fail!() + } + } + + do spawntask_immediately { + match UdpSocket::bind(client_ip) { + Some(client) => client.sendto([99], server_ip), + None => fail!() + } + } + } + } + + #[test] + fn stream_smoke_test_ip4() { do run_in_newsched_task { let server_ip = next_test_ip4(); let client_ip = next_test_ip4(); @@ -182,4 +214,41 @@ mod test { } } } + + #[test] + fn stream_smoke_test_ip6() { + do run_in_newsched_task { + let server_ip = next_test_ip6(); + let client_ip = next_test_ip6(); + + do spawntask_immediately { + match UdpSocket::bind(server_ip) { + Some(server) => { + let server = ~server; + let mut stream = server.connect(client_ip); + let mut buf = [0]; + match stream.read(buf) { + Some(nread) => { + assert_eq!(nread, 1); + assert_eq!(buf[0], 99); + } + None => fail!() + } + } + None => fail!() + } + } + + do spawntask_immediately { + match UdpSocket::bind(client_ip) { + Some(client) => { + let client = ~client; + let mut stream = client.connect(server_ip); + stream.write([99]); + } + None => fail!() + } + } + } + } } diff --git a/src/libstd/rt/rtio.rs b/src/libstd/rt/rtio.rs index e38c952f744f0..bcbdea03234c8 100644 --- a/src/libstd/rt/rtio.rs +++ b/src/libstd/rt/rtio.rs @@ -23,6 +23,7 @@ pub type IoFactoryObject = uvio::UvIoFactory; pub type RtioTcpStreamObject = uvio::UvTcpStream; pub type RtioTcpListenerObject = uvio::UvTcpListener; pub type RtioUdpSocketObject = uvio::UvUdpSocket; +pub type RtioTcpSocketObject = (); // TODO pub trait EventLoop { fn run(&mut self); @@ -48,16 +49,39 @@ pub trait IoFactory { fn udp_bind(&mut self, addr: IpAddr) -> Result<~RtioUdpSocketObject, IoError>; } -pub trait RtioTcpListener { +pub trait RtioTcpListener : RtioSocket { fn accept(&mut self) -> Result<~RtioTcpStreamObject, IoError>; + fn accept_simultaneously(&self); + fn dont_accept_simultaneously(&self); } -pub trait RtioTcpStream { +pub trait RtioTcpStream : RtioSocket { fn read(&self, buf: &mut [u8]) -> Result; fn write(&self, buf: &[u8]) -> Result<(), IoError>; + fn peer_name(&self) -> IpAddr; + fn control_congestion(&self); + fn nodelay(&self); + fn keepalive(&self, delay_in_seconds: uint); + fn letdie(&self); } -pub trait RtioUdpSocket { +pub trait RtioSocket { + fn socket_name(&self) -> IpAddr; +} + +pub trait RtioUdpSocket : RtioSocket { fn recvfrom(&self, buf: &mut [u8]) -> Result<(uint, IpAddr), IoError>; fn sendto(&self, buf: &[u8], dst: IpAddr) -> Result<(), IoError>; + + fn 
join_multicast(&self, multi: IpAddr); + fn leave_multicast(&self, multi: IpAddr); + + fn loop_multicast_locally(&self); + fn dont_loop_multicast_locally(&self); + + fn multicast_time_to_live(&self, ttl: int); + fn time_to_live(&self, ttl: int); + + fn hear_broadcasts(&self); + fn ignore_broadcasts(&self); } diff --git a/src/libstd/rt/test.rs b/src/libstd/rt/test.rs index b0e4968401474..e1b338e2cad18 100644 --- a/src/libstd/rt/test.rs +++ b/src/libstd/rt/test.rs @@ -17,7 +17,7 @@ use iterator::IteratorUtil; use vec::{OwnedVector, MutableVector}; use result::{Result, Ok, Err}; use unstable::run_in_bare_thread; -use super::io::net::ip::{IpAddr, Ipv4}; +use super::io::net::ip::{IpAddr, Ipv4, Ipv6}; use rt::comm::oneshot; use rt::task::Task; use rt::thread::Thread; @@ -405,11 +405,16 @@ pub fn next_test_port() -> u16 { } } -/// Get a unique localhost:port pair starting at 9600 +/// Get a unique IPv4 localhost:port pair starting at 9600 pub fn next_test_ip4() -> IpAddr { Ipv4(127, 0, 0, 1, next_test_port()) } +/// Get a unique IPv6 localhost:port pair starting at 9600 +pub fn next_test_ip6() -> IpAddr { + Ipv6(0, 0, 0, 0, 0, 0, 0, 1, next_test_port()) +} + /// Get a constant that represents the number of times to repeat stress tests. Default 1. pub fn stress_factor() -> uint { use os::getenv; diff --git a/src/libstd/rt/uv/net.rs b/src/libstd/rt/uv/net.rs index dc766b2d7f88b..4c3cde7d6df6f 100644 --- a/src/libstd/rt/uv/net.rs +++ b/src/libstd/rt/uv/net.rs @@ -15,48 +15,144 @@ use rt::uv::uvll::*; use rt::uv::{AllocCallback, ConnectionCallback, ReadCallback, UdpReceiveCallback, UdpSendCallback}; use rt::uv::{Loop, Watcher, Request, UvError, Buf, NativeHandle, NullCallback, status_to_maybe_uv_error}; -use rt::io::net::ip::{IpAddr, Ipv4}; +use rt::io::net::ip::{IpAddr, Ipv4, Ipv6}; use rt::uv::last_uv_error; use vec; use str; use from_str::{FromStr}; +use num; -pub fn ip4_as_uv_ip4(addr: IpAddr, f: &fn(*sockaddr_in) -> T) -> T { - match addr { - Ipv4(a, b, c, d, p) => { - unsafe { - let addr = malloc_ip4_addr(fmt!("%u.%u.%u.%u", - a as uint, - b as uint, - c as uint, - d as uint), p as int); - do (|| { - f(addr) - }).finally { - free_ip4_addr(addr); - } - } +enum UvIpAddr { + UvIpv4(*sockaddr_in), + UvIpv6(*sockaddr_in6), +} + +fn sockaddr_to_UvIpAddr(addr: *uvll::sockaddr) -> UvIpAddr { + unsafe { + assert!((is_ip4_addr(addr) || is_ip6_addr(addr))); + assert!(!(is_ip4_addr(addr) && is_ip6_addr(addr))); + match addr { + _ if is_ip4_addr(addr) => UvIpv4(as_sockaddr_in(addr)), + _ if is_ip6_addr(addr) => UvIpv6(as_sockaddr_in6(addr)), + _ => fail!(), } - _ => fail!() // TODO ipv6 } } -pub fn uv_ip4_to_ip4(addr: *sockaddr_in) -> IpAddr { - let ip4_size = 16; - let buf = vec::from_elem(ip4_size + 1 /*null terminated*/, 0u8); - unsafe { uvll::ip4_name(addr, vec::raw::to_ptr(buf), ip4_size as u64) }; - let port = unsafe { uvll::ip4_port(addr) }; - let ip_str = str::from_bytes_slice(buf).trim_right_chars(&'\x00'); - let ip: ~[u8] = ip_str.split_iter('.') - .transform(|s: &str| -> u8 { - let x = FromStr::from_str(s); - assert!(x.is_some()); - x.unwrap() }) - .collect(); - assert!(ip.len() >= 4); - Ipv4(ip[0], ip[1], ip[2], ip[3], port as u16) +fn ip_as_uv_ip(addr: IpAddr, f: &fn(UvIpAddr) -> T) -> T { + let malloc = match addr { + Ipv4(*) => malloc_ip4_addr, + Ipv6(*) => malloc_ip6_addr, + }; + let wrap = match addr { + Ipv4(*) => UvIpv4, + Ipv6(*) => UvIpv6, + }; + let ip_str = match addr { + Ipv4(x1, x2, x3, x4, _) => + fmt!("%u.%u.%u.%u", x1 as uint, x2 as uint, x3 as uint, x4 as uint), + Ipv6(x1, x2, 
x3, x4, x5, x6, x7, x8, _) => + fmt!("%x:%x:%x:%x:%x:%x:%x:%x", + x1 as uint, x2 as uint, x3 as uint, x4 as uint, + x5 as uint, x6 as uint, x7 as uint, x8 as uint), + }; + let port = match addr { + Ipv4(_, _, _, _, p) | Ipv6(_, _, _, _, _, _, _, _, p) => p as int + }; + let free = match addr { + Ipv4(*) => free_ip4_addr, + Ipv6(*) => free_ip6_addr, + }; + + let addr = unsafe { malloc(ip_str, port) }; + do (|| { + f(wrap(addr)) + }).finally { + unsafe { free(addr) }; + } +} + +fn uv_ip_as_ip(addr: UvIpAddr, f: &fn(IpAddr) -> T) -> T { + let ip_size = match addr { + UvIpv4(*) => 4/*groups of*/ * 3/*digits separated by*/ + 3/*periods*/, + UvIpv6(*) => 8/*groups of*/ * 4/*hex digits separated by*/ + 7 /*colons*/, + }; + let ip_name = { + let buf = vec::from_elem(ip_size + 1 /*null terminated*/, 0u8); + unsafe { + match addr { + UvIpv4(addr) => uvll::ip4_name(addr, vec::raw::to_ptr(buf), ip_size as u64), + UvIpv6(addr) => uvll::ip6_name(addr, vec::raw::to_ptr(buf), ip_size as u64), + } + }; + buf + }; + let ip_port = unsafe { + let port = match addr { + UvIpv4(addr) => uvll::ip4_port(addr), + UvIpv6(addr) => uvll::ip6_port(addr), + }; + port as u16 + }; + let ip_str = str::from_bytes_slice(ip_name).trim_right_chars(&'\x00'); + let ip = match addr { + UvIpv4(*) => { + let ip: ~[u8] = + ip_str.split_iter('.') + .transform(|s: &str| -> u8 { FromStr::from_str(s).unwrap() }) + .collect(); + assert_eq!(ip.len(), 4); + Ipv4(ip[0], ip[1], ip[2], ip[3], ip_port) + }, + UvIpv6(*) => { + let ip: ~[u16] = { + let read_hex_segment = |s: &str| -> u16 { + num::FromStrRadix::from_str_radix(s, 16u).unwrap() + }; + let convert_each_segment = |s: &str| -> ~[u16] { + match s { + "" => ~[], + s => s.split_iter(':').transform(read_hex_segment).collect(), + } + }; + let expand_shorthand_and_convert = |s: &str| -> ~[~[u16]] { + s.split_str_iter("::").transform(convert_each_segment).collect() + }; + match expand_shorthand_and_convert(ip_str) { + [x] => x, // no shorthand found + [l, r] => l + vec::from_elem(8 - l.len() - r.len(), 0u16) + r, // fill the gap + _ => fail!(), // impossible. only one shorthand allowed. 
+ } + }; + assert_eq!(ip.len(), 8); + Ipv6(ip[0], ip[1], ip[2], ip[3], ip[4], ip[5], ip[6], ip[7], ip_port) + }, + }; + + // finally run the closure + f(ip) +} + +fn uv_ip_to_ip(addr: UvIpAddr) -> IpAddr { + use util; + uv_ip_as_ip(addr, util::id) } +#[cfg(test)] +#[test] +fn test_ip4_conversion() { + use rt; + let ip4 = rt::test::next_test_ip4(); + assert_eq!(ip4, ip_as_uv_ip(ip4, uv_ip_to_ip)); +} + +#[cfg(test)] +#[test] +fn test_ip6_conversion() { + use rt; + let ip6 = rt::test::next_test_ip6(); + assert_eq!(ip6, ip_as_uv_ip(ip6, uv_ip_to_ip)); +} // uv_stream t is the parent class of uv_tcp_t, uv_pipe_t, uv_tty_t // and uv_file_t @@ -169,18 +265,17 @@ impl TcpWatcher { } pub fn bind(&mut self, address: IpAddr) -> Result<(), UvError> { - match address { - Ipv4(*) => { - do ip4_as_uv_ip4(address) |addr| { - let result = unsafe { uvll::tcp_bind(self.native_handle(), addr) }; - if result == 0 { - Ok(()) - } else { - Err(last_uv_error(self)) - } + do ip_as_uv_ip(address) |addr| { + let result = unsafe { + match addr { + UvIpv4(addr) => uvll::tcp_bind(self.native_handle(), addr), + UvIpv6(addr) => uvll::tcp_bind6(self.native_handle(), addr), } + }; + match result { + 0 => Ok(()), + _ => Err(last_uv_error(self)), } - _ => fail!() } } @@ -190,16 +285,13 @@ impl TcpWatcher { self.get_watcher_data().connect_cb = Some(cb); let connect_handle = ConnectRequest::new().native_handle(); - match address { - Ipv4(*) => { - do ip4_as_uv_ip4(address) |addr| { - rtdebug!("connect_t: %x", connect_handle as uint); - assert_eq!(0, - uvll::tcp_connect(connect_handle, self.native_handle(), - addr, connect_cb)); - } - } - _ => fail!() + rtdebug!("connect_t: %x", connect_handle as uint); + do ip_as_uv_ip(address) |addr| { + let result = match addr { + UvIpv4(addr) => uvll::tcp_connect(connect_handle, self.native_handle(), addr, connect_cb), + UvIpv6(addr) => uvll::tcp_connect6(connect_handle, self.native_handle(), addr, connect_cb), + }; + assert_eq!(0, result); } extern fn connect_cb(req: *uvll::uv_connect_t, status: c_int) { @@ -266,20 +358,17 @@ impl UdpWatcher { } pub fn bind(&self, address: IpAddr) -> Result<(), UvError> { - match address { - Ipv4(*) => { - do ip4_as_uv_ip4(address) |addr| { - let result = unsafe { - uvll::udp_bind(self.native_handle(), addr, 0u32) - }; - if result == 0 { - Ok(()) - } else { - Err(last_uv_error(self)) - } + do ip_as_uv_ip(address) |addr| { + let result = unsafe { + match addr { + UvIpv4(addr) => uvll::udp_bind(self.native_handle(), addr, 0u32), + UvIpv6(addr) => uvll::udp_bind6(self.native_handle(), addr, 0u32), } + }; + match result { + 0 => Ok(()), + _ => Err(last_uv_error(self)), } - _ => fail!() // TODO ipv6 } } @@ -299,17 +388,15 @@ impl UdpWatcher { return (*alloc_cb)(suggested_size as uint); } - /* TODO the socket address should actually be a pointer to - either a sockaddr_in or sockaddr_in6. 
- In libuv, the udp_recv callback takes a struct *sockaddr */ extern fn recv_cb(handle: *uvll::uv_udp_t, nread: ssize_t, buf: Buf, - addr: *uvll::sockaddr_in, flags: c_uint) { + addr: *uvll::sockaddr, flags: c_uint) { rtdebug!("buf addr: %x", buf.base as uint); rtdebug!("buf len: %d", buf.len as int); let mut udp_watcher: UdpWatcher = NativeHandle::from_native_handle(handle); let cb = udp_watcher.get_watcher_data().udp_recv_cb.get_ref(); let status = status_to_maybe_uv_error(handle, nread as c_int); - (*cb)(udp_watcher, nread as int, buf, uv_ip4_to_ip4(addr), flags as uint, status); + let addr = uv_ip_to_ip(sockaddr_to_UvIpAddr(addr)); + (*cb)(udp_watcher, nread as int, buf, addr, flags as uint, status); } } @@ -326,17 +413,14 @@ impl UdpWatcher { } let req = UdpSendRequest::new(); - match address { - Ipv4(*) => { - do ip4_as_uv_ip4(address) |addr| { - unsafe { - assert_eq!(0, uvll::udp_send(req.native_handle(), - self.native_handle(), - [buf], addr, send_cb)); - } + do ip_as_uv_ip(address) |addr| { + let result = unsafe { + match addr { + UvIpv4(addr) => uvll::udp_send(req.native_handle(), self.native_handle(), [buf], addr, send_cb), + UvIpv6(addr) => uvll::udp_send6(req.native_handle(), self.native_handle(), [buf], addr, send_cb), } - } - _ => fail!() // TODO ipv6 + }; + assert_eq!(0, result); } extern fn send_cb(req: *uvll::uv_udp_send_t, status: c_int) { @@ -486,18 +570,30 @@ mod test { use rt::uv::{vec_from_uv_buf, vec_to_uv_buf, slice_to_uv_buf}; #[test] - fn test_ip4_conversion() { - let ip4 = next_test_ip4(); - assert_eq!(ip4, ip4_as_uv_ip4(ip4, uv_ip4_to_ip4)); + fn connect_close_ip4() { + do run_in_bare_thread() { + let mut loop_ = Loop::new(); + let mut tcp_watcher = { TcpWatcher::new(&mut loop_) }; + // Connect to a port where nobody is listening + let addr = next_test_ip4(); + do tcp_watcher.connect(addr) |stream_watcher, status| { + rtdebug!("tcp_watcher.connect!"); + assert!(status.is_some()); + assert_eq!(status.get().name(), ~"ECONNREFUSED"); + stream_watcher.close(||()); + } + loop_.run(); + loop_.close(); + } } #[test] - fn connect_close() { + fn connect_close_ip6() { do run_in_bare_thread() { let mut loop_ = Loop::new(); let mut tcp_watcher = { TcpWatcher::new(&mut loop_) }; // Connect to a port where nobody is listening - let addr = next_test_ip4(); + let addr = next_test_ip6(); do tcp_watcher.connect(addr) |stream_watcher, status| { rtdebug!("tcp_watcher.connect!"); assert!(status.is_some()); @@ -510,7 +606,7 @@ mod test { } #[test] - fn udp_bind_close() { + fn udp_bind_close_ip4() { do run_in_bare_thread() { let mut loop_ = Loop::new(); let udp_watcher = { UdpWatcher::new(&mut loop_) }; @@ -523,7 +619,20 @@ mod test { } #[test] - fn listen() { + fn udp_bind_close_ip6() { + do run_in_bare_thread() { + let mut loop_ = Loop::new(); + let udp_watcher = { UdpWatcher::new(&mut loop_) }; + let addr = next_test_ip6(); + udp_watcher.bind(addr); + udp_watcher.close(||()); + loop_.run(); + loop_.close(); + } + } + + #[test] + fn listen_ip4() { do run_in_bare_thread() { static MAX: int = 10; let mut loop_ = Loop::new(); @@ -532,10 +641,82 @@ mod test { server_tcp_watcher.bind(addr); let loop_ = loop_; rtdebug!("listening"); - do server_tcp_watcher.listen |server_stream_watcher, status| { + do server_tcp_watcher.listen |mut server_stream_watcher, status| { + rtdebug!("listened!"); + assert!(status.is_none()); + let mut loop_ = loop_; + let client_tcp_watcher = TcpWatcher::new(&mut loop_); + let mut client_tcp_watcher = client_tcp_watcher.as_stream(); + 
server_stream_watcher.accept(client_tcp_watcher); + let count_cell = Cell::new(0); + let server_stream_watcher = server_stream_watcher; + rtdebug!("starting read"); + let alloc: AllocCallback = |size| { + vec_to_uv_buf(vec::from_elem(size, 0)) + }; + do client_tcp_watcher.read_start(alloc) |stream_watcher, nread, buf, status| { + + rtdebug!("i'm reading!"); + let buf = vec_from_uv_buf(buf); + let mut count = count_cell.take(); + if status.is_none() { + rtdebug!("got %d bytes", nread); + let buf = buf.unwrap(); + for buf.slice(0, nread as uint).each |byte| { + assert!(*byte == count as u8); + rtdebug!("%u", *byte as uint); + count += 1; + } + } else { + assert_eq!(count, MAX); + do stream_watcher.close { + server_stream_watcher.close(||()); + } + } + count_cell.put_back(count); + } + } + + let _client_thread = do Thread::start { + rtdebug!("starting client thread"); + let mut loop_ = Loop::new(); + let mut tcp_watcher = { TcpWatcher::new(&mut loop_) }; + do tcp_watcher.connect(addr) |mut stream_watcher, status| { + rtdebug!("connecting"); + assert!(status.is_none()); + let msg = ~[0, 1, 2, 3, 4, 5, 6 ,7 ,8, 9]; + let buf = slice_to_uv_buf(msg); + let msg_cell = Cell::new(msg); + do stream_watcher.write(buf) |stream_watcher, status| { + rtdebug!("writing"); + assert!(status.is_none()); + let msg_cell = Cell::new(msg_cell.take()); + stream_watcher.close(||ignore(msg_cell.take())); + } + } + loop_.run(); + loop_.close(); + }; + + let mut loop_ = loop_; + loop_.run(); + loop_.close(); + } + } + + #[test] + fn listen_ip6() { + do run_in_bare_thread() { + static MAX: int = 10; + let mut loop_ = Loop::new(); + let mut server_tcp_watcher = { TcpWatcher::new(&mut loop_) }; + let addr = next_test_ip6(); + server_tcp_watcher.bind(addr); + let loop_ = loop_; + rtdebug!("listening"); + do server_tcp_watcher.listen |mut server_stream_watcher, status| { rtdebug!("listened!"); assert!(status.is_none()); - let mut server_stream_watcher = server_stream_watcher; let mut loop_ = loop_; let client_tcp_watcher = TcpWatcher::new(&mut loop_); let mut client_tcp_watcher = client_tcp_watcher.as_stream(); @@ -574,10 +755,9 @@ mod test { rtdebug!("starting client thread"); let mut loop_ = Loop::new(); let mut tcp_watcher = { TcpWatcher::new(&mut loop_) }; - do tcp_watcher.connect(addr) |stream_watcher, status| { + do tcp_watcher.connect(addr) |mut stream_watcher, status| { rtdebug!("connecting"); assert!(status.is_none()); - let mut stream_watcher = stream_watcher; let msg = ~[0, 1, 2, 3, 4, 5, 6 ,7 ,8, 9]; let buf = slice_to_uv_buf(msg); let msg_cell = Cell::new(msg); @@ -599,7 +779,7 @@ mod test { } #[test] - fn udp_recv() { + fn udp_recv_ip4() { do run_in_bare_thread() { static MAX: int = 10; let mut loop_ = Loop::new(); @@ -656,4 +836,63 @@ mod test { loop_.close(); } } + + #[test] + fn udp_recv_ip6() { + do run_in_bare_thread() { + static MAX: int = 10; + let mut loop_ = Loop::new(); + let server_addr = next_test_ip6(); + let client_addr = next_test_ip6(); + + let server = UdpWatcher::new(&loop_); + assert!(server.bind(server_addr).is_ok()); + + rtdebug!("starting read"); + let alloc: AllocCallback = |size| { + vec_to_uv_buf(vec::from_elem(size, 0)) + }; + + do server.recv_start(alloc) |server, nread, buf, src, flags, status| { + server.recv_stop(); + rtdebug!("i'm reading!"); + assert!(status.is_none()); + assert_eq!(flags, 0); + assert_eq!(src, client_addr); + + let buf = vec_from_uv_buf(buf); + let mut count = 0; + rtdebug!("got %d bytes", nread); + + let buf = buf.unwrap(); + for buf.slice(0, nread as 
uint).iter().advance() |&byte| { + assert!(byte == count as u8); + rtdebug!("%u", byte as uint); + count += 1; + } + assert_eq!(count, MAX); + + server.close(||{}); + } + + do Thread::start { + let mut loop_ = Loop::new(); + let client = UdpWatcher::new(&loop_); + assert!(client.bind(client_addr).is_ok()); + let msg = ~[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; + let buf = slice_to_uv_buf(msg); + do client.send(buf, server_addr) |client, status| { + rtdebug!("writing"); + assert!(status.is_none()); + client.close(||{}); + } + + loop_.run(); + loop_.close(); + }; + + loop_.run(); + loop_.close(); + } + } } diff --git a/src/libstd/rt/uv/uvio.rs b/src/libstd/rt/uv/uvio.rs index 1ae6cd8b17bb7..e1ff8ba1e22ac 100644 --- a/src/libstd/rt/uv/uvio.rs +++ b/src/libstd/rt/uv/uvio.rs @@ -60,9 +60,8 @@ impl EventLoop for UvEventLoop { fn callback(&mut self, f: ~fn()) { let mut idle_watcher = IdleWatcher::new(self.uvio.uv_loop()); - do idle_watcher.start |idle_watcher, status| { + do idle_watcher.start |mut idle_watcher, status| { assert!(status.is_none()); - let mut idle_watcher = idle_watcher; idle_watcher.stop(); idle_watcher.close(||()); f(); @@ -218,7 +217,7 @@ impl IoFactory for UvIoFactory { rtdebug!("connect: in connect callback"); if status.is_none() { rtdebug!("status is none"); - let res = Ok(~UvTcpStream { watcher: stream_watcher }); + let res = Ok(~UvTcpStream(stream_watcher)); // Store the stream in the task's stack unsafe { (*result_cell_ptr).put_back(res); } @@ -313,6 +312,11 @@ impl Drop for UvTcpListener { } } +impl RtioSocket for UvTcpListener { + // TODO + fn socket_name(&self) -> IpAddr { fail!(); } +} + impl RtioTcpListener for UvTcpListener { fn accept(&mut self) -> Result<~RtioTcpStreamObject, IoError> { @@ -329,15 +333,14 @@ impl RtioTcpListener for UvTcpListener { let incoming_streams_cell = Cell::new(incoming_streams_cell.take()); let mut server_tcp_watcher = server_tcp_watcher; - do server_tcp_watcher.listen |server_stream_watcher, status| { + do server_tcp_watcher.listen |mut server_stream_watcher, status| { let maybe_stream = if status.is_none() { - let mut server_stream_watcher = server_stream_watcher; let mut loop_ = server_stream_watcher.event_loop(); let client_tcp_watcher = TcpWatcher::new(&mut loop_); let client_tcp_watcher = client_tcp_watcher.as_stream(); // XXX: Need's to be surfaced in interface server_stream_watcher.accept(client_tcp_watcher); - Ok(~UvTcpStream { watcher: client_tcp_watcher }) + Ok(~UvTcpStream(client_tcp_watcher)) } else { Err(standard_error(OtherIoError)) }; @@ -349,25 +352,22 @@ impl RtioTcpListener for UvTcpListener { return self.incoming_streams.recv(); } -} -// FIXME #6090: Prefer newtype structs but Drop doesn't work -pub struct UvTcpStream { - watcher: StreamWatcher + // TODO + fn accept_simultaneously(&self) { fail!(); } + fn dont_accept_simultaneously(&self) { fail!(); } } -impl UvTcpStream { - fn watcher(&self) -> StreamWatcher { self.watcher } -} +// FIXME #6090: Prefer newtype structs but Drop doesn't work +pub struct UvTcpStream(StreamWatcher); impl Drop for UvTcpStream { fn finalize(&self) { rtdebug!("closing tcp stream"); - let watcher = self.watcher(); let scheduler = Local::take::(); do scheduler.deschedule_running_task_and_then |_, task| { let task_cell = Cell::new(task); - do watcher.close { + do self.close { let scheduler = Local::take::(); scheduler.resume_task_immediately(task_cell.take()); } @@ -375,6 +375,11 @@ impl Drop for UvTcpStream { } } +impl RtioSocket for UvTcpStream { + // TODO + fn socket_name(&self) -> IpAddr { fail!(); } 
+} + impl RtioTcpStream for UvTcpStream { fn read(&self, buf: &mut [u8]) -> Result { let result_cell = Cell::new_empty(); @@ -382,25 +387,23 @@ impl RtioTcpStream for UvTcpStream { let scheduler = Local::take::(); assert!(scheduler.in_task_context()); - let watcher = self.watcher(); let buf_ptr: *&mut [u8] = &buf; do scheduler.deschedule_running_task_and_then |sched, task| { rtdebug!("read: entered scheduler context"); assert!(!sched.in_task_context()); - let mut watcher = watcher; let task_cell = Cell::new(task); // XXX: We shouldn't reallocate these callbacks every // call to read let alloc: AllocCallback = |_| unsafe { slice_to_uv_buf(*buf_ptr) }; - do watcher.read_start(alloc) |watcher, nread, _buf, status| { + let mut watcher = **self; + do watcher.read_start(alloc) |mut watcher, nread, _buf, status| { // Stop reading so that no read callbacks are // triggered before the user calls `read` again. // XXX: Is there a performance impact to calling // stop here? - let mut watcher = watcher; watcher.read_stop(); let result = if status.is_none() { @@ -426,12 +429,11 @@ impl RtioTcpStream for UvTcpStream { let result_cell_ptr: *Cell> = &result_cell; let scheduler = Local::take::(); assert!(scheduler.in_task_context()); - let watcher = self.watcher(); let buf_ptr: *&[u8] = &buf; do scheduler.deschedule_running_task_and_then |_, task| { - let mut watcher = watcher; let task_cell = Cell::new(task); let buf = unsafe { slice_to_uv_buf(*buf_ptr) }; + let mut watcher = **self; do watcher.write(buf) |_watcher, status| { let result = if status.is_none() { Ok(()) @@ -449,6 +451,13 @@ impl RtioTcpStream for UvTcpStream { assert!(!result_cell.is_empty()); return result_cell.take(); } + + // TODO + fn peer_name(&self) -> IpAddr { fail!(); } + fn control_congestion(&self) { fail!(); } + fn nodelay(&self) { fail!(); } + fn keepalive(&self, _delay_in_seconds: uint) { fail!(); } + fn letdie(&self) { fail!(); } } pub struct UvUdpSocket(UdpWatcher); @@ -467,6 +476,11 @@ impl Drop for UvUdpSocket { } } +impl RtioSocket for UvUdpSocket { + // TODO + fn socket_name(&self) -> IpAddr { fail!(); } +} + impl RtioUdpSocket for UvUdpSocket { fn recvfrom(&self, buf: &mut [u8]) -> Result<(uint, IpAddr), IoError> { let result_cell = Cell::new_empty(); @@ -530,6 +544,19 @@ impl RtioUdpSocket for UvUdpSocket { assert!(!result_cell.is_empty()); return result_cell.take(); } + + // TODO + fn join_multicast(&self, _multi: IpAddr) { fail!(); } + fn leave_multicast(&self, _multi: IpAddr) { fail!(); } + + fn loop_multicast_locally(&self) { fail!(); } + fn dont_loop_multicast_locally(&self) { fail!(); } + + fn multicast_time_to_live(&self, _ttl: int) { fail!(); } + fn time_to_live(&self, _ttl: int) { fail!(); } + + fn hear_broadcasts(&self) { fail!(); } + fn ignore_broadcasts(&self) { fail!(); } } #[test] diff --git a/src/libstd/rt/uv/uvll.rs b/src/libstd/rt/uv/uvll.rs index 7035cb6a5e80f..62bf8f27af93d 100644 --- a/src/libstd/rt/uv/uvll.rs +++ b/src/libstd/rt/uv/uvll.rs @@ -74,8 +74,10 @@ pub type uv_alloc_cb = *u8; pub type uv_udp_send_cb = *u8; pub type uv_udp_recv_cb = *u8; +pub type sockaddr = c_void; pub type sockaddr_in = c_void; pub type sockaddr_in6 = c_void; +pub type uv_membership = c_void; #[deriving(Eq)] pub enum uv_handle_type { @@ -231,6 +233,31 @@ pub unsafe fn get_udp_handle_from_send_req(send_req: *uv_udp_send_t) -> *uv_udp_ return rust_uv_get_udp_handle_from_send_req(send_req); } +pub unsafe fn udp_get_sockname(handle: *uv_udp_t, name: *sockaddr_in) -> c_int { + return rust_uv_udp_getsockname(handle, name); +} + 
+pub unsafe fn udp_get_sockname6(handle: *uv_udp_t, name: *sockaddr_in6) -> c_int { + return rust_uv_udp_getsockname6(handle, name); +} + +pub unsafe fn udp_set_membership(handle: *uv_udp_t, multicast_addr: *c_char, + interface_addr: *c_char, membership: uv_membership) -> c_int { + return rust_uv_udp_set_membership(handle, multicast_addr, interface_addr, membership); +} + +pub unsafe fn udp_set_multicast_loop(handle: *uv_udp_t, on: c_int) -> c_int { + return rust_uv_udp_set_multicast_loop(handle, on); +} + +pub unsafe fn udp_set_multicast_ttl(handle: *uv_udp_t, ttl: c_int) -> c_int { + return rust_uv_udp_set_multicast_ttl(handle, ttl); +} + +pub unsafe fn udp_set_broadcast(handle: *uv_udp_t, on: c_int) -> c_int { + return rust_uv_udp_set_broadcast(handle, on); +} + pub unsafe fn tcp_init(loop_handle: *c_void, handle: *uv_tcp_t) -> c_int { return rust_uv_tcp_init(loop_handle, handle); } @@ -261,6 +288,26 @@ pub unsafe fn tcp_getpeername6(tcp_handle_ptr: *uv_tcp_t, name: *sockaddr_in6) - return rust_uv_tcp_getpeername6(tcp_handle_ptr, name); } +pub unsafe fn tcp_getsockname(handle: *uv_tcp_t, name: *sockaddr_in) -> c_int { + return rust_uv_tcp_getsockname(handle, name); +} + +pub unsafe fn tcp_getsockname6(handle: *uv_tcp_t, name: *sockaddr_in6) -> c_int { + return rust_uv_tcp_getsockname6(handle, name); +} + +pub unsafe fn tcp_nodelay(handle: *uv_tcp_t, enable: c_int) -> c_int { + return rust_uv_tcp_nodelay(handle, enable); +} + +pub unsafe fn tcp_keepalive(handle: *uv_tcp_t, enable: c_int, delay: c_uint) -> c_int { + return rust_uv_tcp_keepalive(handle, enable, delay); +} + +pub unsafe fn tcp_simultaneous_accepts(handle: *uv_tcp_t, enable: c_int) -> c_int { + return rust_uv_tcp_simultaneous_accepts(handle, enable); +} + pub unsafe fn listen(stream: *T, backlog: c_int, cb: *u8) -> c_int { return rust_uv_listen(stream as *c_void, backlog, cb); } @@ -318,6 +365,22 @@ pub unsafe fn timer_stop(timer_ptr: *uv_timer_t) -> c_int { return rust_uv_timer_stop(timer_ptr); } +pub unsafe fn is_ip4_addr(addr: *sockaddr) -> bool { + match rust_uv_is_ipv4_sockaddr(addr) { 0 => false, _ => true } +} + +pub unsafe fn is_ip6_addr(addr: *sockaddr) -> bool { + match rust_uv_is_ipv6_sockaddr(addr) { 0 => false, _ => true } +} + +pub unsafe fn as_sockaddr_in(addr: *sockaddr) -> *sockaddr_in { + return rust_uv_sockaddr_as_sockaddr_in(addr); +} + +pub unsafe fn as_sockaddr_in6(addr: *sockaddr) -> *sockaddr_in6 { + return rust_uv_sockaddr_as_sockaddr_in6(addr); +} + pub unsafe fn malloc_ip4_addr(ip: &str, port: int) -> *sockaddr_in { do str::as_c_str(ip) |ip_buf| { rust_uv_ip4_addrp(ip_buf as *u8, port as libc::c_int) @@ -451,25 +514,42 @@ extern { fn rust_uv_ip6_name(src: *sockaddr_in6, dst: *u8, size: size_t) -> c_int; fn rust_uv_ip4_port(src: *sockaddr_in) -> c_uint; fn rust_uv_ip6_port(src: *sockaddr_in6) -> c_uint; - fn rust_uv_tcp_connect(connect_ptr: *uv_connect_t, tcp_handle_ptr: *uv_tcp_t, after_cb: *u8, + fn rust_uv_tcp_connect(req: *uv_connect_t, handle: *uv_tcp_t, cb: *u8, addr: *sockaddr_in) -> c_int; fn rust_uv_tcp_bind(tcp_server: *uv_tcp_t, addr: *sockaddr_in) -> c_int; - fn rust_uv_tcp_connect6(connect_ptr: *uv_connect_t, tcp_handle_ptr: *uv_tcp_t, after_cb: *u8, + fn rust_uv_tcp_connect6(req: *uv_connect_t, handle: *uv_tcp_t, cb: *u8, addr: *sockaddr_in6) -> c_int; fn rust_uv_tcp_bind6(tcp_server: *uv_tcp_t, addr: *sockaddr_in6) -> c_int; fn rust_uv_tcp_getpeername(tcp_handle_ptr: *uv_tcp_t, name: *sockaddr_in) -> c_int; fn rust_uv_tcp_getpeername6(tcp_handle_ptr: *uv_tcp_t, name: *sockaddr_in6) 
->c_int; + fn rust_uv_tcp_getsockname(handle: *uv_tcp_t, name: *sockaddr_in) -> c_int; + fn rust_uv_tcp_getsockname6(handle: *uv_tcp_t, name: *sockaddr_in6) -> c_int; + fn rust_uv_tcp_nodelay(handle: *uv_tcp_t, enable: c_int) -> c_int; + fn rust_uv_tcp_keepalive(handle: *uv_tcp_t, enable: c_int, delay: c_uint) -> c_int; + fn rust_uv_tcp_simultaneous_accepts(handle: *uv_tcp_t, enable: c_int) -> c_int; fn rust_uv_udp_init(loop_handle: *uv_loop_t, handle_ptr: *uv_udp_t) -> c_int; fn rust_uv_udp_bind(server: *uv_udp_t, addr: *sockaddr_in, flags: c_uint) -> c_int; fn rust_uv_udp_bind6(server: *uv_udp_t, addr: *sockaddr_in6, flags: c_uint) -> c_int; - fn rust_uv_udp_send(req: *uv_udp_send_t, handle: *uv_udp_t, buf_in: *uv_buf_t, buf_cnt: c_int, - addr: *sockaddr_in, cb: *u8) -> c_int; - fn rust_uv_udp_send6(req: *uv_udp_send_t, handle: *uv_udp_t, buf_in: *uv_buf_t, buf_cnt: c_int, - addr: *sockaddr_in6, cb: *u8) -> c_int; + fn rust_uv_udp_send(req: *uv_udp_send_t, handle: *uv_udp_t, buf_in: *uv_buf_t, + buf_cnt: c_int, addr: *sockaddr_in, cb: *u8) -> c_int; + fn rust_uv_udp_send6(req: *uv_udp_send_t, handle: *uv_udp_t, buf_in: *uv_buf_t, + buf_cnt: c_int, addr: *sockaddr_in6, cb: *u8) -> c_int; fn rust_uv_udp_recv_start(server: *uv_udp_t, on_alloc: *u8, on_recv: *u8) -> c_int; fn rust_uv_udp_recv_stop(server: *uv_udp_t) -> c_int; fn rust_uv_get_udp_handle_from_send_req(req: *uv_udp_send_t) -> *uv_udp_t; + fn rust_uv_udp_getsockname(handle: *uv_udp_t, name: *sockaddr_in) -> c_int; + fn rust_uv_udp_getsockname6(handle: *uv_udp_t, name: *sockaddr_in6) -> c_int; + fn rust_uv_udp_set_membership(handle: *uv_udp_t, multicast_addr: *c_char, + interface_addr: *c_char, membership: uv_membership) -> c_int; + fn rust_uv_udp_set_multicast_loop(handle: *uv_udp_t, on: c_int) -> c_int; + fn rust_uv_udp_set_multicast_ttl(handle: *uv_udp_t, ttl: c_int) -> c_int; + fn rust_uv_udp_set_broadcast(handle: *uv_udp_t, on: c_int) -> c_int; + + fn rust_uv_is_ipv4_sockaddr(addr: *sockaddr) -> c_int; + fn rust_uv_is_ipv6_sockaddr(addr: *sockaddr) -> c_int; + fn rust_uv_sockaddr_as_sockaddr_in(addr: *sockaddr) -> *sockaddr_in; + fn rust_uv_sockaddr_as_sockaddr_in6(addr: *sockaddr) -> *sockaddr_in6; fn rust_uv_listen(stream: *c_void, backlog: c_int, cb: *u8) -> c_int; fn rust_uv_accept(server: *c_void, client: *c_void) -> c_int; diff --git a/src/rt/rust_uv.cpp b/src/rt/rust_uv.cpp index 6032ed1a6bdba..32ccc9ba4a82b 100644 --- a/src/rt/rust_uv.cpp +++ b/src/rt/rust_uv.cpp @@ -293,6 +293,38 @@ rust_uv_tcp_getpeername6 return uv_tcp_getpeername(handle, (sockaddr*)name, &namelen); } +extern "C" int +rust_uv_tcp_getsockname +(uv_tcp_t* handle, sockaddr_in* name) { + int namelen = sizeof(sockaddr_in); + return uv_tcp_getsockname(handle, (sockaddr*)name, &namelen); +} + +extern "C" int +rust_uv_tcp_getsockname6 +(uv_tcp_t* handle, sockaddr_in6* name) { + int namelen = sizeof(sockaddr_in6); + return uv_tcp_getsockname(handle, (sockaddr*)name, &namelen); +} + +extern "C" int +rust_uv_tcp_nodelay +(uv_tcp_t* handle, int enable) { + return uv_tcp_nodelay(handle, enable); +} + +extern "C" int +rust_uv_tcp_keepalive +(uv_tcp_t* handle, int enable, unsigned int delay) { + return uv_tcp_keepalive(handle, enable, delay); +} + +extern "C" int +rust_uv_tcp_simultaneous_accepts +(uv_tcp_t* handle, int enable) { + return uv_tcp_simultaneous_accepts(handle, enable); +} + extern "C" int rust_uv_udp_init(uv_loop_t* loop, uv_udp_t* handle) { return uv_udp_init(loop, handle); @@ -335,6 +367,44 @@ rust_uv_get_udp_handle_from_send_req(uv_udp_send_t* 
send_req) { return send_req->handle; } +extern "C" int +rust_uv_udp_getsockname +(uv_udp_t* handle, sockaddr_in* name) { + int namelen = sizeof(sockaddr_in); + return uv_udp_getsockname(handle, (sockaddr*)name, &namelen); +} + +extern "C" int +rust_uv_udp_getsockname6 +(uv_udp_t* handle, sockaddr_in6* name) { + int namelen = sizeof(sockaddr_in6); + return uv_udp_getsockname(handle, (sockaddr*)name, &namelen); +} + +extern "C" int +rust_uv_udp_set_membership +(uv_udp_t* handle, const char* m_addr, const char* i_addr, uv_membership membership) { + return uv_udp_set_membership(handle, m_addr, i_addr, membership); +} + +extern "C" int +rust_uv_udp_set_multicast_loop +(uv_udp_t* handle, int on) { + return uv_udp_set_multicast_loop(handle, on); +} + +extern "C" int +rust_uv_udp_set_multicast_ttl +(uv_udp_t* handle, int ttl) { + return uv_udp_set_multicast_ttl(handle, ttl); +} + +extern "C" int +rust_uv_udp_set_broadcast +(uv_udp_t* handle, int on) { + return uv_udp_set_broadcast(handle, on); +} + extern "C" int rust_uv_listen(uv_stream_t* stream, int backlog, uv_connection_cb cb) { @@ -587,10 +657,34 @@ extern "C" void rust_uv_freeaddrinfo(addrinfo* res) { uv_freeaddrinfo(res); } + +extern "C" int +rust_uv_is_ipv4_sockaddr(sockaddr* addr) { + return addr->sa_family == AF_INET; +} + +extern "C" int +rust_uv_is_ipv6_sockaddr(sockaddr* addr) { + return addr->sa_family == AF_INET6; +} + +extern "C" sockaddr_in* +rust_uv_sockaddr_as_sockaddr_in(sockaddr* addr) { +// return (sockaddr_in*)addr->sa_data; + return (sockaddr_in*)addr; +} + +extern "C" sockaddr_in6* +rust_uv_sockaddr_as_sockaddr_in6(sockaddr* addr) { + //return (sockaddr_in6*)addr->sa_data; + return (sockaddr_in6*)addr; +} + extern "C" bool rust_uv_is_ipv4_addrinfo(addrinfo* input) { return input->ai_family == AF_INET; } + extern "C" bool rust_uv_is_ipv6_addrinfo(addrinfo* input) { return input->ai_family == AF_INET6; diff --git a/src/rt/rustrt.def.in b/src/rt/rustrt.def.in index b604f60cba6e2..e8a46cf5a652e 100644 --- a/src/rt/rustrt.def.in +++ b/src/rt/rustrt.def.in @@ -105,6 +105,11 @@ rust_uv_tcp_connect rust_uv_tcp_bind rust_uv_tcp_connect6 rust_uv_tcp_bind6 +rust_uv_tcp_getsockname +rust_uv_tcp_getsockname6 +rust_uv_tcp_nodelay +rust_uv_tcp_keepalive +rust_uv_tcp_simultaneous_accepts rust_uv_udp_init rust_uv_udp_bind rust_uv_udp_bind6 @@ -113,6 +118,16 @@ rust_uv_udp_send6 rust_uv_udp_recv_start rust_uv_udp_recv_stop rust_uv_get_udp_handle_from_send_req +rust_uv_udp_getsockname +rust_uv_udp_getsockname6 +rust_uv_udp_set_membership +rust_uv_udp_set_multicast_loop +rust_uv_udp_set_multicast_ttl +rust_uv_udp_set_broadcast +rust_uv_is_ipv4_sockaddr +rust_uv_is_ipv6_sockaddr +rust_uv_sockaddr_as_sockaddr_in +rust_uv_sockaddr_as_sockaddr_in6 rust_uv_listen rust_uv_accept rust_uv_write From b60cf0c5b0cf36625083c2624df9fb35d0af3578 Mon Sep 17 00:00:00 2001 From: Eric Reed Date: Tue, 2 Jul 2013 17:27:46 -0700 Subject: [PATCH 101/111] converted TODOs into XXXs --- src/libstd/rt/rtio.rs | 1 - src/libstd/rt/uv/uvio.rs | 14 +++++++------- 2 files changed, 7 insertions(+), 8 deletions(-) diff --git a/src/libstd/rt/rtio.rs b/src/libstd/rt/rtio.rs index bcbdea03234c8..6bf352ee1447f 100644 --- a/src/libstd/rt/rtio.rs +++ b/src/libstd/rt/rtio.rs @@ -23,7 +23,6 @@ pub type IoFactoryObject = uvio::UvIoFactory; pub type RtioTcpStreamObject = uvio::UvTcpStream; pub type RtioTcpListenerObject = uvio::UvTcpListener; pub type RtioUdpSocketObject = uvio::UvUdpSocket; -pub type RtioTcpSocketObject = (); // TODO pub trait EventLoop { fn run(&mut self); diff 
--git a/src/libstd/rt/uv/uvio.rs b/src/libstd/rt/uv/uvio.rs index aa1f0fbc1942a..e9bbe3bb6fb9a 100644 --- a/src/libstd/rt/uv/uvio.rs +++ b/src/libstd/rt/uv/uvio.rs @@ -316,7 +316,7 @@ impl Drop for UvTcpListener { } impl RtioSocket for UvTcpListener { - // TODO + // XXX implement fn socket_name(&self) -> IpAddr { fail!(); } } @@ -356,7 +356,7 @@ impl RtioTcpListener for UvTcpListener { return self.incoming_streams.recv(); } - // TODO + // XXX implement fn accept_simultaneously(&self) { fail!(); } fn dont_accept_simultaneously(&self) { fail!(); } } @@ -379,7 +379,7 @@ impl Drop for UvTcpStream { } impl RtioSocket for UvTcpStream { - // TODO + // XXX implement fn socket_name(&self) -> IpAddr { fail!(); } } @@ -455,7 +455,7 @@ impl RtioTcpStream for UvTcpStream { return result_cell.take(); } - // TODO + // XXX implement fn peer_name(&self) -> IpAddr { fail!(); } fn control_congestion(&self) { fail!(); } fn nodelay(&self) { fail!(); } @@ -480,7 +480,7 @@ impl Drop for UvUdpSocket { } impl RtioSocket for UvUdpSocket { - // TODO + // XXX implement fn socket_name(&self) -> IpAddr { fail!(); } } @@ -498,7 +498,7 @@ impl RtioUdpSocket for UvUdpSocket { let task_cell = Cell::new(task); let alloc: AllocCallback = |_| unsafe { slice_to_uv_buf(*buf_ptr) }; do self.recv_start(alloc) |watcher, nread, _buf, addr, flags, status| { - let _ = flags; // TODO add handling for partials? + let _ = flags; // XXX add handling for partials? watcher.recv_stop(); @@ -548,7 +548,7 @@ impl RtioUdpSocket for UvUdpSocket { return result_cell.take(); } - // TODO + // XXX implement fn join_multicast(&self, _multi: IpAddr) { fail!(); } fn leave_multicast(&self, _multi: IpAddr) { fail!(); } From 6b2abcaa0f4b8f0fdbd0f8dcac5089159f0051da Mon Sep 17 00:00:00 2001 From: Eric Reed Date: Mon, 8 Jul 2013 14:19:19 -0700 Subject: [PATCH 102/111] renamed finalize to drop in Drop impl for UvUdpSocket --- src/libstd/rt/uv/uvio.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/libstd/rt/uv/uvio.rs b/src/libstd/rt/uv/uvio.rs index 3f2706e7a998c..5d0c64c686782 100644 --- a/src/libstd/rt/uv/uvio.rs +++ b/src/libstd/rt/uv/uvio.rs @@ -466,7 +466,7 @@ impl RtioTcpStream for UvTcpStream { pub struct UvUdpSocket(UdpWatcher); impl Drop for UvUdpSocket { - fn finalize(&self) { + fn drop(&self) { rtdebug!("closing udp socket"); let scheduler = Local::take::(); do scheduler.deschedule_running_task_and_then |_, task| { From 5e0be468528fdcf977aa1de6ba553b55303ec56a Mon Sep 17 00:00:00 2001 From: Eric Reed Date: Mon, 8 Jul 2013 14:19:39 -0700 Subject: [PATCH 103/111] changed .each() to .iter().advance() --- src/libstd/rt/uv/net.rs | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/src/libstd/rt/uv/net.rs b/src/libstd/rt/uv/net.rs index 72a9a6cdd5cbb..2707b9ce7d7b6 100644 --- a/src/libstd/rt/uv/net.rs +++ b/src/libstd/rt/uv/net.rs @@ -106,16 +106,16 @@ fn uv_ip_as_ip(addr: UvIpAddr, f: &fn(IpAddr) -> T) -> T { }, UvIpv6(*) => { let ip: ~[u16] = { - let read_hex_segment = |s: &str| -> u16 { - num::FromStrRadix::from_str_radix(s, 16u).unwrap() - }; - let convert_each_segment = |s: &str| -> ~[u16] { - match s { - "" => ~[], - s => s.split_iter(':').transform(read_hex_segment).collect(), - } - }; let expand_shorthand_and_convert = |s: &str| -> ~[~[u16]] { + let convert_each_segment = |s: &str| -> ~[u16] { + let read_hex_segment = |s: &str| -> u16 { + num::FromStrRadix::from_str_radix(s, 16u).unwrap() + }; + match s { + "" => ~[], + s => s.split_iter(':').transform(read_hex_segment).collect(), + } 
+ }; s.split_str_iter("::").transform(convert_each_segment).collect() }; match expand_shorthand_and_convert(ip_str) { @@ -662,7 +662,7 @@ mod test { if status.is_none() { rtdebug!("got %d bytes", nread); let buf = buf.unwrap(); - for buf.slice(0, nread as uint).each |byte| { + for buf.slice(0, nread as uint).iter().advance() |byte| { assert!(*byte == count as u8); rtdebug!("%u", *byte as uint); count += 1; From 4282539523905c95f34131e5cf8923764079b3be Mon Sep 17 00:00:00 2001 From: Brian Anderson Date: Mon, 8 Jul 2013 14:41:07 -0700 Subject: [PATCH 104/111] std::rt: Add a hack to allocate different test port ranges to different bots --- src/libstd/rt/test.rs | 46 ++++++++++++++++++++++++++++++++++-- src/rt/rust_test_helpers.cpp | 8 +++---- 2 files changed, 48 insertions(+), 6 deletions(-) diff --git a/src/libstd/rt/test.rs b/src/libstd/rt/test.rs index 659c7eb498573..89687cf3fd2f7 100644 --- a/src/libstd/rt/test.rs +++ b/src/libstd/rt/test.rs @@ -9,6 +9,7 @@ // except according to those terms. use cell::Cell; +use libc; use uint; use option::{Some, None}; use rt::sched::Scheduler; @@ -316,10 +317,10 @@ pub fn spawntask_thread(f: ~fn()) -> Thread { /// Get a port number, starting at 9600, for use in tests pub fn next_test_port() -> u16 { unsafe { - return rust_dbg_next_port() as u16; + return rust_dbg_next_port(base_port() as libc::uintptr_t) as u16; } extern { - fn rust_dbg_next_port() -> ::libc::uintptr_t; + fn rust_dbg_next_port(base: libc::uintptr_t) -> libc::uintptr_t; } } @@ -328,6 +329,47 @@ pub fn next_test_ip4() -> IpAddr { Ipv4(127, 0, 0, 1, next_test_port()) } +/* +XXX: Welcome to MegaHack City. + +The bots run multiple builds at the same time, and these builds +all want to use ports. This function figures out which workspace +it is running in and assigns a port range based on it. +*/ +fn base_port() -> uint { + use os; + use str::StrSlice; + use to_str::ToStr; + use vec::ImmutableVector; + + let base = 9600u; + let range = 1000; + + let bases = [ + ("32-opt", base + range * 1), + ("32-noopt", base + range * 2), + ("64-opt", base + range * 3), + ("64-noopt", base + range * 4), + ("64-opt-vg", base + range * 5), + ("all-opt", base + range * 6), + ("snap3", base + range * 7), + ("dist", base + range * 8) + ]; + + let path = os::getcwd().to_str(); + + let mut final_base = base; + + for bases.iter().advance |&(dir, base)| { + if path.contains(dir) { + final_base = base; + break; + } + } + + return final_base; +} + /// Get a constant that represents the number of times to repeat /// stress tests. Default 1. 
pub fn stress_factor() -> uint { diff --git a/src/rt/rust_test_helpers.cpp b/src/rt/rust_test_helpers.cpp index d82c39d6838ec..2cfd5cf1eb63c 100644 --- a/src/rt/rust_test_helpers.cpp +++ b/src/rt/rust_test_helpers.cpp @@ -168,11 +168,11 @@ rust_dbg_extern_identity_TwoDoubles(TwoDoubles u) { // Generates increasing port numbers for network testing extern "C" CDECL uintptr_t -rust_dbg_next_port() { +rust_dbg_next_port(uintptr_t base_port) { static lock_and_signal dbg_port_lock; - static uintptr_t next_port = 9600; + static uintptr_t next_offset = 0; scoped_lock with(dbg_port_lock); - uintptr_t this_port = next_port; - next_port += 1; + uintptr_t this_port = base_port + next_offset; + next_offset += 1; return this_port; } From 7826651335b81f4630aa4d0d43a5de95b96d2311 Mon Sep 17 00:00:00 2001 From: Brian Anderson Date: Mon, 8 Jul 2013 17:22:51 -0700 Subject: [PATCH 105/111] Tidy --- src/libstd/rt/borrowck.rs | 8 ++++---- src/libstd/rt/global_heap.rs | 2 +- src/libstd/rt/uv/net.rs | 14 +++++++++----- src/libstd/vec.rs | 6 ++---- 4 files changed, 16 insertions(+), 14 deletions(-) diff --git a/src/libstd/rt/borrowck.rs b/src/libstd/rt/borrowck.rs index e057f6e963714..60df2d5c11ba2 100644 --- a/src/libstd/rt/borrowck.rs +++ b/src/libstd/rt/borrowck.rs @@ -147,15 +147,15 @@ impl DebugPrints for io::fd_t { fn write_hex(&self, mut i: uint) { let letters = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f']; - static uint_nibbles: uint = ::uint::bytes << 1; - let mut buffer = [0_u8, ..uint_nibbles+1]; - let mut c = uint_nibbles; + static UINT_NIBBLES: uint = ::uint::bytes << 1; + let mut buffer = [0_u8, ..UINT_NIBBLES+1]; + let mut c = UINT_NIBBLES; while c > 0 { c -= 1; buffer[c] = letters[i & 0xF] as u8; i >>= 4; } - self.write(buffer.slice(0, uint_nibbles)); + self.write(buffer.slice(0, UINT_NIBBLES)); } unsafe fn write_cstr(&self, p: *c_char) { diff --git a/src/libstd/rt/global_heap.rs b/src/libstd/rt/global_heap.rs index c9bf3b2853039..53634f0cf86e4 100644 --- a/src/libstd/rt/global_heap.rs +++ b/src/libstd/rt/global_heap.rs @@ -80,7 +80,7 @@ pub unsafe fn exchange_malloc(td: *c_char, size: uintptr_t) -> *c_char { #[cfg(not(stage0), not(test))] #[lang="exchange_malloc"] #[inline] -pub unsafe fn exchange_malloc(align: u32, size: uintptr_t) -> *c_char { +pub unsafe fn exchange_malloc(_align: u32, size: uintptr_t) -> *c_char { malloc_raw(size as uint) as *c_char } diff --git a/src/libstd/rt/uv/net.rs b/src/libstd/rt/uv/net.rs index 2707b9ce7d7b6..a736d54d62a5f 100644 --- a/src/libstd/rt/uv/net.rs +++ b/src/libstd/rt/uv/net.rs @@ -97,7 +97,7 @@ fn uv_ip_as_ip(addr: UvIpAddr, f: &fn(IpAddr) -> T) -> T { let ip_str = str::from_bytes_slice(ip_name).trim_right_chars(&'\x00'); let ip = match addr { UvIpv4(*) => { - let ip: ~[u8] = + let ip: ~[u8] = ip_str.split_iter('.') .transform(|s: &str| -> u8 { FromStr::from_str(s).unwrap() }) .collect(); @@ -288,8 +288,10 @@ impl TcpWatcher { rtdebug!("connect_t: %x", connect_handle as uint); do ip_as_uv_ip(address) |addr| { let result = match addr { - UvIpv4(addr) => uvll::tcp_connect(connect_handle, self.native_handle(), addr, connect_cb), - UvIpv6(addr) => uvll::tcp_connect6(connect_handle, self.native_handle(), addr, connect_cb), + UvIpv4(addr) => uvll::tcp_connect(connect_handle, + self.native_handle(), addr, connect_cb), + UvIpv6(addr) => uvll::tcp_connect6(connect_handle, + self.native_handle(), addr, connect_cb), }; assert_eq!(0, result); } @@ -416,8 +418,10 @@ impl UdpWatcher { do ip_as_uv_ip(address) |addr| { let result = 
unsafe { match addr { - UvIpv4(addr) => uvll::udp_send(req.native_handle(), self.native_handle(), [buf], addr, send_cb), - UvIpv6(addr) => uvll::udp_send6(req.native_handle(), self.native_handle(), [buf], addr, send_cb), + UvIpv4(addr) => uvll::udp_send(req.native_handle(), + self.native_handle(), [buf], addr, send_cb), + UvIpv6(addr) => uvll::udp_send6(req.native_handle(), + self.native_handle(), [buf], addr, send_cb), } }; assert_eq!(0, result); diff --git a/src/libstd/vec.rs b/src/libstd/vec.rs index 2c1e913511eff..1188cfbe59507 100644 --- a/src/libstd/vec.rs +++ b/src/libstd/vec.rs @@ -32,14 +32,12 @@ use sys::size_of; use uint; use unstable::intrinsics; #[cfg(stage0)] -use intrinsic::{get_tydesc, TyDesc}; +use intrinsic::{get_tydesc}; #[cfg(not(stage0))] -use unstable::intrinsics::{get_tydesc, contains_managed, TyDesc}; +use unstable::intrinsics::{get_tydesc, contains_managed}; use vec; use util; -#[cfg(not(test))] use cmp::Equiv; - /// Returns true if two vectors have the same length pub fn same_length(xs: &[T], ys: &[U]) -> bool { xs.len() == ys.len() From 29c9443d854073feeceb7ec2afa5841d9d1242af Mon Sep 17 00:00:00 2001 From: Brian Anderson Date: Mon, 8 Jul 2013 18:06:17 -0700 Subject: [PATCH 106/111] std: Add a yield implementation for newsched --- src/libstd/task/mod.rs | 23 +++++++++++++++++++---- 1 file changed, 19 insertions(+), 4 deletions(-) diff --git a/src/libstd/task/mod.rs b/src/libstd/task/mod.rs index ae8f1c2101dec..5a3ff10ae83e6 100644 --- a/src/libstd/task/mod.rs +++ b/src/libstd/task/mod.rs @@ -497,11 +497,26 @@ pub fn try(f: ~fn() -> T) -> Result { pub fn yield() { //! Yield control to the task scheduler + use rt::{context, OldTaskContext}; + use rt::local::Local; + use rt::sched::Scheduler; + unsafe { - let task_ = rt::rust_get_task(); - let killed = rt::rust_task_yield(task_); - if killed && !failing() { - fail!("killed"); + match context() { + OldTaskContext => { + let task_ = rt::rust_get_task(); + let killed = rt::rust_task_yield(task_); + if killed && !failing() { + fail!("killed"); + } + } + _ => { + // XXX: What does yield really mean in newsched? 
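+ // A minimal approximation: deschedule the running task and immediately
+ // re-enqueue it, so the scheduler gets a chance to run other queued tasks
+ // before this one resumes.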
+ let sched = Local::take::(); + do sched.deschedule_running_task_and_then |sched, task| { + sched.enqueue_task(task); + } + } } } } From ec6d4a1733b07d4cc561eda7463ad596a9d52bf0 Mon Sep 17 00:00:00 2001 From: Brian Anderson Date: Tue, 9 Jul 2013 11:05:57 -0700 Subject: [PATCH 107/111] std::rt: size_t, not u64 --- src/libstd/rt/uv/net.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/libstd/rt/uv/net.rs b/src/libstd/rt/uv/net.rs index a736d54d62a5f..b7caba849b7e1 100644 --- a/src/libstd/rt/uv/net.rs +++ b/src/libstd/rt/uv/net.rs @@ -81,8 +81,8 @@ fn uv_ip_as_ip(addr: UvIpAddr, f: &fn(IpAddr) -> T) -> T { let buf = vec::from_elem(ip_size + 1 /*null terminated*/, 0u8); unsafe { match addr { - UvIpv4(addr) => uvll::ip4_name(addr, vec::raw::to_ptr(buf), ip_size as u64), - UvIpv6(addr) => uvll::ip6_name(addr, vec::raw::to_ptr(buf), ip_size as u64), + UvIpv4(addr) => uvll::ip4_name(addr, vec::raw::to_ptr(buf), ip_size as size_t), + UvIpv6(addr) => uvll::ip6_name(addr, vec::raw::to_ptr(buf), ip_size as size_t), } }; buf From 07e52eb7fc75466d294a1fd9d614f5e0276ab834 Mon Sep 17 00:00:00 2001 From: Brian Anderson Date: Tue, 9 Jul 2013 13:29:05 -0700 Subject: [PATCH 108/111] std: Make os::set_exit_status work with newsched --- src/libstd/os.rs | 11 +++++++++-- src/libstd/rt/mod.rs | 10 +++++++++- src/libstd/rt/util.rs | 22 ++++++++++++++++++++++ src/rt/rust_builtin.cpp | 15 +++++++++++++++ src/rt/rustrt.def.in | 2 ++ 5 files changed, 57 insertions(+), 3 deletions(-) diff --git a/src/libstd/os.rs b/src/libstd/os.rs index c26020dd06d57..5c78b7c14e533 100644 --- a/src/libstd/os.rs +++ b/src/libstd/os.rs @@ -1134,8 +1134,15 @@ pub fn last_os_error() -> ~str { * ignored and the process exits with the default failure status */ pub fn set_exit_status(code: int) { - unsafe { - rustrt::rust_set_exit_status(code as libc::intptr_t); + use rt; + use rt::OldTaskContext; + + if rt::context() == OldTaskContext { + unsafe { + rustrt::rust_set_exit_status(code as libc::intptr_t); + } + } else { + rt::util::set_exit_status(code); } } diff --git a/src/libstd/rt/mod.rs b/src/libstd/rt/mod.rs index b70549c266a1e..51f4737ef85fb 100644 --- a/src/libstd/rt/mod.rs +++ b/src/libstd/rt/mod.rs @@ -260,7 +260,15 @@ pub fn run(main: ~fn()) -> int { } unsafe { - let exit_code = if exit_success { 0 } else { DEFAULT_ERROR_CODE }; + let exit_code = if exit_success { + use rt::util; + + // If we're exiting successfully, then return the global + // exit status, which can be set programmatically. 
+ util::get_exit_status() + } else { + DEFAULT_ERROR_CODE + }; (*exit_code_clone.get()).store(exit_code, SeqCst); } }; diff --git a/src/libstd/rt/util.rs b/src/libstd/rt/util.rs index 5219ae1d5406d..a1169954688b0 100644 --- a/src/libstd/rt/util.rs +++ b/src/libstd/rt/util.rs @@ -97,3 +97,25 @@ memory and partly incapable of presentation to others.", unsafe { libc::abort(); } } + +pub fn set_exit_status(code: int) { + + unsafe { + return rust_set_exit_status_newrt(code as libc::uintptr_t); + } + + extern { + fn rust_set_exit_status_newrt(code: libc::uintptr_t); + } +} + +pub fn get_exit_status() -> int { + + unsafe { + return rust_get_exit_status_newrt() as int; + } + + extern { + fn rust_get_exit_status_newrt() -> libc::uintptr_t; + } +} \ No newline at end of file diff --git a/src/rt/rust_builtin.cpp b/src/rt/rust_builtin.cpp index 8dc773a4d39c5..caa2b53b3dbe3 100644 --- a/src/rt/rust_builtin.cpp +++ b/src/rt/rust_builtin.cpp @@ -960,6 +960,21 @@ rust_get_global_args_ptr() { return &global_args_ptr; } +static lock_and_signal exit_status_lock; +static uintptr_t exit_status = 0; + +extern "C" CDECL void +rust_set_exit_status_newrt(uintptr_t code) { + scoped_lock with(exit_status_lock); + exit_status = code; +} + +extern "C" CDECL uintptr_t +rust_get_exit_status_newrt() { + scoped_lock with(exit_status_lock); + return exit_status; +} + // // Local Variables: // mode: C++ diff --git a/src/rt/rustrt.def.in b/src/rt/rustrt.def.in index cd207a0a058e1..ea614330866fe 100644 --- a/src/rt/rustrt.def.in +++ b/src/rt/rustrt.def.in @@ -270,3 +270,5 @@ rust_get_global_args_ptr rust_current_boxed_region rust_take_global_args_lock rust_drop_global_args_lock +rust_set_exit_status_newrt +rust_get_exit_status_newrt \ No newline at end of file From 2c1315719da842b8d5fcd5e59faf58f2bb408765 Mon Sep 17 00:00:00 2001 From: Brian Anderson Date: Tue, 9 Jul 2013 15:05:43 -0700 Subject: [PATCH 109/111] rt: Make the old rand builtins work with newsched --- src/rt/rust_builtin.cpp | 16 +++++----------- 1 file changed, 5 insertions(+), 11 deletions(-) diff --git a/src/rt/rust_builtin.cpp b/src/rt/rust_builtin.cpp index caa2b53b3dbe3..4a5fcf3c60432 100644 --- a/src/rt/rust_builtin.cpp +++ b/src/rt/rust_builtin.cpp @@ -86,15 +86,10 @@ rand_gen_seed(uint8_t* dest, size_t size) { extern "C" CDECL void * rand_new_seeded(uint8_t* seed, size_t seed_size) { - rust_task *task = rust_get_current_task(); - rust_rng *rng = (rust_rng *) task->malloc(sizeof(rust_rng), - "rand_new_seeded"); - if (!rng) { - task->fail(); - return NULL; - } - char *env_seed = task->kernel->env->rust_seed; - rng_init(rng, env_seed, seed, seed_size); + assert(seed != NULL); + rust_rng *rng = (rust_rng *) malloc(sizeof(rust_rng)); + assert(rng != NULL && "rng alloc failed"); + rng_init(rng, NULL, seed, seed_size); return rng; } @@ -105,8 +100,7 @@ rand_next(rust_rng *rng) { extern "C" CDECL void rand_free(rust_rng *rng) { - rust_task *task = rust_get_current_task(); - task->free(rng); + free(rng); } From 6fb92f8cab03824a52da6ad23060fe791928dcc9 Mon Sep 17 00:00:00 2001 From: Brian Anderson Date: Tue, 9 Jul 2013 15:48:13 -0700 Subject: [PATCH 110/111] std::rt: Do local tests in a bare thread to not interfere with the scheduler --- src/libstd/rt/local.rs | 51 +++++++++++++++++++++++++----------------- 1 file changed, 30 insertions(+), 21 deletions(-) diff --git a/src/libstd/rt/local.rs b/src/libstd/rt/local.rs index 374933ab281b8..b47bbf3edf0bb 100644 --- a/src/libstd/rt/local.rs +++ b/src/libstd/rt/local.rs @@ -103,46 +103,55 @@ impl Local for 
IoFactoryObject { #[cfg(test)] mod test { + use unstable::run_in_bare_thread; use rt::test::*; use rt::sched::Scheduler; use super::*; #[test] fn thread_local_scheduler_smoke_test() { - let scheduler = ~new_test_uv_sched(); - Local::put(scheduler); - let _scheduler: ~Scheduler = Local::take(); + do run_in_bare_thread { + let scheduler = ~new_test_uv_sched(); + Local::put(scheduler); + let _scheduler: ~Scheduler = Local::take(); + } } #[test] fn thread_local_scheduler_two_instances() { - let scheduler = ~new_test_uv_sched(); - Local::put(scheduler); - let _scheduler: ~Scheduler = Local::take(); - let scheduler = ~new_test_uv_sched(); - Local::put(scheduler); - let _scheduler: ~Scheduler = Local::take(); + do run_in_bare_thread { + let scheduler = ~new_test_uv_sched(); + Local::put(scheduler); + let _scheduler: ~Scheduler = Local::take(); + let scheduler = ~new_test_uv_sched(); + Local::put(scheduler); + let _scheduler: ~Scheduler = Local::take(); + } } #[test] fn borrow_smoke_test() { - let scheduler = ~new_test_uv_sched(); - Local::put(scheduler); - unsafe { - let _scheduler: *mut Scheduler = Local::unsafe_borrow(); + do run_in_bare_thread { + let scheduler = ~new_test_uv_sched(); + Local::put(scheduler); + unsafe { + let _scheduler: *mut Scheduler = Local::unsafe_borrow(); + } + let _scheduler: ~Scheduler = Local::take(); } - let _scheduler: ~Scheduler = Local::take(); } #[test] fn borrow_with_return() { - let scheduler = ~new_test_uv_sched(); - Local::put(scheduler); - let res = do Local::borrow:: |_sched| { - true - }; - assert!(res) - let _scheduler: ~Scheduler = Local::take(); + do run_in_bare_thread { + let scheduler = ~new_test_uv_sched(); + Local::put(scheduler); + let res = do Local::borrow:: |_sched| { + true + }; + assert!(res); + let _scheduler: ~Scheduler = Local::take(); + } } } From 413d51e32debf0c3f7dda2434b64d73585df21ef Mon Sep 17 00:00:00 2001 From: Brian Anderson Date: Tue, 9 Jul 2013 16:54:48 -0700 Subject: [PATCH 111/111] std::rt: Ignore 0-byte udp reads --- src/libstd/rt/uv/net.rs | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/src/libstd/rt/uv/net.rs b/src/libstd/rt/uv/net.rs index b7caba849b7e1..6d096f9885a7d 100644 --- a/src/libstd/rt/uv/net.rs +++ b/src/libstd/rt/uv/net.rs @@ -392,6 +392,13 @@ impl UdpWatcher { extern fn recv_cb(handle: *uvll::uv_udp_t, nread: ssize_t, buf: Buf, addr: *uvll::sockaddr, flags: c_uint) { + // When there's no data to read the recv callback can be a no-op. + // This can happen if read returns EAGAIN/EWOULDBLOCK. By ignoring + // this we just drop back to kqueue and wait for the next callback. + if nread == 0 { + return; + } + rtdebug!("buf addr: %x", buf.base as uint); rtdebug!("buf len: %d", buf.len as int); let mut udp_watcher: UdpWatcher = NativeHandle::from_native_handle(handle);
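
For reference, the most intricate logic added in this series is the "::" shorthand expansion that uv_ip_as_ip (src/libstd/rt/uv/net.rs) applies to the string produced by uvll::ip6_name: split once on "::", parse the hex groups on either side, and pad the middle with zero groups until there are eight. The sketch below restates that strategy as a self-contained function in present-day Rust syntax rather than the 2013 dialect used in the patches; the function name, Option-based error handling, and the main-based checks are illustrative assumptions, not part of the patched code.

// Illustrative sketch (not part of the patch series): the "::" expansion
// strategy used by uv_ip_as_ip when parsing the textual IPv6 address.
fn expand_ipv6_shorthand(s: &str) -> Option<[u16; 8]> {
    // Parse one side of the "::" into its hex groups; an empty side has no groups.
    fn groups(part: &str) -> Option<Vec<u16>> {
        if part.is_empty() {
            return Some(Vec::new());
        }
        part.split(':')
            .map(|g| u16::from_str_radix(g, 16).ok())
            .collect()
    }

    let (head, tail) = match s.split_once("::") {
        // Shorthand present: groups before and after the gap.
        Some((h, t)) => (groups(h)?, groups(t)?),
        // No shorthand: all eight groups must be written out.
        None => {
            let all = groups(s)?;
            if all.len() != 8 {
                return None;
            }
            (all, Vec::new())
        }
    };
    if head.len() + tail.len() > 8 {
        return None;
    }

    let mut out = [0u16; 8];
    out[..head.len()].copy_from_slice(&head);      // leading groups
    out[8 - tail.len()..].copy_from_slice(&tail);  // trailing groups; the gap stays zero
    Some(out)
}

fn main() {
    // "fe80::1" becomes [0xfe80, 0, 0, 0, 0, 0, 0, 1].
    assert_eq!(expand_ipv6_shorthand("fe80::1").unwrap()[0], 0xfe80);
    // "::1", the loopback address that next_test_ip6 binds to, becomes seven zeros and a one.
    assert_eq!(expand_ipv6_shorthand("::1").unwrap()[7], 1);
}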