// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

//! Task death: asynchronous killing, linked failure, exit code propagation.
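//!
//! For now this module provides only `KillHandle`, the reference-counted
//! state a task shares with whichever task will collect its exit status.
//! The asynchronous-killing and linked-failure fields are stubbed out
//! pending a future commit (see the "((more fields to be added))" notes).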

use cell::Cell;
use option::{Option, Some, None};
use prelude::*;
use unstable::sync::{UnsafeAtomicRcBox, LittleLock};
use util;

// FIXME(#7544)(bblum): think about the cache efficiency of this
struct KillHandleInner {
    // ((more fields to be added in a future commit))

    // Shared state between task and children for exit code propagation. These
    // are here so we can re-use the kill handle to implement watched children
    // tasks. Using a separate ARClike would introduce extra atomic adds/subs
    // into common spawn paths, so this is just for speed.

    // Locklessly accessed; protected by the enclosing refcount's barriers.
    any_child_failed: bool,
    // A lazy list, consuming which may unwrap() many child tombstones.
    child_tombstones: Option<~fn() -> bool>,
    // Protects multiple children simultaneously creating tombstones.
    graveyard_lock: LittleLock,
}
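
// An illustrative note (not part of the original commit): the tombstone list
// is a chain of owned closures, each of which ANDs its own "did my subtree
// succeed?" answer into the answer of the closures queued before it, so
// consuming the head closure reports success for the entire subtree. Roughly:
//
//     let t1: ~fn() -> bool = || true;           // first child succeeded
//     let t2: ~fn() -> bool = || t1() && false;  // chained on; this one failed
//     assert!(!t2());                            // consuming reports failure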

/// State shared between tasks used for task killing during linked failure.
#[deriving(Clone)]
pub struct KillHandle(UnsafeAtomicRcBox<KillHandleInner>);

impl KillHandle {
    pub fn new() -> KillHandle {
        KillHandle(UnsafeAtomicRcBox::new(KillHandleInner {
            // Linked failure fields
            // ((none yet))
            // Exit code propagation fields
            any_child_failed: false,
            child_tombstones: None,
            graveyard_lock: LittleLock(),
        }))
    }

    pub fn notify_immediate_failure(&mut self) {
        // A benign data race may happen here if there are failing sibling
        // tasks that were also spawned-watched. The refcount's write barriers
        // in UnsafeAtomicRcBox ensure that this write will be seen by the
        // unwrapper/destructor, whichever task may unwrap it.
        unsafe { (*self.get()).any_child_failed = true; }
    }
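
    // (Illustrative note, not from the original commit: the race above is
    // benign because every competing writer stores the same value -- the flag
    // only ever transitions from false to true, so it cannot matter which
    // sibling's store "wins".)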

    // For use when a task does not need to collect its children's exit
    // statuses, but the task has a parent which might want them.
    pub fn reparent_children_to(self, parent: &mut KillHandle) {
        // Optimistic path: If another child of the parent's already failed,
        // we don't need to worry about any of this.
        if unsafe { (*parent.get()).any_child_failed } {
            return;
        }

        // Try to see if all our children are gone already.
        match unsafe { self.try_unwrap() } {
            // Couldn't unwrap; children still alive. Reparent entire handle as
            // our own tombstone, to be unwrapped later.
            Left(this) => {
                let this = Cell::new(this); // :(
                do add_lazy_tombstone(parent) |other_tombstones| {
                    let this = Cell::new(this.take()); // :(
                    let others = Cell::new(other_tombstones); // :(
                    || {
                        // Prefer to check tombstones that were there first,
                        // being "more fair" at the expense of tail-recursion.
                        others.take().map_consume_default(true, |f| f()) && {
                            let mut inner = unsafe { this.take().unwrap() };
                            (!inner.any_child_failed) &&
                                inner.child_tombstones.take_map_default(true, |f| f())
                        }
                    }
                }
            }
            // Whether or not all children exited, one or more already failed.
            Right(KillHandleInner { any_child_failed: true, _ }) => {
                parent.notify_immediate_failure();
            }
            // All children exited, but some left behind tombstones that we
            // don't want to wait on now. Give them to our parent.
            Right(KillHandleInner { any_child_failed: false,
                                    child_tombstones: Some(f), _ }) => {
                let f = Cell::new(f); // :(
                do add_lazy_tombstone(parent) |other_tombstones| {
                    let f = Cell::new(f.take()); // :(
                    let others = Cell::new(other_tombstones); // :(
                    || {
                        // Prefer fairness to tail-recursion, as in above case.
                        others.take().map_consume_default(true, |f| f()) &&
                            f.take()()
                    }
                }
            }
            // All children exited, none failed. Nothing to do!
            Right(KillHandleInner { any_child_failed: false,
                                    child_tombstones: None, _ }) => { }
        }

        // NB: Takes a pthread mutex -- 'blk' not allowed to reschedule.
        fn add_lazy_tombstone(parent: &mut KillHandle,
                              blk: &fn(Option<~fn() -> bool>) -> ~fn() -> bool) {
            let inner: &mut KillHandleInner = unsafe { &mut *parent.get() };
            unsafe {
                do inner.graveyard_lock.lock {
                    // Update the current "head node" of the lazy list.
                    inner.child_tombstones =
                        Some(blk(util::replace(&mut inner.child_tombstones, None)));
                }
            }
        }
    }
}
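
// An end-to-end sketch of the intended flow (illustrative only: the
// spawn-side glue that distributes these handles is not in this file, and
// the names below are hypothetical):
//
//     let mut parent = KillHandle::new();     // owned by the parent task
//     let child = KillHandle::new();          // owned by a spawned child
//     let mut parent_ref = parent.clone();    // carried along by the child
//
//     // In the child, on failure, flag the parent's shared state:
//     parent_ref.notify_immediate_failure();
//     // On exit, hand any uncollected child tombstones up to the parent:
//     child.reparent_children_to(&mut parent);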