diff --git a/src/libcore/atomics.rs b/src/libcore/atomics.rs new file mode 100644 index 0000000000000..9edce3bd6756e --- /dev/null +++ b/src/libcore/atomics.rs @@ -0,0 +1,791 @@ +// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Core atomic primitives + +use intrinsics; +use std::kinds::marker; +use ty::Unsafe; + +/// An atomic boolean type. +pub struct AtomicBool { + v: Unsafe, + nocopy: marker::NoCopy +} + +/// A signed atomic integer type, supporting basic atomic arithmetic operations +pub struct AtomicInt { + v: Unsafe, + nocopy: marker::NoCopy +} + +/// An unsigned atomic integer type, supporting basic atomic arithmetic operations +pub struct AtomicUint { + v: Unsafe, + nocopy: marker::NoCopy +} + +/// An unsafe atomic pointer. Only supports basic atomic operations +pub struct AtomicPtr { + p: Unsafe, + nocopy: marker::NoCopy +} + +/// Atomic memory orderings +/// +/// Memory orderings limit the ways that both the compiler and CPU may reorder +/// instructions around atomic operations. At its most restrictive, +/// "sequentially consistent" atomics allow neither reads nor writes +/// to be moved either before or after the atomic operation; on the other end +/// "relaxed" atomics allow all reorderings. +/// +/// Rust's memory orderings are the same as in C++[1]. +/// +/// 1: http://gcc.gnu.org/wiki/Atomic/GCCMM/AtomicSync +pub enum Ordering { + /// No ordering constraints, only atomic operations + Relaxed, + /// When coupled with a store, all previous writes become visible + /// to another thread that performs a load with `Acquire` ordering + /// on the same value + Release, + /// When coupled with a load, all subsequent loads will see data + /// written before a store with `Release` ordering on the same value + /// in another thread + Acquire, + /// When coupled with a load, uses `Acquire` ordering, and with a store + /// `Release` ordering + AcqRel, + /// Like `AcqRel` with the additional guarantee that all threads see all + /// sequentially consistent operations in the same order. + SeqCst +} + +/// An `AtomicBool` initialized to `false` +pub static INIT_ATOMIC_BOOL : AtomicBool = AtomicBool { v: Unsafe{value: 0, + marker1: marker::InvariantType}, + nocopy: marker::NoCopy }; +/// An `AtomicInt` initialized to `0` +pub static INIT_ATOMIC_INT : AtomicInt = AtomicInt { v: Unsafe{value: 0, + marker1: marker::InvariantType}, + nocopy: marker::NoCopy }; +/// An `AtomicUint` initialized to `0` +pub static INIT_ATOMIC_UINT : AtomicUint = AtomicUint { v: Unsafe{value: 0, + marker1: marker::InvariantType}, + nocopy: marker::NoCopy }; + +// NB: Needs to be -1 (0b11111111...) 
to make fetch_nand work correctly +static UINT_TRUE: uint = -1; + +impl AtomicBool { + /// Create a new `AtomicBool` + pub fn new(v: bool) -> AtomicBool { + let val = if v { UINT_TRUE } else { 0 }; + AtomicBool { v: Unsafe::new(val), nocopy: marker::NoCopy } + } + + /// Load the value + #[inline] + pub fn load(&self, order: Ordering) -> bool { + unsafe { atomic_load(self.v.get() as *uint, order) > 0 } + } + + /// Store the value + #[inline] + pub fn store(&self, val: bool, order: Ordering) { + let val = if val { UINT_TRUE } else { 0 }; + + unsafe { atomic_store(self.v.get(), val, order); } + } + + /// Store a value, returning the old value + #[inline] + pub fn swap(&self, val: bool, order: Ordering) -> bool { + let val = if val { UINT_TRUE } else { 0 }; + + unsafe { atomic_swap(self.v.get(), val, order) > 0 } + } + + /// If the current value is the same as expected, store a new value + /// + /// Compare the current value with `old`; if they are the same then + /// replace the current value with `new`. Return the previous value. + /// If the return value is equal to `old` then the value was updated. + /// + /// # Examples + /// + /// ```ignore + /// # // FIXME: Needs PR #12430 + /// extern crate sync; + /// + /// use sync::Arc; + /// use std::sync::atomics::{AtomicBool, SeqCst}; + /// + /// fn main() { + /// let spinlock = Arc::new(AtomicBool::new(false)); + /// let spinlock_clone = spin_lock.clone(); + /// + /// spawn(proc() { + /// with_lock(&spinlock, || println!("task 1 in lock")); + /// }); + /// + /// spawn(proc() { + /// with_lock(&spinlock_clone, || println!("task 2 in lock")); + /// }); + /// } + /// + /// fn with_lock(spinlock: &Arc, f: || -> ()) { + /// // CAS loop until we are able to replace `false` with `true` + /// while spinlock.compare_and_swap(false, true, SeqCst) == false { + /// // Since tasks may not be preemptive (if they are green threads) + /// // yield to the scheduler to let the other task run. Low level + /// // concurrent code needs to take into account Rust's two threading + /// // models. + /// deschedule(); + /// } + /// + /// // Now we have the spinlock + /// f(); + /// + /// // Release the lock + /// spinlock.store(false); + /// } + /// ``` + #[inline] + pub fn compare_and_swap(&self, old: bool, new: bool, order: Ordering) -> bool { + let old = if old { UINT_TRUE } else { 0 }; + let new = if new { UINT_TRUE } else { 0 }; + + unsafe { atomic_compare_and_swap(self.v.get(), old, new, order) > 0 } + } + + /// A logical "and" operation + /// + /// Performs a logical "and" operation on the current value and the + /// argument `val`, and sets the new value to the result. + /// Returns the previous value. 
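For reference, the spinlock idea in the `compare_and_swap` doc example above (which is marked `ignore` pending PR #12430 and uses pre-1.0 APIs), as a self-contained sketch against today's `std::sync::atomic`; the modern names used here (`compare_exchange`, `Ordering::Acquire`, `std::thread`, `std::hint::spin_loop`) are assumptions relative to this patch:

```rust
use std::sync::Arc;
use std::sync::atomic::{AtomicBool, Ordering};
use std::thread;

// Spin until we swap `false` -> `true`; `compare_exchange` reports the
// previous value, so `Ok(_)` means this thread acquired the lock.
fn with_lock(lock: &AtomicBool, f: impl FnOnce()) {
    while lock
        .compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed)
        .is_err()
    {
        std::hint::spin_loop();
    }
    f();
    // Release the lock so the next acquirer sees all writes made inside `f`.
    lock.store(false, Ordering::Release);
}

fn main() {
    let lock = Arc::new(AtomicBool::new(false));
    let handles: Vec<_> = (0..2)
        .map(|i| {
            let lock = Arc::clone(&lock);
            thread::spawn(move || with_lock(&lock, || println!("thread {} in lock", i)))
        })
        .collect();
    for handle in handles {
        handle.join().unwrap();
    }
}
```

Unlike the doc example, which yields to the scheduler because tasks may be green threads, this sketch simply spins on native threads.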
+ /// + /// # Examples + /// + /// ``` + /// use std::sync::atomics::{AtomicBool, SeqCst}; + /// + /// let foo = AtomicBool::new(true); + /// assert_eq!(true, foo.fetch_and(false, SeqCst)); + /// assert_eq!(false, foo.load(SeqCst)); + /// + /// let foo = AtomicBool::new(true); + /// assert_eq!(true, foo.fetch_and(true, SeqCst)); + /// assert_eq!(true, foo.load(SeqCst)); + /// + /// let foo = AtomicBool::new(false); + /// assert_eq!(false, foo.fetch_and(false, SeqCst)); + /// assert_eq!(false, foo.load(SeqCst)); + /// ``` + #[inline] + pub fn fetch_and(&self, val: bool, order: Ordering) -> bool { + let val = if val { UINT_TRUE } else { 0 }; + + unsafe { atomic_and(self.v.get(), val, order) > 0 } + } + + /// A logical "nand" operation + /// + /// Performs a logical "nand" operation on the current value and the + /// argument `val`, and sets the new value to the result. + /// Returns the previous value. + /// + /// # Examples + /// + /// ``` + /// use std::sync::atomics::{AtomicBool, SeqCst}; + /// + /// let foo = AtomicBool::new(true); + /// assert_eq!(true, foo.fetch_nand(false, SeqCst)); + /// assert_eq!(true, foo.load(SeqCst)); + /// + /// let foo = AtomicBool::new(true); + /// assert_eq!(true, foo.fetch_nand(true, SeqCst)); + /// assert_eq!(0, foo.load(SeqCst) as int); + /// assert_eq!(false, foo.load(SeqCst)); + /// + /// let foo = AtomicBool::new(false); + /// assert_eq!(false, foo.fetch_nand(false, SeqCst)); + /// assert_eq!(true, foo.load(SeqCst)); + /// ``` + #[inline] + pub fn fetch_nand(&self, val: bool, order: Ordering) -> bool { + let val = if val { UINT_TRUE } else { 0 }; + + unsafe { atomic_nand(self.v.get(), val, order) > 0 } + } + + /// A logical "or" operation + /// + /// Performs a logical "or" operation on the current value and the + /// argument `val`, and sets the new value to the result. + /// Returns the previous value. + /// + /// # Examples + /// + /// ``` + /// use std::sync::atomics::{AtomicBool, SeqCst}; + /// + /// let foo = AtomicBool::new(true); + /// assert_eq!(true, foo.fetch_or(false, SeqCst)); + /// assert_eq!(true, foo.load(SeqCst)); + /// + /// let foo = AtomicBool::new(true); + /// assert_eq!(true, foo.fetch_or(true, SeqCst)); + /// assert_eq!(true, foo.load(SeqCst)); + /// + /// let foo = AtomicBool::new(false); + /// assert_eq!(false, foo.fetch_or(false, SeqCst)); + /// assert_eq!(false, foo.load(SeqCst)); + /// ``` + #[inline] + pub fn fetch_or(&self, val: bool, order: Ordering) -> bool { + let val = if val { UINT_TRUE } else { 0 }; + + unsafe { atomic_or(self.v.get(), val, order) > 0 } + } + + /// A logical "xor" operation + /// + /// Performs a logical "xor" operation on the current value and the + /// argument `val`, and sets the new value to the result. + /// Returns the previous value. 
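A plain, non-atomic sketch of why the `UINT_TRUE` constant above must be all ones (`-1`) rather than `1` for `fetch_nand` to behave: loads read the word back with `> 0`, so the NAND of two true values has to come out as exactly zero.

```rust
// Why `true` is encoded as an all-ones word in AtomicBool: loads use `> 0`,
// so the NAND of two "true" words must be exactly zero to read back as false.
fn main() {
    const TRUE_ALL_ONES: usize = !0; // the patch's UINT_TRUE (-1 as uint)
    const TRUE_AS_ONE: usize = 1;    // a naive encoding, for comparison

    // All-ones encoding: !(T & T) == 0, which a later load reads as `false`.
    assert_eq!(!(TRUE_ALL_ONES & TRUE_ALL_ONES), 0);

    // `1` encoding: !(1 & 1) is a non-zero word, so `> 0` would still read `true`.
    assert_ne!(!(TRUE_AS_ONE & TRUE_AS_ONE), 0);

    println!("fetch_nand(true, true) is only false under the all-ones encoding");
}
```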
+ /// + /// # Examples + /// + /// ``` + /// use std::sync::atomics::{AtomicBool, SeqCst}; + /// + /// let foo = AtomicBool::new(true); + /// assert_eq!(true, foo.fetch_xor(false, SeqCst)); + /// assert_eq!(true, foo.load(SeqCst)); + /// + /// let foo = AtomicBool::new(true); + /// assert_eq!(true, foo.fetch_xor(true, SeqCst)); + /// assert_eq!(false, foo.load(SeqCst)); + /// + /// let foo = AtomicBool::new(false); + /// assert_eq!(false, foo.fetch_xor(false, SeqCst)); + /// assert_eq!(false, foo.load(SeqCst)); + /// ``` + #[inline] + pub fn fetch_xor(&self, val: bool, order: Ordering) -> bool { + let val = if val { UINT_TRUE } else { 0 }; + + unsafe { atomic_xor(self.v.get(), val, order) > 0 } + } +} + +impl AtomicInt { + /// Create a new `AtomicInt` + pub fn new(v: int) -> AtomicInt { + AtomicInt {v: Unsafe::new(v), nocopy: marker::NoCopy} + } + + /// Load the value + #[inline] + pub fn load(&self, order: Ordering) -> int { + unsafe { atomic_load(self.v.get() as *int, order) } + } + + /// Store the value + #[inline] + pub fn store(&self, val: int, order: Ordering) { + unsafe { atomic_store(self.v.get(), val, order); } + } + + /// Store a value, returning the old value + #[inline] + pub fn swap(&self, val: int, order: Ordering) -> int { + unsafe { atomic_swap(self.v.get(), val, order) } + } + + /// If the current value is the same as expected, store a new value + /// + /// Compare the current value with `old`; if they are the same then + /// replace the current value with `new`. Return the previous value. + /// If the return value is equal to `old` then the value was updated. + #[inline] + pub fn compare_and_swap(&self, old: int, new: int, order: Ordering) -> int { + unsafe { atomic_compare_and_swap(self.v.get(), old, new, order) } + } + + /// Add to the current value, returning the previous + /// + /// # Examples + /// + /// ``` + /// use std::sync::atomics::{AtomicInt, SeqCst}; + /// + /// let foo = AtomicInt::new(0); + /// assert_eq!(0, foo.fetch_add(10, SeqCst)); + /// assert_eq!(10, foo.load(SeqCst)); + /// ``` + #[inline] + pub fn fetch_add(&self, val: int, order: Ordering) -> int { + unsafe { atomic_add(self.v.get(), val, order) } + } + + /// Subtract from the current value, returning the previous + /// + /// # Examples + /// + /// ``` + /// use std::sync::atomics::{AtomicInt, SeqCst}; + /// + /// let foo = AtomicInt::new(0); + /// assert_eq!(0, foo.fetch_sub(10, SeqCst)); + /// assert_eq!(-10, foo.load(SeqCst)); + /// ``` + #[inline] + pub fn fetch_sub(&self, val: int, order: Ordering) -> int { + unsafe { atomic_sub(self.v.get(), val, order) } + } + + /// Bitwise and with the current value, returning the previous + /// + /// # Examples + /// + /// ``` + /// use std::sync::atomics::{AtomicUint, SeqCst}; + /// + /// let foo = AtomicUint::new(0b101101); + /// assert_eq!(0b101101, foo.fetch_and(0b110011, SeqCst)); + /// assert_eq!(0b100001, foo.load(SeqCst)); + #[inline] + pub fn fetch_and(&self, val: int, order: Ordering) -> int { + unsafe { atomic_and(self.v.get(), val, order) } + } + + /// Bitwise or with the current value, returning the previous + /// + /// # Examples + /// + /// ``` + /// use std::sync::atomics::{AtomicUint, SeqCst}; + /// + /// let foo = AtomicUint::new(0b101101); + /// assert_eq!(0b101101, foo.fetch_or(0b110011, SeqCst)); + /// assert_eq!(0b111111, foo.load(SeqCst)); + #[inline] + pub fn fetch_or(&self, val: int, order: Ordering) -> int { + unsafe { atomic_or(self.v.get(), val, order) } + } + + /// Bitwise xor with the current value, returning the previous + 
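The `fetch_add`/`fetch_sub` docs above are all that is needed for a shared counter; a minimal sketch in today's `std::sync::atomic` spelling (`AtomicIsize` and `Ordering::SeqCst` stand in for this patch's `AtomicInt` and bare `SeqCst`):

```rust
use std::sync::atomic::{AtomicIsize, Ordering};
use std::thread;

// A counter shared by several threads; fetch_add returns the previous value,
// which this example simply ignores.
static COUNTER: AtomicIsize = AtomicIsize::new(0);

fn main() {
    let handles: Vec<_> = (0..4)
        .map(|_| {
            thread::spawn(|| {
                for _ in 0..1000 {
                    COUNTER.fetch_add(1, Ordering::SeqCst);
                }
            })
        })
        .collect();
    for handle in handles {
        handle.join().unwrap();
    }
    assert_eq!(COUNTER.load(Ordering::SeqCst), 4000);
}
```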
/// + /// # Examples + /// + /// ``` + /// use std::sync::atomics::{AtomicUint, SeqCst}; + /// + /// let foo = AtomicUint::new(0b101101); + /// assert_eq!(0b101101, foo.fetch_xor(0b110011, SeqCst)); + /// assert_eq!(0b011110, foo.load(SeqCst)); + #[inline] + pub fn fetch_xor(&self, val: int, order: Ordering) -> int { + unsafe { atomic_xor(self.v.get(), val, order) } + } +} + +impl AtomicUint { + /// Create a new `AtomicUint` + pub fn new(v: uint) -> AtomicUint { + AtomicUint { v: Unsafe::new(v), nocopy: marker::NoCopy } + } + + /// Load the value + #[inline] + pub fn load(&self, order: Ordering) -> uint { + unsafe { atomic_load(self.v.get() as *uint, order) } + } + + /// Store the value + #[inline] + pub fn store(&self, val: uint, order: Ordering) { + unsafe { atomic_store(self.v.get(), val, order); } + } + + /// Store a value, returning the old value + #[inline] + pub fn swap(&self, val: uint, order: Ordering) -> uint { + unsafe { atomic_swap(self.v.get(), val, order) } + } + + /// If the current value is the same as expected, store a new value + /// + /// Compare the current value with `old`; if they are the same then + /// replace the current value with `new`. Return the previous value. + /// If the return value is equal to `old` then the value was updated. + #[inline] + pub fn compare_and_swap(&self, old: uint, new: uint, order: Ordering) -> uint { + unsafe { atomic_compare_and_swap(self.v.get(), old, new, order) } + } + + /// Add to the current value, returning the previous + /// + /// # Examples + /// + /// ``` + /// use std::sync::atomics::{AtomicUint, SeqCst}; + /// + /// let foo = AtomicUint::new(0); + /// assert_eq!(0, foo.fetch_add(10, SeqCst)); + /// assert_eq!(10, foo.load(SeqCst)); + /// ``` + #[inline] + pub fn fetch_add(&self, val: uint, order: Ordering) -> uint { + unsafe { atomic_add(self.v.get(), val, order) } + } + + /// Subtract from the current value, returning the previous + /// + /// # Examples + /// + /// ``` + /// use std::sync::atomics::{AtomicUint, SeqCst}; + /// + /// let foo = AtomicUint::new(10); + /// assert_eq!(10, foo.fetch_sub(10, SeqCst)); + /// assert_eq!(0, foo.load(SeqCst)); + /// ``` + #[inline] + pub fn fetch_sub(&self, val: uint, order: Ordering) -> uint { + unsafe { atomic_sub(self.v.get(), val, order) } + } + + /// Bitwise and with the current value, returning the previous + /// + /// # Examples + /// + /// ``` + /// use std::sync::atomics::{AtomicUint, SeqCst}; + /// + /// let foo = AtomicUint::new(0b101101); + /// assert_eq!(0b101101, foo.fetch_and(0b110011, SeqCst)); + /// assert_eq!(0b100001, foo.load(SeqCst)); + #[inline] + pub fn fetch_and(&self, val: uint, order: Ordering) -> uint { + unsafe { atomic_and(self.v.get(), val, order) } + } + + /// Bitwise or with the current value, returning the previous + /// + /// # Examples + /// + /// ``` + /// use std::sync::atomics::{AtomicUint, SeqCst}; + /// + /// let foo = AtomicUint::new(0b101101); + /// assert_eq!(0b101101, foo.fetch_or(0b110011, SeqCst)); + /// assert_eq!(0b111111, foo.load(SeqCst)); + #[inline] + pub fn fetch_or(&self, val: uint, order: Ordering) -> uint { + unsafe { atomic_or(self.v.get(), val, order) } + } + + /// Bitwise xor with the current value, returning the previous + /// + /// # Examples + /// + /// ``` + /// use std::sync::atomics::{AtomicUint, SeqCst}; + /// + /// let foo = AtomicUint::new(0b101101); + /// assert_eq!(0b101101, foo.fetch_xor(0b110011, SeqCst)); + /// assert_eq!(0b011110, foo.load(SeqCst)); + #[inline] + pub fn fetch_xor(&self, val: uint, order: Ordering) 
-> uint { + unsafe { atomic_xor(self.v.get(), val, order) } + } +} + +impl AtomicPtr { + /// Create a new `AtomicPtr` + pub fn new(p: *mut T) -> AtomicPtr { + AtomicPtr { p: Unsafe::new(p as uint), nocopy: marker::NoCopy } + } + + /// Load the value + #[inline] + pub fn load(&self, order: Ordering) -> *mut T { + unsafe { + atomic_load(self.p.get() as **mut T, order) as *mut T + } + } + + /// Store the value + #[inline] + pub fn store(&self, ptr: *mut T, order: Ordering) { + unsafe { atomic_store(self.p.get(), ptr as uint, order); } + } + + /// Store a value, returning the old value + #[inline] + pub fn swap(&self, ptr: *mut T, order: Ordering) -> *mut T { + unsafe { atomic_swap(self.p.get(), ptr as uint, order) as *mut T } + } + + /// If the current value is the same as expected, store a new value + /// + /// Compare the current value with `old`; if they are the same then + /// replace the current value with `new`. Return the previous value. + /// If the return value is equal to `old` then the value was updated. + #[inline] + pub fn compare_and_swap(&self, old: *mut T, new: *mut T, order: Ordering) -> *mut T { + unsafe { + atomic_compare_and_swap(self.p.get(), old as uint, + new as uint, order) as *mut T + } + } +} + +#[inline] +unsafe fn atomic_store(dst: *mut T, val: T, order:Ordering) { + match order { + Release => intrinsics::atomic_store_rel(dst, val), + Relaxed => intrinsics::atomic_store_relaxed(dst, val), + _ => intrinsics::atomic_store(dst, val) + } +} + +#[inline] +unsafe fn atomic_load(dst: *T, order:Ordering) -> T { + match order { + Acquire => intrinsics::atomic_load_acq(dst), + Relaxed => intrinsics::atomic_load_relaxed(dst), + _ => intrinsics::atomic_load(dst) + } +} + +#[inline] +unsafe fn atomic_swap(dst: *mut T, val: T, order: Ordering) -> T { + match order { + Acquire => intrinsics::atomic_xchg_acq(dst, val), + Release => intrinsics::atomic_xchg_rel(dst, val), + AcqRel => intrinsics::atomic_xchg_acqrel(dst, val), + Relaxed => intrinsics::atomic_xchg_relaxed(dst, val), + _ => intrinsics::atomic_xchg(dst, val) + } +} + +/// Returns the old value (like __sync_fetch_and_add). +#[inline] +unsafe fn atomic_add(dst: *mut T, val: T, order: Ordering) -> T { + match order { + Acquire => intrinsics::atomic_xadd_acq(dst, val), + Release => intrinsics::atomic_xadd_rel(dst, val), + AcqRel => intrinsics::atomic_xadd_acqrel(dst, val), + Relaxed => intrinsics::atomic_xadd_relaxed(dst, val), + _ => intrinsics::atomic_xadd(dst, val) + } +} + +/// Returns the old value (like __sync_fetch_and_sub). 
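`AtomicPtr` above offers only `load`/`store`/`swap`/`compare_and_swap`, but that is enough to publish and reclaim a heap allocation. A sketch using the current `std::sync::atomic::AtomicPtr` with `Box::into_raw`/`Box::from_raw` (modern names, not part of this patch, which stores the pointer as a `uint` inside `Unsafe`):

```rust
use std::sync::atomic::{AtomicPtr, Ordering};

fn main() {
    // A null pointer marks the slot as empty.
    let shared: AtomicPtr<i32> = AtomicPtr::new(std::ptr::null_mut());

    // Publish a heap value by swapping its raw pointer into the slot.
    let old = shared.swap(Box::into_raw(Box::new(42)), Ordering::AcqRel);
    assert!(old.is_null());

    // Take it back out; swapping null in marks the slot as empty again.
    let taken = shared.swap(std::ptr::null_mut(), Ordering::AcqRel);
    if !taken.is_null() {
        // Safety: `taken` came from Box::into_raw above and is reclaimed once.
        let value = unsafe { Box::from_raw(taken) };
        assert_eq!(*value, 42);
    }
}
```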
+#[inline] +unsafe fn atomic_sub<T>(dst: *mut T, val: T, order: Ordering) -> T { + match order { + Acquire => intrinsics::atomic_xsub_acq(dst, val), + Release => intrinsics::atomic_xsub_rel(dst, val), + AcqRel => intrinsics::atomic_xsub_acqrel(dst, val), + Relaxed => intrinsics::atomic_xsub_relaxed(dst, val), + _ => intrinsics::atomic_xsub(dst, val) + } +} + +#[inline] +unsafe fn atomic_compare_and_swap<T>(dst: *mut T, old: T, new: T, order: Ordering) -> T { + match order { + Acquire => intrinsics::atomic_cxchg_acq(dst, old, new), + Release => intrinsics::atomic_cxchg_rel(dst, old, new), + AcqRel => intrinsics::atomic_cxchg_acqrel(dst, old, new), + Relaxed => intrinsics::atomic_cxchg_relaxed(dst, old, new), + _ => intrinsics::atomic_cxchg(dst, old, new), + } +} + +#[inline] +unsafe fn atomic_and<T>(dst: *mut T, val: T, order: Ordering) -> T { + match order { + Acquire => intrinsics::atomic_and_acq(dst, val), + Release => intrinsics::atomic_and_rel(dst, val), + AcqRel => intrinsics::atomic_and_acqrel(dst, val), + Relaxed => intrinsics::atomic_and_relaxed(dst, val), + _ => intrinsics::atomic_and(dst, val) + } +} + +#[inline] +unsafe fn atomic_nand<T>(dst: *mut T, val: T, order: Ordering) -> T { + match order { + Acquire => intrinsics::atomic_nand_acq(dst, val), + Release => intrinsics::atomic_nand_rel(dst, val), + AcqRel => intrinsics::atomic_nand_acqrel(dst, val), + Relaxed => intrinsics::atomic_nand_relaxed(dst, val), + _ => intrinsics::atomic_nand(dst, val) + } +} + + +#[inline] +unsafe fn atomic_or<T>(dst: *mut T, val: T, order: Ordering) -> T { + match order { + Acquire => intrinsics::atomic_or_acq(dst, val), + Release => intrinsics::atomic_or_rel(dst, val), + AcqRel => intrinsics::atomic_or_acqrel(dst, val), + Relaxed => intrinsics::atomic_or_relaxed(dst, val), + _ => intrinsics::atomic_or(dst, val) + } +} + + +#[inline] +unsafe fn atomic_xor<T>(dst: *mut T, val: T, order: Ordering) -> T { + match order { + Acquire => intrinsics::atomic_xor_acq(dst, val), + Release => intrinsics::atomic_xor_rel(dst, val), + AcqRel => intrinsics::atomic_xor_acqrel(dst, val), + Relaxed => intrinsics::atomic_xor_relaxed(dst, val), + _ => intrinsics::atomic_xor(dst, val) + } +} + + +/// An atomic fence. +/// +/// A fence 'A' which has `Release` ordering semantics, synchronizes with a +/// fence 'B' with (at least) `Acquire` semantics, if and only if there exist +/// atomic operations X and Y, both operating on some atomic object 'M', such +/// that A is sequenced before X, Y is sequenced before B, and Y observes +/// the change to M. This provides a happens-before dependence between A and B. +/// +/// Atomic operations with `Release` or `Acquire` semantics can also synchronize +/// with a fence. +/// +/// A fence which has `SeqCst` ordering, in addition to having both `Acquire` and +/// `Release` semantics, participates in the global program order of the other +/// `SeqCst` operations and/or fences. +/// +/// Accepts `Acquire`, `Release`, `AcqRel` and `SeqCst` orderings.
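A runnable illustration of the fence rule described above: a `Release` fence before a relaxed store synchronizes with an `Acquire` fence placed after a relaxed load that observes that store. Written against today's `std::sync::atomic` (the `Ordering::` paths and `AtomicUsize` are modern spellings, not this patch's API):

```rust
use std::sync::atomic::{fence, AtomicBool, AtomicUsize, Ordering};
use std::thread;

static DATA: AtomicUsize = AtomicUsize::new(0);
static READY: AtomicBool = AtomicBool::new(false);

fn main() {
    let producer = thread::spawn(|| {
        DATA.store(42, Ordering::Relaxed);
        fence(Ordering::Release);             // fence A, sequenced before X
        READY.store(true, Ordering::Relaxed); // X: the store the consumer observes
    });
    let consumer = thread::spawn(|| {
        while !READY.load(Ordering::Relaxed) {} // Y: observes the change to READY
        fence(Ordering::Acquire);               // fence B: A now happens-before B
        assert_eq!(DATA.load(Ordering::Relaxed), 42);
    });
    producer.join().unwrap();
    consumer.join().unwrap();
}
```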
+/// +/// # Failure +/// +/// Fails if `order` is `Relaxed` +#[inline] +pub fn fence(order: Ordering) { + unsafe { + match order { + Acquire => intrinsics::atomic_fence_acq(), + Release => intrinsics::atomic_fence_rel(), + AcqRel => intrinsics::atomic_fence_acqrel(), + SeqCst => intrinsics::atomic_fence(), + Relaxed => fail!("there is no such thing as a relaxed fence") + } + } +} + +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn bool_() { + let a = AtomicBool::new(false); + assert_eq!(a.compare_and_swap(false, true, SeqCst), false); + assert_eq!(a.compare_and_swap(false, true, SeqCst), true); + + a.store(false, SeqCst); + assert_eq!(a.compare_and_swap(false, true, SeqCst), false); + } + + #[test] + fn bool_and() { + let a = AtomicBool::new(true); + assert_eq!(a.fetch_and(false, SeqCst),true); + assert_eq!(a.load(SeqCst),false); + } + + #[test] + fn uint_and() { + let x = AtomicUint::new(0xf731); + assert_eq!(x.fetch_and(0x137f, SeqCst), 0xf731); + assert_eq!(x.load(SeqCst), 0xf731 & 0x137f); + } + + #[test] + fn uint_or() { + let x = AtomicUint::new(0xf731); + assert_eq!(x.fetch_or(0x137f, SeqCst), 0xf731); + assert_eq!(x.load(SeqCst), 0xf731 | 0x137f); + } + + #[test] + fn uint_xor() { + let x = AtomicUint::new(0xf731); + assert_eq!(x.fetch_xor(0x137f, SeqCst), 0xf731); + assert_eq!(x.load(SeqCst), 0xf731 ^ 0x137f); + } + + #[test] + fn int_and() { + let x = AtomicInt::new(0xf731); + assert_eq!(x.fetch_and(0x137f, SeqCst), 0xf731); + assert_eq!(x.load(SeqCst), 0xf731 & 0x137f); + } + + #[test] + fn int_or() { + let x = AtomicInt::new(0xf731); + assert_eq!(x.fetch_or(0x137f, SeqCst), 0xf731); + assert_eq!(x.load(SeqCst), 0xf731 | 0x137f); + } + + #[test] + fn int_xor() { + let x = AtomicInt::new(0xf731); + assert_eq!(x.fetch_xor(0x137f, SeqCst), 0xf731); + assert_eq!(x.load(SeqCst), 0xf731 ^ 0x137f); + } + + static mut S_BOOL : AtomicBool = INIT_ATOMIC_BOOL; + static mut S_INT : AtomicInt = INIT_ATOMIC_INT; + static mut S_UINT : AtomicUint = INIT_ATOMIC_UINT; + + #[test] + fn static_init() { + unsafe { + assert!(!S_BOOL.load(SeqCst)); + assert!(S_INT.load(SeqCst) == 0); + assert!(S_UINT.load(SeqCst) == 0); + } + } + + #[test] + fn different_sizes() { + unsafe { + let mut slot = 0u16; + assert_eq!(super::atomic_swap(&mut slot, 1, SeqCst), 0); + + let mut slot = 0u8; + assert_eq!(super::atomic_compare_and_swap(&mut slot, 1, 2, SeqCst), 0); + + let slot = 0u32; + assert_eq!(super::atomic_load(&slot, SeqCst), 0); + + let mut slot = 0u64; + super::atomic_store(&mut slot, 2, SeqCst); + } + } +} diff --git a/src/libcore/failure.rs b/src/libcore/failure.rs index 2296e663033cb..8a28f7b13928f 100644 --- a/src/libcore/failure.rs +++ b/src/libcore/failure.rs @@ -10,7 +10,7 @@ //! Failure support for libcore -#![allow(dead_code)] +#![allow(dead_code, missing_doc)] #[cfg(not(test))] use str::raw::c_str_to_static_slice; diff --git a/src/libcore/lib.rs b/src/libcore/lib.rs index 97b086a093d2a..4eab7e9d45d35 100644 --- a/src/libcore/lib.rs +++ b/src/libcore/lib.rs @@ -9,6 +9,27 @@ // except according to those terms. //! The Rust core library +//! +//! This library is meant to represent the core functionality of rust that is +//! maximally portable to other platforms. To that exent, this library has no +//! knowledge of things like allocation, threads, I/O, etc. This library is +//! built on the assumption of a few existing symbols: +//! +//! * `memcpy`, `memcmp`, `memset` - These are core memory routines which are +//! often generated by LLVM. Additionally, this library can make explicit +//! 
calls to these functions. Their signatures are the same as found in C. +//! +//! * `rust_begin_unwind` - This function takes three arguments, a +//! `&fmt::Arguments`, a `&str`, and a `uint`. These three arguments dictate +//! the failure message, the file at which failure was invoked, and the line. +//! It is up to consumers of this core library to define this failure +//! function; it is only required to never return. +//! +//! Currently, it is *not* recommended to use the core library. The stable +//! functionality of libcore is exported directly into the +//! [standard library](../std/index.html). The composition of this library is +//! subject to change over time; only the interface exposed through libstd is +//! intended to be stable. #![crate_id = "core#0.11.0-pre"] #![license = "MIT/ASL2"] @@ -81,9 +102,11 @@ pub mod container; mod unicode; mod unit; pub mod any; +pub mod atomics; pub mod bool; pub mod cell; pub mod char; +pub mod failure; pub mod finally; pub mod iter; pub mod option; @@ -96,13 +119,15 @@ pub mod tuple; #[cfg(stage0, not(test))] pub mod owned; -mod failure; - // FIXME: this module should not exist. Once owned allocations are no longer a // language type, this module can move outside to the owned allocation // crate. mod should_not_exist; +mod core { + pub use failure; +} + mod std { pub use clone; pub use cmp; diff --git a/src/libcore/macros.rs b/src/libcore/macros.rs index 50d5cd81ba00e..69be68a34a130 100644 --- a/src/libcore/macros.rs +++ b/src/libcore/macros.rs @@ -17,7 +17,7 @@ macro_rules! fail( fail!("explicit failure") ); ($msg:expr) => ( - ::failure::begin_unwind($msg, file!(), line!()) + ::core::failure::begin_unwind($msg, file!(), line!()) ); ) diff --git a/src/libcore/should_not_exist.rs b/src/libcore/should_not_exist.rs index 7ecf53a634893..9272f24da9d40 100644 --- a/src/libcore/should_not_exist.rs +++ b/src/libcore/should_not_exist.rs @@ -8,6 +8,22 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +// As noted by this file's name, this file should not exist. This file should not +// exist because it performs allocations, which libcore is not allowed to do. The +// reason for this file's existence is that the `~[T]` and `~str` types are +// language-defined types. Traits are defined in libcore, such as `Clone`, which +// these types need to implement, but the implementation can only be found in +// libcore. +// +// Plan of attack for solving this problem: +// +// 1. Implement DST +// 2. Make `Box` not a language feature +// 3. Move `Box` to a separate crate, liballoc +// 4. Implement relevant traits in liballoc, not libcore +// +// Currently, no progress has been made on this list. + use char::Char; use clone::Clone; use container::Container; diff --git a/src/libstd/sync/atomics.rs b/src/libstd/sync/atomics.rs index 6ddae97e901ae..b2565a6a449e3 100644 --- a/src/libstd/sync/atomics.rs +++ b/src/libstd/sync/atomics.rs @@ -105,39 +105,15 @@ //! } //! ``` -#![allow(missing_doc)] - -use intrinsics; use mem; use ops::Drop; use option::{Option,Some,None}; use owned::Box; -use std::kinds::marker; -use ty::Unsafe; -/// An atomic boolean type.
-pub struct AtomicBool { - v: Unsafe, - nocopy: marker::NoCopy -} - -/// A signed atomic integer type, supporting basic atomic arithmetic operations -pub struct AtomicInt { - v: Unsafe, - nocopy: marker::NoCopy -} - -/// An unsigned atomic integer type, supporting basic atomic arithmetic operations -pub struct AtomicUint { - v: Unsafe, - nocopy: marker::NoCopy -} - -/// An unsafe atomic pointer. Only supports basic atomic operations -pub struct AtomicPtr { - p: Unsafe, - nocopy: marker::NoCopy -} +pub use core::atomics::{AtomicBool, AtomicInt, AtomicUint, AtomicPtr}; +pub use core::atomics::{Ordering, Relaxed, Release, Acquire, AcqRel, SeqCst}; +pub use core::atomics::{INIT_ATOMIC_BOOL, INIT_ATOMIC_INT, INIT_ATOMIC_UINT}; +pub use core::atomics::fence; /// An atomic, nullable unique pointer /// @@ -145,544 +121,26 @@ pub struct AtomicPtr { /// owned heap objects across tasks. #[unsafe_no_drop_flag] pub struct AtomicOption { - p: Unsafe, -} - -/// Atomic memory orderings -/// -/// Memory orderings limit the ways that both the compiler and CPU may reorder -/// instructions around atomic operations. At its most restrictive, -/// "sequentially consistent" atomics allow neither reads nor writes -/// to be moved either before or after the atomic operation; on the other end -/// "relaxed" atomics allow all reorderings. -/// -/// Rust's memory orderings are the same as in C++[1]. -/// -/// 1: http://gcc.gnu.org/wiki/Atomic/GCCMM/AtomicSync -pub enum Ordering { - /// No ordering constraints, only atomic operations - Relaxed, - /// When coupled with a store, all previous writes become visible - /// to another thread that performs a load with `Acquire` ordering - /// on the same value - Release, - /// When coupled with a load, all subsequent loads will see data - /// written before a store with `Release` ordering on the same value - /// in another thread - Acquire, - /// When coupled with a load, uses `Acquire` ordering, and with a store - /// `Release` ordering - AcqRel, - /// Like `AcqRel` with the additional guarantee that all threads see all - /// sequentially consistent operations in the same order. - SeqCst -} - -/// An `AtomicBool` initialized to `false` -pub static INIT_ATOMIC_BOOL : AtomicBool = AtomicBool { v: Unsafe{value: 0, - marker1: marker::InvariantType}, - nocopy: marker::NoCopy }; -/// An `AtomicInt` initialized to `0` -pub static INIT_ATOMIC_INT : AtomicInt = AtomicInt { v: Unsafe{value: 0, - marker1: marker::InvariantType}, - nocopy: marker::NoCopy }; -/// An `AtomicUint` initialized to `0` -pub static INIT_ATOMIC_UINT : AtomicUint = AtomicUint { v: Unsafe{value: 0, - marker1: marker::InvariantType}, - nocopy: marker::NoCopy }; - -// NB: Needs to be -1 (0b11111111...) 
to make fetch_nand work correctly -static UINT_TRUE: uint = -1; - -impl AtomicBool { - /// Create a new `AtomicBool` - pub fn new(v: bool) -> AtomicBool { - let val = if v { UINT_TRUE } else { 0 }; - AtomicBool { v: Unsafe::new(val), nocopy: marker::NoCopy } - } - - /// Load the value - #[inline] - pub fn load(&self, order: Ordering) -> bool { - unsafe { atomic_load(self.v.get() as *uint, order) > 0 } - } - - /// Store the value - #[inline] - pub fn store(&self, val: bool, order: Ordering) { - let val = if val { UINT_TRUE } else { 0 }; - - unsafe { atomic_store(self.v.get(), val, order); } - } - - /// Store a value, returning the old value - #[inline] - pub fn swap(&self, val: bool, order: Ordering) -> bool { - let val = if val { UINT_TRUE } else { 0 }; - - unsafe { atomic_swap(self.v.get(), val, order) > 0 } - } - - /// If the current value is the same as expected, store a new value - /// - /// Compare the current value with `old`; if they are the same then - /// replace the current value with `new`. Return the previous value. - /// If the return value is equal to `old` then the value was updated. - /// - /// # Examples - /// - /// ```ignore - /// # // FIXME: Needs PR #12430 - /// extern crate sync; - /// - /// use sync::Arc; - /// use std::sync::atomics::{AtomicBool, SeqCst}; - /// - /// fn main() { - /// let spinlock = Arc::new(AtomicBool::new(false)); - /// let spinlock_clone = spin_lock.clone(); - /// - /// spawn(proc() { - /// with_lock(&spinlock, || println!("task 1 in lock")); - /// }); - /// - /// spawn(proc() { - /// with_lock(&spinlock_clone, || println!("task 2 in lock")); - /// }); - /// } - /// - /// fn with_lock(spinlock: &Arc, f: || -> ()) { - /// // CAS loop until we are able to replace `false` with `true` - /// while spinlock.compare_and_swap(false, true, SeqCst) == false { - /// // Since tasks may not be preemptive (if they are green threads) - /// // yield to the scheduler to let the other task run. Low level - /// // concurrent code needs to take into account Rust's two threading - /// // models. - /// deschedule(); - /// } - /// - /// // Now we have the spinlock - /// f(); - /// - /// // Release the lock - /// spinlock.store(false); - /// } - /// ``` - #[inline] - pub fn compare_and_swap(&self, old: bool, new: bool, order: Ordering) -> bool { - let old = if old { UINT_TRUE } else { 0 }; - let new = if new { UINT_TRUE } else { 0 }; - - unsafe { atomic_compare_and_swap(self.v.get(), old, new, order) > 0 } - } - - /// A logical "and" operation - /// - /// Performs a logical "and" operation on the current value and the - /// argument `val`, and sets the new value to the result. - /// Returns the previous value. 
- /// - /// # Examples - /// - /// ``` - /// use std::sync::atomics::{AtomicBool, SeqCst}; - /// - /// let foo = AtomicBool::new(true); - /// assert_eq!(true, foo.fetch_and(false, SeqCst)); - /// assert_eq!(false, foo.load(SeqCst)); - /// - /// let foo = AtomicBool::new(true); - /// assert_eq!(true, foo.fetch_and(true, SeqCst)); - /// assert_eq!(true, foo.load(SeqCst)); - /// - /// let foo = AtomicBool::new(false); - /// assert_eq!(false, foo.fetch_and(false, SeqCst)); - /// assert_eq!(false, foo.load(SeqCst)); - /// ``` - #[inline] - pub fn fetch_and(&self, val: bool, order: Ordering) -> bool { - let val = if val { UINT_TRUE } else { 0 }; - - unsafe { atomic_and(self.v.get(), val, order) > 0 } - } - - /// A logical "nand" operation - /// - /// Performs a logical "nand" operation on the current value and the - /// argument `val`, and sets the new value to the result. - /// Returns the previous value. - /// - /// # Examples - /// - /// ``` - /// use std::sync::atomics::{AtomicBool, SeqCst}; - /// - /// let foo = AtomicBool::new(true); - /// assert_eq!(true, foo.fetch_nand(false, SeqCst)); - /// assert_eq!(true, foo.load(SeqCst)); - /// - /// let foo = AtomicBool::new(true); - /// assert_eq!(true, foo.fetch_nand(true, SeqCst)); - /// assert_eq!(0, foo.load(SeqCst) as int); - /// assert_eq!(false, foo.load(SeqCst)); - /// - /// let foo = AtomicBool::new(false); - /// assert_eq!(false, foo.fetch_nand(false, SeqCst)); - /// assert_eq!(true, foo.load(SeqCst)); - /// ``` - #[inline] - pub fn fetch_nand(&self, val: bool, order: Ordering) -> bool { - let val = if val { UINT_TRUE } else { 0 }; - - unsafe { atomic_nand(self.v.get(), val, order) > 0 } - } - - /// A logical "or" operation - /// - /// Performs a logical "or" operation on the current value and the - /// argument `val`, and sets the new value to the result. - /// Returns the previous value. - /// - /// # Examples - /// - /// ``` - /// use std::sync::atomics::{AtomicBool, SeqCst}; - /// - /// let foo = AtomicBool::new(true); - /// assert_eq!(true, foo.fetch_or(false, SeqCst)); - /// assert_eq!(true, foo.load(SeqCst)); - /// - /// let foo = AtomicBool::new(true); - /// assert_eq!(true, foo.fetch_or(true, SeqCst)); - /// assert_eq!(true, foo.load(SeqCst)); - /// - /// let foo = AtomicBool::new(false); - /// assert_eq!(false, foo.fetch_or(false, SeqCst)); - /// assert_eq!(false, foo.load(SeqCst)); - /// ``` - #[inline] - pub fn fetch_or(&self, val: bool, order: Ordering) -> bool { - let val = if val { UINT_TRUE } else { 0 }; - - unsafe { atomic_or(self.v.get(), val, order) > 0 } - } - - /// A logical "xor" operation - /// - /// Performs a logical "xor" operation on the current value and the - /// argument `val`, and sets the new value to the result. - /// Returns the previous value. 
- /// - /// # Examples - /// - /// ``` - /// use std::sync::atomics::{AtomicBool, SeqCst}; - /// - /// let foo = AtomicBool::new(true); - /// assert_eq!(true, foo.fetch_xor(false, SeqCst)); - /// assert_eq!(true, foo.load(SeqCst)); - /// - /// let foo = AtomicBool::new(true); - /// assert_eq!(true, foo.fetch_xor(true, SeqCst)); - /// assert_eq!(false, foo.load(SeqCst)); - /// - /// let foo = AtomicBool::new(false); - /// assert_eq!(false, foo.fetch_xor(false, SeqCst)); - /// assert_eq!(false, foo.load(SeqCst)); - /// ``` - #[inline] - pub fn fetch_xor(&self, val: bool, order: Ordering) -> bool { - let val = if val { UINT_TRUE } else { 0 }; - - unsafe { atomic_xor(self.v.get(), val, order) > 0 } - } -} - -impl AtomicInt { - /// Create a new `AtomicInt` - pub fn new(v: int) -> AtomicInt { - AtomicInt {v: Unsafe::new(v), nocopy: marker::NoCopy} - } - - /// Load the value - #[inline] - pub fn load(&self, order: Ordering) -> int { - unsafe { atomic_load(self.v.get() as *int, order) } - } - - /// Store the value - #[inline] - pub fn store(&self, val: int, order: Ordering) { - unsafe { atomic_store(self.v.get(), val, order); } - } - - /// Store a value, returning the old value - #[inline] - pub fn swap(&self, val: int, order: Ordering) -> int { - unsafe { atomic_swap(self.v.get(), val, order) } - } - - /// If the current value is the same as expected, store a new value - /// - /// Compare the current value with `old`; if they are the same then - /// replace the current value with `new`. Return the previous value. - /// If the return value is equal to `old` then the value was updated. - #[inline] - pub fn compare_and_swap(&self, old: int, new: int, order: Ordering) -> int { - unsafe { atomic_compare_and_swap(self.v.get(), old, new, order) } - } - - /// Add to the current value, returning the previous - /// - /// # Examples - /// - /// ``` - /// use std::sync::atomics::{AtomicInt, SeqCst}; - /// - /// let foo = AtomicInt::new(0); - /// assert_eq!(0, foo.fetch_add(10, SeqCst)); - /// assert_eq!(10, foo.load(SeqCst)); - /// ``` - #[inline] - pub fn fetch_add(&self, val: int, order: Ordering) -> int { - unsafe { atomic_add(self.v.get(), val, order) } - } - - /// Subtract from the current value, returning the previous - /// - /// # Examples - /// - /// ``` - /// use std::sync::atomics::{AtomicInt, SeqCst}; - /// - /// let foo = AtomicInt::new(0); - /// assert_eq!(0, foo.fetch_sub(10, SeqCst)); - /// assert_eq!(-10, foo.load(SeqCst)); - /// ``` - #[inline] - pub fn fetch_sub(&self, val: int, order: Ordering) -> int { - unsafe { atomic_sub(self.v.get(), val, order) } - } - - /// Bitwise and with the current value, returning the previous - /// - /// # Examples - /// - /// ``` - /// use std::sync::atomics::{AtomicUint, SeqCst}; - /// - /// let foo = AtomicUint::new(0b101101); - /// assert_eq!(0b101101, foo.fetch_and(0b110011, SeqCst)); - /// assert_eq!(0b100001, foo.load(SeqCst)); - #[inline] - pub fn fetch_and(&self, val: int, order: Ordering) -> int { - unsafe { atomic_and(self.v.get(), val, order) } - } - - /// Bitwise or with the current value, returning the previous - /// - /// # Examples - /// - /// ``` - /// use std::sync::atomics::{AtomicUint, SeqCst}; - /// - /// let foo = AtomicUint::new(0b101101); - /// assert_eq!(0b101101, foo.fetch_or(0b110011, SeqCst)); - /// assert_eq!(0b111111, foo.load(SeqCst)); - #[inline] - pub fn fetch_or(&self, val: int, order: Ordering) -> int { - unsafe { atomic_or(self.v.get(), val, order) } - } - - /// Bitwise xor with the current value, returning the previous - 
/// - /// # Examples - /// - /// ``` - /// use std::sync::atomics::{AtomicUint, SeqCst}; - /// - /// let foo = AtomicUint::new(0b101101); - /// assert_eq!(0b101101, foo.fetch_xor(0b110011, SeqCst)); - /// assert_eq!(0b011110, foo.load(SeqCst)); - #[inline] - pub fn fetch_xor(&self, val: int, order: Ordering) -> int { - unsafe { atomic_xor(self.v.get(), val, order) } - } -} - -impl AtomicUint { - /// Create a new `AtomicUint` - pub fn new(v: uint) -> AtomicUint { - AtomicUint { v: Unsafe::new(v), nocopy: marker::NoCopy } - } - - /// Load the value - #[inline] - pub fn load(&self, order: Ordering) -> uint { - unsafe { atomic_load(self.v.get() as *uint, order) } - } - - /// Store the value - #[inline] - pub fn store(&self, val: uint, order: Ordering) { - unsafe { atomic_store(self.v.get(), val, order); } - } - - /// Store a value, returning the old value - #[inline] - pub fn swap(&self, val: uint, order: Ordering) -> uint { - unsafe { atomic_swap(self.v.get(), val, order) } - } - - /// If the current value is the same as expected, store a new value - /// - /// Compare the current value with `old`; if they are the same then - /// replace the current value with `new`. Return the previous value. - /// If the return value is equal to `old` then the value was updated. - #[inline] - pub fn compare_and_swap(&self, old: uint, new: uint, order: Ordering) -> uint { - unsafe { atomic_compare_and_swap(self.v.get(), old, new, order) } - } - - /// Add to the current value, returning the previous - /// - /// # Examples - /// - /// ``` - /// use std::sync::atomics::{AtomicUint, SeqCst}; - /// - /// let foo = AtomicUint::new(0); - /// assert_eq!(0, foo.fetch_add(10, SeqCst)); - /// assert_eq!(10, foo.load(SeqCst)); - /// ``` - #[inline] - pub fn fetch_add(&self, val: uint, order: Ordering) -> uint { - unsafe { atomic_add(self.v.get(), val, order) } - } - - /// Subtract from the current value, returning the previous - /// - /// # Examples - /// - /// ``` - /// use std::sync::atomics::{AtomicUint, SeqCst}; - /// - /// let foo = AtomicUint::new(10); - /// assert_eq!(10, foo.fetch_sub(10, SeqCst)); - /// assert_eq!(0, foo.load(SeqCst)); - /// ``` - #[inline] - pub fn fetch_sub(&self, val: uint, order: Ordering) -> uint { - unsafe { atomic_sub(self.v.get(), val, order) } - } - - /// Bitwise and with the current value, returning the previous - /// - /// # Examples - /// - /// ``` - /// use std::sync::atomics::{AtomicUint, SeqCst}; - /// - /// let foo = AtomicUint::new(0b101101); - /// assert_eq!(0b101101, foo.fetch_and(0b110011, SeqCst)); - /// assert_eq!(0b100001, foo.load(SeqCst)); - #[inline] - pub fn fetch_and(&self, val: uint, order: Ordering) -> uint { - unsafe { atomic_and(self.v.get(), val, order) } - } - - /// Bitwise or with the current value, returning the previous - /// - /// # Examples - /// - /// ``` - /// use std::sync::atomics::{AtomicUint, SeqCst}; - /// - /// let foo = AtomicUint::new(0b101101); - /// assert_eq!(0b101101, foo.fetch_or(0b110011, SeqCst)); - /// assert_eq!(0b111111, foo.load(SeqCst)); - #[inline] - pub fn fetch_or(&self, val: uint, order: Ordering) -> uint { - unsafe { atomic_or(self.v.get(), val, order) } - } - - /// Bitwise xor with the current value, returning the previous - /// - /// # Examples - /// - /// ``` - /// use std::sync::atomics::{AtomicUint, SeqCst}; - /// - /// let foo = AtomicUint::new(0b101101); - /// assert_eq!(0b101101, foo.fetch_xor(0b110011, SeqCst)); - /// assert_eq!(0b011110, foo.load(SeqCst)); - #[inline] - pub fn fetch_xor(&self, val: uint, order: Ordering) 
-> uint { - unsafe { atomic_xor(self.v.get(), val, order) } - } -} - -impl AtomicPtr { - /// Create a new `AtomicPtr` - pub fn new(p: *mut T) -> AtomicPtr { - AtomicPtr { p: Unsafe::new(p as uint), nocopy: marker::NoCopy } - } - - /// Load the value - #[inline] - pub fn load(&self, order: Ordering) -> *mut T { - unsafe { - atomic_load(self.p.get() as **mut T, order) as *mut T - } - } - - /// Store the value - #[inline] - pub fn store(&self, ptr: *mut T, order: Ordering) { - unsafe { atomic_store(self.p.get(), ptr as uint, order); } - } - - /// Store a value, returning the old value - #[inline] - pub fn swap(&self, ptr: *mut T, order: Ordering) -> *mut T { - unsafe { atomic_swap(self.p.get(), ptr as uint, order) as *mut T } - } - - /// If the current value is the same as expected, store a new value - /// - /// Compare the current value with `old`; if they are the same then - /// replace the current value with `new`. Return the previous value. - /// If the return value is equal to `old` then the value was updated. - #[inline] - pub fn compare_and_swap(&self, old: *mut T, new: *mut T, order: Ordering) -> *mut T { - unsafe { - atomic_compare_and_swap(self.p.get(), old as uint, - new as uint, order) as *mut T - } - } + p: AtomicUint, } impl AtomicOption { /// Create a new `AtomicOption` pub fn new(p: Box) -> AtomicOption { - unsafe { AtomicOption { p: Unsafe::new(mem::transmute(p)) } } + unsafe { AtomicOption { p: AtomicUint::new(mem::transmute(p)) } } } /// Create a new `AtomicOption` that doesn't contain a value - pub fn empty() -> AtomicOption { AtomicOption { p: Unsafe::new(0) } } + pub fn empty() -> AtomicOption { AtomicOption { p: AtomicUint::new(0) } } /// Store a value, returning the old value #[inline] pub fn swap(&self, val: Box, order: Ordering) -> Option> { - unsafe { - let val = mem::transmute(val); + let val = unsafe { mem::transmute(val) }; - let p = atomic_swap(self.p.get(), val, order); - if p as uint == 0 { - None - } else { - Some(mem::transmute(p)) - } + match self.p.swap(val, order) { + 0 => None, + n => Some(unsafe { mem::transmute(n) }), } } @@ -702,7 +160,7 @@ impl AtomicOption { unsafe { let val = mem::transmute(val); let expected = mem::transmute(0); - let oldval = atomic_compare_and_swap(self.p.get(), expected, val, order); + let oldval = self.p.compare_and_swap(expected, val, order); if oldval == expected { None } else { @@ -717,7 +175,7 @@ impl AtomicOption { /// result does not get invalidated by another task after this returns. 
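The hunk above rebuilds `AtomicOption` on top of `AtomicUint` plus `transmute`. A rough modern sketch of the same idea, using `AtomicPtr` with null as the empty state (the names and the missing `Drop` handling are simplifications, not the patch's implementation):

```rust
use std::sync::atomic::{AtomicPtr, Ordering};

// A nullable, owned pointer packed into one atomic word. A real version
// would also need a Drop impl so a leftover value is not leaked.
struct AtomicOption<T> {
    p: AtomicPtr<T>,
}

impl<T> AtomicOption<T> {
    fn empty() -> Self {
        AtomicOption { p: AtomicPtr::new(std::ptr::null_mut()) }
    }

    // Store a value, returning the old one (if any), like `swap` above.
    fn swap(&self, val: Box<T>, order: Ordering) -> Option<Box<T>> {
        let old = self.p.swap(Box::into_raw(val), order);
        if old.is_null() {
            None
        } else {
            // Safety: non-null values only ever come from Box::into_raw here.
            Some(unsafe { Box::from_raw(old) })
        }
    }

    // Take the value out, leaving the slot empty.
    fn take(&self, order: Ordering) -> Option<Box<T>> {
        let old = self.p.swap(std::ptr::null_mut(), order);
        if old.is_null() { None } else { Some(unsafe { Box::from_raw(old) }) }
    }
}

fn main() {
    let slot = AtomicOption::empty();
    assert!(slot.swap(Box::new(1), Ordering::SeqCst).is_none());
    assert_eq!(slot.take(Ordering::SeqCst), Some(Box::new(1)));
    assert!(slot.take(Ordering::SeqCst).is_none());
}
```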
#[inline] pub fn is_empty(&self, order: Ordering) -> bool { - unsafe { atomic_load(self.p.get() as *uint, order) as uint == 0 } + self.p.load(order) as uint == 0 } } @@ -728,165 +186,11 @@ impl Drop for AtomicOption { } } -#[inline] -unsafe fn atomic_store(dst: *mut T, val: T, order:Ordering) { - match order { - Release => intrinsics::atomic_store_rel(dst, val), - Relaxed => intrinsics::atomic_store_relaxed(dst, val), - _ => intrinsics::atomic_store(dst, val) - } -} - -#[inline] -unsafe fn atomic_load(dst: *T, order:Ordering) -> T { - match order { - Acquire => intrinsics::atomic_load_acq(dst), - Relaxed => intrinsics::atomic_load_relaxed(dst), - _ => intrinsics::atomic_load(dst) - } -} - -#[inline] -unsafe fn atomic_swap(dst: *mut T, val: T, order: Ordering) -> T { - match order { - Acquire => intrinsics::atomic_xchg_acq(dst, val), - Release => intrinsics::atomic_xchg_rel(dst, val), - AcqRel => intrinsics::atomic_xchg_acqrel(dst, val), - Relaxed => intrinsics::atomic_xchg_relaxed(dst, val), - _ => intrinsics::atomic_xchg(dst, val) - } -} - -/// Returns the old value (like __sync_fetch_and_add). -#[inline] -unsafe fn atomic_add(dst: *mut T, val: T, order: Ordering) -> T { - match order { - Acquire => intrinsics::atomic_xadd_acq(dst, val), - Release => intrinsics::atomic_xadd_rel(dst, val), - AcqRel => intrinsics::atomic_xadd_acqrel(dst, val), - Relaxed => intrinsics::atomic_xadd_relaxed(dst, val), - _ => intrinsics::atomic_xadd(dst, val) - } -} - -/// Returns the old value (like __sync_fetch_and_sub). -#[inline] -unsafe fn atomic_sub(dst: *mut T, val: T, order: Ordering) -> T { - match order { - Acquire => intrinsics::atomic_xsub_acq(dst, val), - Release => intrinsics::atomic_xsub_rel(dst, val), - AcqRel => intrinsics::atomic_xsub_acqrel(dst, val), - Relaxed => intrinsics::atomic_xsub_relaxed(dst, val), - _ => intrinsics::atomic_xsub(dst, val) - } -} - -#[inline] -unsafe fn atomic_compare_and_swap(dst: *mut T, old:T, new:T, order: Ordering) -> T { - match order { - Acquire => intrinsics::atomic_cxchg_acq(dst, old, new), - Release => intrinsics::atomic_cxchg_rel(dst, old, new), - AcqRel => intrinsics::atomic_cxchg_acqrel(dst, old, new), - Relaxed => intrinsics::atomic_cxchg_relaxed(dst, old, new), - _ => intrinsics::atomic_cxchg(dst, old, new), - } -} - -#[inline] -unsafe fn atomic_and(dst: *mut T, val: T, order: Ordering) -> T { - match order { - Acquire => intrinsics::atomic_and_acq(dst, val), - Release => intrinsics::atomic_and_rel(dst, val), - AcqRel => intrinsics::atomic_and_acqrel(dst, val), - Relaxed => intrinsics::atomic_and_relaxed(dst, val), - _ => intrinsics::atomic_and(dst, val) - } -} - -#[inline] -unsafe fn atomic_nand(dst: *mut T, val: T, order: Ordering) -> T { - match order { - Acquire => intrinsics::atomic_nand_acq(dst, val), - Release => intrinsics::atomic_nand_rel(dst, val), - AcqRel => intrinsics::atomic_nand_acqrel(dst, val), - Relaxed => intrinsics::atomic_nand_relaxed(dst, val), - _ => intrinsics::atomic_nand(dst, val) - } -} - - -#[inline] -unsafe fn atomic_or(dst: *mut T, val: T, order: Ordering) -> T { - match order { - Acquire => intrinsics::atomic_or_acq(dst, val), - Release => intrinsics::atomic_or_rel(dst, val), - AcqRel => intrinsics::atomic_or_acqrel(dst, val), - Relaxed => intrinsics::atomic_or_relaxed(dst, val), - _ => intrinsics::atomic_or(dst, val) - } -} - - -#[inline] -unsafe fn atomic_xor(dst: *mut T, val: T, order: Ordering) -> T { - match order { - Acquire => intrinsics::atomic_xor_acq(dst, val), - Release => intrinsics::atomic_xor_rel(dst, val), - 
AcqRel => intrinsics::atomic_xor_acqrel(dst, val), - Relaxed => intrinsics::atomic_xor_relaxed(dst, val), - _ => intrinsics::atomic_xor(dst, val) - } -} - - -/// An atomic fence. -/// -/// A fence 'A' which has `Release` ordering semantics, synchronizes with a -/// fence 'B' with (at least) `Acquire` semantics, if and only if there exists -/// atomic operations X and Y, both operating on some atomic object 'M' such -/// that A is sequenced before X, Y is synchronized before B and Y observers -/// the change to M. This provides a happens-before dependence between A and B. -/// -/// Atomic operations with `Release` or `Acquire` semantics can also synchronize -/// with a fence. -/// -/// A fence with has `SeqCst` ordering, in addition to having both `Acquire` and -/// `Release` semantics, participates in the global program order of the other -/// `SeqCst` operations and/or fences. -/// -/// Accepts `Acquire`, `Release`, `AcqRel` and `SeqCst` orderings. -/// -/// # Failure -/// -/// Fails if `order` is `Relaxed` -#[inline] -pub fn fence(order: Ordering) { - unsafe { - match order { - Acquire => intrinsics::atomic_fence_acq(), - Release => intrinsics::atomic_fence_rel(), - AcqRel => intrinsics::atomic_fence_acqrel(), - SeqCst => intrinsics::atomic_fence(), - Relaxed => fail!("there is no such thing as a relaxed fence") - } - } -} - #[cfg(test)] mod test { use option::*; use super::*; - #[test] - fn bool_() { - let a = AtomicBool::new(false); - assert_eq!(a.compare_and_swap(false, true, SeqCst), false); - assert_eq!(a.compare_and_swap(false, true, SeqCst), true); - - a.store(false, SeqCst); - assert_eq!(a.compare_and_swap(false, true, SeqCst), false); - } - #[test] fn option_empty() { let option: AtomicOption<()> = AtomicOption::empty(); @@ -900,109 +204,31 @@ mod test { let b = p.swap(a, SeqCst); - assert_eq!(b, Some(box 1)); - assert_eq!(p.take(SeqCst), Some(box 2)); + assert!(b == Some(box 1)); + assert!(p.take(SeqCst) == Some(box 2)); } #[test] fn option_take() { let p = AtomicOption::new(box 1); - assert_eq!(p.take(SeqCst), Some(box 1)); - assert_eq!(p.take(SeqCst), None); + assert!(p.take(SeqCst) == Some(box 1)); + assert!(p.take(SeqCst) == None); let p2 = box 2; p.swap(p2, SeqCst); - assert_eq!(p.take(SeqCst), Some(box 2)); + assert!(p.take(SeqCst) == Some(box 2)); } #[test] fn option_fill() { let p = AtomicOption::new(box 1); assert!(p.fill(box 2, SeqCst).is_some()); // should fail; shouldn't leak! 
- assert_eq!(p.take(SeqCst), Some(box 1)); + assert!(p.take(SeqCst) == Some(box 1)); assert!(p.fill(box 2, SeqCst).is_none()); // shouldn't fail - assert_eq!(p.take(SeqCst), Some(box 2)); - } - - #[test] - fn bool_and() { - let a = AtomicBool::new(true); - assert_eq!(a.fetch_and(false, SeqCst),true); - assert_eq!(a.load(SeqCst),false); - } - - #[test] - fn uint_and() { - let x = AtomicUint::new(0xf731); - assert_eq!(x.fetch_and(0x137f, SeqCst), 0xf731); - assert_eq!(x.load(SeqCst), 0xf731 & 0x137f); - } - - #[test] - fn uint_or() { - let x = AtomicUint::new(0xf731); - assert_eq!(x.fetch_or(0x137f, SeqCst), 0xf731); - assert_eq!(x.load(SeqCst), 0xf731 | 0x137f); - } - - #[test] - fn uint_xor() { - let x = AtomicUint::new(0xf731); - assert_eq!(x.fetch_xor(0x137f, SeqCst), 0xf731); - assert_eq!(x.load(SeqCst), 0xf731 ^ 0x137f); - } - - #[test] - fn int_and() { - let x = AtomicInt::new(0xf731); - assert_eq!(x.fetch_and(0x137f, SeqCst), 0xf731); - assert_eq!(x.load(SeqCst), 0xf731 & 0x137f); - } - - #[test] - fn int_or() { - let x = AtomicInt::new(0xf731); - assert_eq!(x.fetch_or(0x137f, SeqCst), 0xf731); - assert_eq!(x.load(SeqCst), 0xf731 | 0x137f); - } - - #[test] - fn int_xor() { - let x = AtomicInt::new(0xf731); - assert_eq!(x.fetch_xor(0x137f, SeqCst), 0xf731); - assert_eq!(x.load(SeqCst), 0xf731 ^ 0x137f); - } - - static mut S_BOOL : AtomicBool = INIT_ATOMIC_BOOL; - static mut S_INT : AtomicInt = INIT_ATOMIC_INT; - static mut S_UINT : AtomicUint = INIT_ATOMIC_UINT; - - #[test] - fn static_init() { - unsafe { - assert!(!S_BOOL.load(SeqCst)); - assert!(S_INT.load(SeqCst) == 0); - assert!(S_UINT.load(SeqCst) == 0); - } - } - - #[test] - fn different_sizes() { - unsafe { - let mut slot = 0u16; - assert_eq!(super::atomic_swap(&mut slot, 1, SeqCst), 0); - - let mut slot = 0u8; - assert_eq!(super::atomic_compare_and_swap(&mut slot, 1, 2, SeqCst), 0); - - let slot = 0u32; - assert_eq!(super::atomic_load(&slot, SeqCst), 0); - - let mut slot = 0u64; - super::atomic_store(&mut slot, 2, SeqCst); - } + assert!(p.take(SeqCst) == Some(box 2)); } } +
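Finally, the `INIT_ATOMIC_*` statics and the `static_init` test in this patch exist largely because pre-1.0 Rust had no `const fn`; with today's `std::sync::atomic` the constructors are `const`, so statics can be initialized directly (the modern type names below are assumptions relative to this patch):

```rust
use std::sync::atomic::{AtomicBool, AtomicIsize, AtomicUsize, Ordering};

// Direct static initialization; no INIT_ATOMIC_* constants or `static mut` needed.
static S_BOOL: AtomicBool = AtomicBool::new(false);
static S_INT: AtomicIsize = AtomicIsize::new(0);
static S_UINT: AtomicUsize = AtomicUsize::new(0);

fn main() {
    // The same checks as the `static_init` test, without any `unsafe`.
    assert!(!S_BOOL.load(Ordering::SeqCst));
    assert_eq!(S_INT.load(Ordering::SeqCst), 0);
    assert_eq!(S_UINT.load(Ordering::SeqCst), 0);
}
```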