1 | | -// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT |
|
| 1 | +// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT |
2 | 2 | // file at the top-level directory of this distribution and at
|
3 | 3 | // http://rust-lang.org/COPYRIGHT.
|
4 | 4 | //
|
@@ -462,6 +462,51 @@ impl AtomicInt {
|
462 | 462 | pub fn fetch_sub(&self, val: int, order: Ordering) -> int {
|
463 | 463 | unsafe { atomic_sub(self.v.get(), val, order) }
|
464 | 464 | }
|
| 465 | + |
| 466 | + /// Bitwise and with the current value, returning the previous value. |
| 467 | + /// |
| 468 | + /// # Examples |
| 469 | + /// |
| 470 | + /// ``` |
| 471 | + /// use std::sync::atomics::{AtomicInt, SeqCst}; |
| 472 | + /// let foo = AtomicInt::new(0b101101); |
| 473 | + /// assert_eq!(0b101101, foo.fetch_and(0b110011, SeqCst)); |
| 474 | + /// assert_eq!(0b100001, foo.load(SeqCst)); |
| 475 | + /// ``` |
| 476 | + #[inline] |
| 477 | + pub fn fetch_and(&self, val: int, order: Ordering) -> int { |
| 478 | + unsafe { atomic_and(self.v.get(), val, order) } |
| 479 | + } |
| 480 | + |
| 481 | + /// Bitwise or with the current value, returning the previous value. |
| 482 | + /// |
| 483 | + /// # Examples |
| 484 | + /// |
| 485 | + /// ``` |
| 486 | + /// use std::sync::atomics::{AtomicInt, SeqCst}; |
| 487 | + /// let foo = AtomicInt::new(0b101101); |
| 488 | + /// assert_eq!(0b101101, foo.fetch_or(0b110011, SeqCst)); |
| 489 | + /// assert_eq!(0b111111, foo.load(SeqCst)); |
| 490 | + /// ``` |
| 491 | + #[inline] |
| 492 | + pub fn fetch_or(&self, val: int, order: Ordering) -> int { |
| 493 | + unsafe { atomic_or(self.v.get(), val, order) } |
| 494 | + } |
| 495 | + |
| 496 | + /// Bitwise xor with the current value, returning the previous value. |
| 497 | + /// |
| 498 | + /// # Examples |
| 499 | + /// |
| 500 | + /// ``` |
| 501 | + /// use std::sync::atomics::{AtomicInt, SeqCst}; |
| 502 | + /// let foo = AtomicInt::new(0b101101); |
| 503 | + /// assert_eq!(0b101101, foo.fetch_xor(0b110011, SeqCst)); |
| 504 | + /// assert_eq!(0b011110, foo.load(SeqCst)); |
| 505 | + /// ``` |
| 506 | + #[inline] |
| 507 | + pub fn fetch_xor(&self, val: int, order: Ordering) -> int { |
| 508 | + unsafe { atomic_xor(self.v.get(), val, order) } |
| 509 | + } |
465 | 510 | }
|
466 | 511 |
|
467 | 512 | impl AtomicUint {
|
@@ -529,6 +574,51 @@ impl AtomicUint {
|
529 | 574 | pub fn fetch_sub(&self, val: uint, order: Ordering) -> uint {
|
530 | 575 | unsafe { atomic_sub(self.v.get(), val, order) }
|
531 | 576 | }
|
| 577 | + |
| 578 | + /// Bitwise and with the current value, returning the previous value. |
| 579 | + /// |
| 580 | + /// # Examples |
| 581 | + /// |
| 582 | + /// ``` |
| 583 | + /// use std::sync::atomics::{AtomicUint, SeqCst}; |
| 584 | + /// let foo = AtomicUint::new(0b101101); |
| 585 | + /// assert_eq!(0b101101, foo.fetch_and(0b110011, SeqCst)); |
| 586 | + /// assert_eq!(0b100001, foo.load(SeqCst)); |
| 587 | + /// ``` |
| 588 | + #[inline] |
| 589 | + pub fn fetch_and(&self, val: uint, order: Ordering) -> uint { |
| 590 | + unsafe { atomic_and(self.v.get(), val, order) } |
| 591 | + } |
| 592 | + |
| 593 | + /// Bitwise or with the current value, returning the previous value. |
| 594 | + /// |
| 595 | + /// # Examples |
| 596 | + /// |
| 597 | + /// ``` |
| 598 | + /// use std::sync::atomics::{AtomicUint, SeqCst}; |
| 599 | + /// let foo = AtomicUint::new(0b101101); |
| 600 | + /// assert_eq!(0b101101, foo.fetch_or(0b110011, SeqCst)); |
| 601 | + /// assert_eq!(0b111111, foo.load(SeqCst)); |
| 602 | + /// ``` |
| 603 | + #[inline] |
| 604 | + pub fn fetch_or(&self, val: uint, order: Ordering) -> uint { |
| 605 | + unsafe { atomic_or(self.v.get(), val, order) } |
| 606 | + } |
| 607 | + |
| 608 | + /// Bitwise xor with the current value, returning the previous value. |
| 609 | + /// |
| 610 | + /// # Examples |
| 611 | + /// |
| 612 | + /// ``` |
| 613 | + /// use std::sync::atomics::{AtomicUint, SeqCst}; |
| 614 | + /// let foo = AtomicUint::new(0b101101); |
| 615 | + /// assert_eq!(0b101101, foo.fetch_xor(0b110011, SeqCst)); |
| 616 | + /// assert_eq!(0b011110, foo.load(SeqCst)); |
| 617 | + /// ``` |
| 618 | + #[inline] |
| 619 | + pub fn fetch_xor(&self, val: uint, order: Ordering) -> uint { |
| 620 | + unsafe { atomic_xor(self.v.get(), val, order) } |
| 621 | + } |
532 | 622 | }
|
533 | 623 |
|
534 | 624 | impl<T> AtomicPtr<T> {
|
@@ -843,6 +933,48 @@ mod test {
|
843 | 933 | assert_eq!(a.load(SeqCst),false);
|
844 | 934 | }
|
845 | 935 |
|
| 936 | + #[test] |
| 937 | + fn uint_and() { |
| 938 | + let x = AtomicUint::new(0xf731); |
| 939 | + assert_eq!(x.fetch_and(0x137f, SeqCst), 0xf731); |
| 940 | + assert_eq!(x.load(SeqCst), 0xf731 & 0x137f); |
| 941 | + } |
| 942 | + |
| 943 | + #[test] |
| 944 | + fn uint_or() { |
| 945 | + let x = AtomicUint::new(0xf731); |
| 946 | + assert_eq!(x.fetch_or(0x137f, SeqCst), 0xf731); |
| 947 | + assert_eq!(x.load(SeqCst), 0xf731 | 0x137f); |
| 948 | + } |
| 949 | + |
| 950 | + #[test] |
| 951 | + fn uint_xor() { |
| 952 | + let x = AtomicUint::new(0xf731); |
| 953 | + assert_eq!(x.fetch_xor(0x137f, SeqCst), 0xf731); |
| 954 | + assert_eq!(x.load(SeqCst), 0xf731 ^ 0x137f); |
| 955 | + } |
| 956 | + |
| 957 | + #[test] |
| 958 | + fn int_and() { |
| 959 | + let x = AtomicInt::new(0xf731); |
| 960 | + assert_eq!(x.fetch_and(0x137f, SeqCst), 0xf731); |
| 961 | + assert_eq!(x.load(SeqCst), 0xf731 & 0x137f); |
| 962 | + } |
| 963 | + |
| 964 | + #[test] |
| 965 | + fn int_or() { |
| 966 | + let x = AtomicInt::new(0xf731); |
| 967 | + assert_eq!(x.fetch_or(0x137f, SeqCst), 0xf731); |
| 968 | + assert_eq!(x.load(SeqCst), 0xf731 | 0x137f); |
| 969 | + } |
| 970 | + |
| 971 | + #[test] |
| 972 | + fn int_xor() { |
| 973 | + let x = AtomicInt::new(0xf731); |
| 974 | + assert_eq!(x.fetch_xor(0x137f, SeqCst), 0xf731); |
| 975 | + assert_eq!(x.load(SeqCst), 0xf731 ^ 0x137f); |
| 976 | + } |
| 977 | + |
846 | 978 | static mut S_BOOL : AtomicBool = INIT_ATOMIC_BOOL;
|
847 | 979 | static mut S_INT : AtomicInt = INIT_ATOMIC_INT;
|
848 | 980 | static mut S_UINT : AtomicUint = INIT_ATOMIC_UINT;
|
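For context, the sketch below shows how the bitwise fetch operations added in this diff are typically used: several threads race to set bits in a shared flag word, and the returned previous value tells each thread which bits were already set. It is written against today's stable `std::sync::atomic` API (`AtomicUsize`, `Ordering::SeqCst`) rather than the pre-1.0 `std::sync::atomics` module shown in the diff, and the `READY`/`DONE` flag names are illustrative only, not part of this change.

```rust
// Minimal sketch (not part of this PR): the same fetch_and / fetch_or /
// fetch_xor pattern using the modern stable std::sync::atomic API, where
// AtomicUsize plays the role of the old AtomicUint.
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc;
use std::thread;

const READY: usize = 0b01; // hypothetical flag bits, for illustration
const DONE: usize = 0b10;

fn main() {
    let flags = Arc::new(AtomicUsize::new(0));

    let handles: Vec<_> = (0..4)
        .map(|_| {
            let flags = Arc::clone(&flags);
            thread::spawn(move || {
                // fetch_or returns the value seen *before* the bits were set.
                let before = flags.fetch_or(READY, Ordering::SeqCst);
                if before & READY == 0 {
                    // Only the first thread to set READY takes this branch.
                    flags.fetch_or(DONE, Ordering::SeqCst);
                }
            })
        })
        .collect();

    for h in handles {
        h.join().unwrap();
    }

    // Clear READY with fetch_and while keeping DONE, then toggle DONE off
    // with fetch_xor.
    flags.fetch_and(!READY, Ordering::SeqCst);
    assert_eq!(flags.load(Ordering::SeqCst), DONE);
    flags.fetch_xor(DONE, Ordering::SeqCst);
    assert_eq!(flags.load(Ordering::SeqCst), 0);
}
```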