./libstd/sync/atomics.rs
git branch: * master 5200215 auto merge of #14035 : alexcrichton/rust/experimental, r=huonw
modified: Fri May 9 13:02:28 2014
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

//! Atomic types
//!
//! Atomic types provide primitive shared-memory communication between
//! threads, and are the building blocks of other concurrent
//! types.
//!
//! This module defines atomic versions of a select number of primitive
//! types, including `AtomicBool`, `AtomicInt`, `AtomicUint`, `AtomicPtr`,
//! and `AtomicOption`. Atomic types present operations that, when used
//! correctly, synchronize updates between threads.
//!
//! Each method takes an `Ordering` which represents the strength of
//! the memory barrier for that operation. These orderings are the
//! same as [C++11 atomic orderings][1].
//!
//! [1]: http://gcc.gnu.org/wiki/Atomic/GCCMM/AtomicSync
//!
//! Atomic variables are safe to share between threads (they implement `Share`)
//! but they do not themselves provide the mechanism for sharing. The most
//! common way to share an atomic variable is to put it into an `Arc` (an
//! atomically-reference-counted shared pointer).
//!
//! Most atomic types may be stored in static variables, initialized using
//! the provided static initializers like `INIT_ATOMIC_BOOL`. Atomic statics
//! are often used for lazy global initialization (see the last example
//! below).
//!
//! # Examples
//!
//! A simple spinlock:
//!
//! ```
//! extern crate sync;
//!
//! use sync::Arc;
//! use std::sync::atomics::{AtomicUint, SeqCst};
//! use std::task::deschedule;
//!
//! fn main() {
//!     let spinlock = Arc::new(AtomicUint::new(1));
//!
//!     let spinlock_clone = spinlock.clone();
//!     spawn(proc() {
//!         spinlock_clone.store(0, SeqCst);
//!     });
//!
//!     // Wait for the other task to release the lock
//!     while spinlock.load(SeqCst) != 0 {
//!         // Since tasks may not be preemptive (if they are green threads)
//!         // yield to the scheduler to let the other task run. Low level
//!         // concurrent code needs to take into account Rust's two threading
//!         // models.
//!         deschedule();
//!     }
//! }
//! ```
//!
//! Transferring a heap object with `AtomicOption`:
//!
//! ```
//! extern crate sync;
//!
//! use sync::Arc;
//! use std::sync::atomics::{AtomicOption, SeqCst};
//!
//! fn main() {
//!     struct BigObject;
//!
//!     let shared_big_object = Arc::new(AtomicOption::empty());
//!
//!     let shared_big_object_clone = shared_big_object.clone();
//!     spawn(proc() {
//!         let unwrapped_big_object = shared_big_object_clone.take(SeqCst);
//!         if unwrapped_big_object.is_some() {
//!             println!("got a big object from another task");
//!         } else {
//!             println!("other task hasn't sent big object yet");
//!         }
//!     });
//!
//!     shared_big_object.swap(box BigObject, SeqCst);
//! }
//! ```
//!
//! Keep a global count of live tasks:
//!
//! ```
//! use std::sync::atomics::{AtomicUint, SeqCst, INIT_ATOMIC_UINT};
//!
//! static mut GLOBAL_TASK_COUNT: AtomicUint = INIT_ATOMIC_UINT;
//!
//! unsafe {
//!     let old_task_count = GLOBAL_TASK_COUNT.fetch_add(1, SeqCst);
//!     println!("live tasks: {}", old_task_count + 1);
//! }
//! ```
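//!
//! Lazy one-time initialization, as mentioned above (a minimal sketch; the
//! `initialize_library` function is a hypothetical stand-in for whatever
//! work needs to happen exactly once):
//!
//! ```
//! use std::sync::atomics::{AtomicBool, SeqCst, INIT_ATOMIC_BOOL};
//!
//! fn initialize_library() { /* expensive one-time setup */ }
//!
//! static mut INITIALIZED: AtomicBool = INIT_ATOMIC_BOOL;
//!
//! unsafe {
//!     // `swap` returns the previous value, so only the first task to flip
//!     // the flag runs the initialization.
//!     if !INITIALIZED.swap(true, SeqCst) {
//!         initialize_library();
//!     }
//! }
//! ```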

#![allow(missing_doc)]

use intrinsics;
use cast;
use std::kinds::marker;
use option::{Option, Some, None};
use ops::Drop;
use owned::Box;
use ty::Unsafe;

/// An atomic boolean type.
pub struct AtomicBool {
    v: Unsafe<uint>,
    nocopy: marker::NoCopy
}

/// A signed atomic integer type, supporting basic atomic arithmetic operations
pub struct AtomicInt {
    v: Unsafe<int>,
    nocopy: marker::NoCopy
}

/// An unsigned atomic integer type, supporting basic atomic arithmetic operations
pub struct AtomicUint {
    v: Unsafe<uint>,
    nocopy: marker::NoCopy
}

/// An unsafe atomic pointer. Only supports basic atomic operations
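///
/// # Examples
///
/// A minimal sketch of storing and retrieving a raw pointer (any use of the
/// loaded pointer would itself be unsafe):
///
/// ```
/// use std::sync::atomics::{AtomicPtr, SeqCst};
///
/// let mut value = 5;
/// let ptr = AtomicPtr::new(&mut value as *mut int);
///
/// // `load` returns the raw pointer that was last stored.
/// assert!(ptr.load(SeqCst) == &mut value as *mut int);
/// ```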
pub struct AtomicPtr<T> {
    p: Unsafe<uint>,
    nocopy: marker::NoCopy
}

/// An atomic, nullable unique pointer
///
/// This can be used as the concurrency primitive for operations that transfer
/// owned heap objects across tasks.
#[unsafe_no_drop_flag]
pub struct AtomicOption<T> {
    p: Unsafe<uint>,
}

/// Atomic memory orderings
///
/// Memory orderings limit the ways that both the compiler and CPU may reorder
/// instructions around atomic operations. At its most restrictive,
/// "sequentially consistent" atomics allow neither reads nor writes
/// to be moved either before or after the atomic operation; on the other end
/// "relaxed" atomics allow all reorderings.
///
/// Rust's memory orderings are the same as in C++[1].
///
/// [1]: http://gcc.gnu.org/wiki/Atomic/GCCMM/AtomicSync
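///
/// # Examples
///
/// A minimal sketch of a `Release` store paired with an `Acquire` load: the
/// writer publishes `DATA` before raising `READY`, and a reader that observes
/// `READY` with `Acquire` is then guaranteed to see the write to `DATA` (shown
/// here in one task for brevity; the point of the orderings is that the two
/// halves may run on different tasks):
///
/// ```
/// use std::sync::atomics::{AtomicBool, AtomicUint, Relaxed, Release, Acquire,
///                          INIT_ATOMIC_BOOL, INIT_ATOMIC_UINT};
///
/// static mut DATA: AtomicUint = INIT_ATOMIC_UINT;
/// static mut READY: AtomicBool = INIT_ATOMIC_BOOL;
///
/// unsafe {
///     DATA.store(42, Relaxed);     // write the payload
///     READY.store(true, Release);  // publish it
///
///     if READY.load(Acquire) {     // observe the flag
///         assert_eq!(42, DATA.load(Relaxed));
///     }
/// }
/// ```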
pub enum Ordering {
    /// No ordering constraints, only atomic operations
    Relaxed,
    /// When coupled with a store, all previous writes become visible
    /// to another thread that performs a load with `Acquire` ordering
    /// on the same value
    Release,
    /// When coupled with a load, all subsequent loads will see data
    /// written before a store with `Release` ordering on the same value
    /// in another thread
    Acquire,
    /// When coupled with a load, uses `Acquire` ordering, and with a store
    /// `Release` ordering
    AcqRel,
    /// Like `AcqRel` with the additional guarantee that all threads see all
    /// sequentially consistent operations in the same order.
    SeqCst
}

/// An `AtomicBool` initialized to `false`
pub static INIT_ATOMIC_BOOL: AtomicBool =
    AtomicBool { v: Unsafe { value: 0, marker1: marker::InvariantType },
                 nocopy: marker::NoCopy };
/// An `AtomicInt` initialized to `0`
pub static INIT_ATOMIC_INT: AtomicInt =
    AtomicInt { v: Unsafe { value: 0, marker1: marker::InvariantType },
                nocopy: marker::NoCopy };
/// An `AtomicUint` initialized to `0`
pub static INIT_ATOMIC_UINT: AtomicUint =
    AtomicUint { v: Unsafe { value: 0, marker1: marker::InvariantType },
                 nocopy: marker::NoCopy };

// NB: Needs to be -1 (0b11111111...) to make fetch_nand work correctly
static UINT_TRUE: uint = -1;

impl AtomicBool {
    /// Create a new `AtomicBool`
    pub fn new(v: bool) -> AtomicBool {
        let val = if v { UINT_TRUE } else { 0 };
        AtomicBool { v: Unsafe::new(val), nocopy: marker::NoCopy }
    }

    /// Load the value
    #[inline]
    pub fn load(&self, order: Ordering) -> bool {
        unsafe { atomic_load(self.v.get() as *uint, order) > 0 }
    }

    /// Store the value
    #[inline]
    pub fn store(&self, val: bool, order: Ordering) {
        let val = if val { UINT_TRUE } else { 0 };

        unsafe { atomic_store(self.v.get(), val, order); }
    }

    /// Store a value, returning the old value
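    ///
    /// # Examples
    ///
    /// A short illustration of the swap semantics described above:
    ///
    /// ```
    /// use std::sync::atomics::{AtomicBool, SeqCst};
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(true, foo.swap(false, SeqCst));
    /// assert_eq!(false, foo.load(SeqCst));
    /// ```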
    #[inline]
    pub fn swap(&self, val: bool, order: Ordering) -> bool {
        let val = if val { UINT_TRUE } else { 0 };

        unsafe { atomic_swap(self.v.get(), val, order) > 0 }
    }

    /// If the current value is the same as expected, store a new value
    ///
    /// Compare the current value with `old`; if they are the same then
    /// replace the current value with `new`. Return the previous value.
    /// If the return value is equal to `old` then the value was updated.
    ///
    /// # Examples
    ///
    /// ```ignore
    /// # // FIXME: Needs PR #12430
    /// extern crate sync;
    ///
    /// use sync::Arc;
    /// use std::sync::atomics::{AtomicBool, SeqCst};
    /// use std::task::deschedule;
    ///
    /// fn main() {
    ///     let spinlock = Arc::new(AtomicBool::new(false));
    ///     let spinlock_clone = spinlock.clone();
    ///
    ///     spawn(proc() {
    ///         with_lock(&spinlock, || println!("task 1 in lock"));
    ///     });
    ///
    ///     spawn(proc() {
    ///         with_lock(&spinlock_clone, || println!("task 2 in lock"));
    ///     });
    /// }
    ///
    /// fn with_lock(spinlock: &Arc<AtomicBool>, f: || -> ()) {
    ///     // CAS loop until we are able to replace `false` with `true`:
    ///     // `compare_and_swap` returns `true` while another task holds the lock
    ///     while spinlock.compare_and_swap(false, true, SeqCst) == true {
    ///         // Since tasks may not be preemptive (if they are green threads)
    ///         // yield to the scheduler to let the other task run. Low level
    ///         // concurrent code needs to take into account Rust's two threading
    ///         // models.
    ///         deschedule();
    ///     }
    ///
    ///     // Now we have the spinlock
    ///     f();
    ///
    ///     // Release the lock
    ///     spinlock.store(false, SeqCst);
    /// }
    /// ```
    #[inline]
    pub fn compare_and_swap(&self, old: bool, new: bool, order: Ordering) -> bool {
        let old = if old { UINT_TRUE } else { 0 };
        let new = if new { UINT_TRUE } else { 0 };

        unsafe { atomic_compare_and_swap(self.v.get(), old, new, order) > 0 }
    }

    /// A logical "and" operation
    ///
    /// Performs a logical "and" operation on the current value and the
    /// argument `val`, and sets the new value to the result.
    /// Returns the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomics::{AtomicBool, SeqCst};
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(true, foo.fetch_and(false, SeqCst));
    /// assert_eq!(false, foo.load(SeqCst));
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(true, foo.fetch_and(true, SeqCst));
    /// assert_eq!(true, foo.load(SeqCst));
    ///
    /// let foo = AtomicBool::new(false);
    /// assert_eq!(false, foo.fetch_and(false, SeqCst));
    /// assert_eq!(false, foo.load(SeqCst));
    /// ```
    #[inline]
    pub fn fetch_and(&self, val: bool, order: Ordering) -> bool {
        let val = if val { UINT_TRUE } else { 0 };

        unsafe { atomic_and(self.v.get(), val, order) > 0 }
    }

    /// A logical "nand" operation
    ///
    /// Performs a logical "nand" operation on the current value and the
    /// argument `val`, and sets the new value to the result.
    /// Returns the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomics::{AtomicBool, SeqCst};
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(true, foo.fetch_nand(false, SeqCst));
    /// assert_eq!(true, foo.load(SeqCst));
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(true, foo.fetch_nand(true, SeqCst));
    /// assert_eq!(false, foo.load(SeqCst));
    ///
    /// let foo = AtomicBool::new(false);
    /// assert_eq!(false, foo.fetch_nand(false, SeqCst));
    /// assert_eq!(true, foo.load(SeqCst));
    /// ```
    #[inline]
    pub fn fetch_nand(&self, val: bool, order: Ordering) -> bool {
        let val = if val { UINT_TRUE } else { 0 };

        unsafe { atomic_nand(self.v.get(), val, order) > 0 }
    }

    /// A logical "or" operation
    ///
    /// Performs a logical "or" operation on the current value and the
    /// argument `val`, and sets the new value to the result.
    /// Returns the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomics::{AtomicBool, SeqCst};
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(true, foo.fetch_or(false, SeqCst));
    /// assert_eq!(true, foo.load(SeqCst));
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(true, foo.fetch_or(true, SeqCst));
    /// assert_eq!(true, foo.load(SeqCst));
    ///
    /// let foo = AtomicBool::new(false);
    /// assert_eq!(false, foo.fetch_or(false, SeqCst));
    /// assert_eq!(false, foo.load(SeqCst));
    /// ```
    #[inline]
    pub fn fetch_or(&self, val: bool, order: Ordering) -> bool {
        let val = if val { UINT_TRUE } else { 0 };

        unsafe { atomic_or(self.v.get(), val, order) > 0 }
    }

    /// A logical "xor" operation
    ///
    /// Performs a logical "xor" operation on the current value and the
    /// argument `val`, and sets the new value to the result.
    /// Returns the previous value.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomics::{AtomicBool, SeqCst};
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(true, foo.fetch_xor(false, SeqCst));
    /// assert_eq!(true, foo.load(SeqCst));
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(true, foo.fetch_xor(true, SeqCst));
    /// assert_eq!(false, foo.load(SeqCst));
    ///
    /// let foo = AtomicBool::new(false);
    /// assert_eq!(false, foo.fetch_xor(false, SeqCst));
    /// assert_eq!(false, foo.load(SeqCst));
    /// ```
    #[inline]
    pub fn fetch_xor(&self, val: bool, order: Ordering) -> bool {
        let val = if val { UINT_TRUE } else { 0 };

        unsafe { atomic_xor(self.v.get(), val, order) > 0 }
    }
}

impl AtomicInt {
    /// Create a new `AtomicInt`
    pub fn new(v: int) -> AtomicInt {
        AtomicInt { v: Unsafe::new(v), nocopy: marker::NoCopy }
    }

    /// Load the value
    #[inline]
    pub fn load(&self, order: Ordering) -> int {
        unsafe { atomic_load(self.v.get() as *int, order) }
    }

    /// Store the value
    #[inline]
    pub fn store(&self, val: int, order: Ordering) {
        unsafe { atomic_store(self.v.get(), val, order); }
    }

    /// Store a value, returning the old value
    #[inline]
    pub fn swap(&self, val: int, order: Ordering) -> int {
        unsafe { atomic_swap(self.v.get(), val, order) }
    }

    /// If the current value is the same as expected, store a new value
    ///
    /// Compare the current value with `old`; if they are the same then
    /// replace the current value with `new`. Return the previous value.
    /// If the return value is equal to `old` then the value was updated.
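    ///
    /// # Examples
    ///
    /// A short illustration of the semantics described above: the first
    /// swap succeeds because the current value matches `old`; the second
    /// fails and leaves the value untouched:
    ///
    /// ```
    /// use std::sync::atomics::{AtomicInt, SeqCst};
    ///
    /// let foo = AtomicInt::new(5);
    /// assert_eq!(5, foo.compare_and_swap(5, 10, SeqCst));  // updated
    /// assert_eq!(10, foo.load(SeqCst));
    ///
    /// assert_eq!(10, foo.compare_and_swap(5, 15, SeqCst)); // no change
    /// assert_eq!(10, foo.load(SeqCst));
    /// ```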
    #[inline]
    pub fn compare_and_swap(&self, old: int, new: int, order: Ordering) -> int {
        unsafe { atomic_compare_and_swap(self.v.get(), old, new, order) }
    }

    /// Add to the current value, returning the previous
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomics::{AtomicInt, SeqCst};
    ///
    /// let foo = AtomicInt::new(0);
    /// assert_eq!(0, foo.fetch_add(10, SeqCst));
    /// assert_eq!(10, foo.load(SeqCst));
    /// ```
    #[inline]
    pub fn fetch_add(&self, val: int, order: Ordering) -> int {
        unsafe { atomic_add(self.v.get(), val, order) }
    }

    /// Subtract from the current value, returning the previous
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomics::{AtomicInt, SeqCst};
    ///
    /// let foo = AtomicInt::new(0);
    /// assert_eq!(0, foo.fetch_sub(10, SeqCst));
    /// assert_eq!(-10, foo.load(SeqCst));
    /// ```
    #[inline]
    pub fn fetch_sub(&self, val: int, order: Ordering) -> int {
        unsafe { atomic_sub(self.v.get(), val, order) }
    }

    /// Bitwise and with the current value, returning the previous
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomics::{AtomicInt, SeqCst};
    ///
    /// let foo = AtomicInt::new(0b101101);
    /// assert_eq!(0b101101, foo.fetch_and(0b110011, SeqCst));
    /// assert_eq!(0b100001, foo.load(SeqCst));
    /// ```
    #[inline]
    pub fn fetch_and(&self, val: int, order: Ordering) -> int {
        unsafe { atomic_and(self.v.get(), val, order) }
    }

    /// Bitwise or with the current value, returning the previous
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomics::{AtomicInt, SeqCst};
    ///
    /// let foo = AtomicInt::new(0b101101);
    /// assert_eq!(0b101101, foo.fetch_or(0b110011, SeqCst));
    /// assert_eq!(0b111111, foo.load(SeqCst));
    /// ```
    #[inline]
    pub fn fetch_or(&self, val: int, order: Ordering) -> int {
        unsafe { atomic_or(self.v.get(), val, order) }
    }

    /// Bitwise xor with the current value, returning the previous
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomics::{AtomicInt, SeqCst};
    ///
    /// let foo = AtomicInt::new(0b101101);
    /// assert_eq!(0b101101, foo.fetch_xor(0b110011, SeqCst));
    /// assert_eq!(0b011110, foo.load(SeqCst));
    /// ```
    #[inline]
    pub fn fetch_xor(&self, val: int, order: Ordering) -> int {
        unsafe { atomic_xor(self.v.get(), val, order) }
    }
}

impl AtomicUint {
    /// Create a new `AtomicUint`
    pub fn new(v: uint) -> AtomicUint {
        AtomicUint { v: Unsafe::new(v), nocopy: marker::NoCopy }
    }

    /// Load the value
    #[inline]
    pub fn load(&self, order: Ordering) -> uint {
        unsafe { atomic_load(self.v.get() as *uint, order) }
    }

    /// Store the value
    #[inline]
    pub fn store(&self, val: uint, order: Ordering) {
        unsafe { atomic_store(self.v.get(), val, order); }
    }

    /// Store a value, returning the old value
    #[inline]
    pub fn swap(&self, val: uint, order: Ordering) -> uint {
        unsafe { atomic_swap(self.v.get(), val, order) }
    }

    /// If the current value is the same as expected, store a new value
    ///
    /// Compare the current value with `old`; if they are the same then
    /// replace the current value with `new`. Return the previous value.
    /// If the return value is equal to `old` then the value was updated.
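    ///
    /// # Examples
    ///
    /// A short illustration of the semantics described above: the first
    /// swap succeeds because the current value matches `old`; the second
    /// fails and leaves the value untouched:
    ///
    /// ```
    /// use std::sync::atomics::{AtomicUint, SeqCst};
    ///
    /// let foo = AtomicUint::new(5);
    /// assert_eq!(5, foo.compare_and_swap(5, 10, SeqCst));  // updated
    /// assert_eq!(10, foo.load(SeqCst));
    ///
    /// assert_eq!(10, foo.compare_and_swap(5, 15, SeqCst)); // no change
    /// assert_eq!(10, foo.load(SeqCst));
    /// ```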
    #[inline]
    pub fn compare_and_swap(&self, old: uint, new: uint, order: Ordering) -> uint {
        unsafe { atomic_compare_and_swap(self.v.get(), old, new, order) }
    }

    /// Add to the current value, returning the previous
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomics::{AtomicUint, SeqCst};
    ///
    /// let foo = AtomicUint::new(0);
    /// assert_eq!(0, foo.fetch_add(10, SeqCst));
    /// assert_eq!(10, foo.load(SeqCst));
    /// ```
    #[inline]
    pub fn fetch_add(&self, val: uint, order: Ordering) -> uint {
        unsafe { atomic_add(self.v.get(), val, order) }
    }

    /// Subtract from the current value, returning the previous
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomics::{AtomicUint, SeqCst};
    ///
    /// let foo = AtomicUint::new(10);
    /// assert_eq!(10, foo.fetch_sub(10, SeqCst));
    /// assert_eq!(0, foo.load(SeqCst));
    /// ```
    #[inline]
    pub fn fetch_sub(&self, val: uint, order: Ordering) -> uint {
        unsafe { atomic_sub(self.v.get(), val, order) }
    }

    /// Bitwise and with the current value, returning the previous
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomics::{AtomicUint, SeqCst};
    ///
    /// let foo = AtomicUint::new(0b101101);
    /// assert_eq!(0b101101, foo.fetch_and(0b110011, SeqCst));
    /// assert_eq!(0b100001, foo.load(SeqCst));
    /// ```
    #[inline]
    pub fn fetch_and(&self, val: uint, order: Ordering) -> uint {
        unsafe { atomic_and(self.v.get(), val, order) }
    }

    /// Bitwise or with the current value, returning the previous
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomics::{AtomicUint, SeqCst};
    ///
    /// let foo = AtomicUint::new(0b101101);
    /// assert_eq!(0b101101, foo.fetch_or(0b110011, SeqCst));
    /// assert_eq!(0b111111, foo.load(SeqCst));
    /// ```
    #[inline]
    pub fn fetch_or(&self, val: uint, order: Ordering) -> uint {
        unsafe { atomic_or(self.v.get(), val, order) }
    }

    /// Bitwise xor with the current value, returning the previous
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomics::{AtomicUint, SeqCst};
    ///
    /// let foo = AtomicUint::new(0b101101);
    /// assert_eq!(0b101101, foo.fetch_xor(0b110011, SeqCst));
    /// assert_eq!(0b011110, foo.load(SeqCst));
    /// ```
    #[inline]
    pub fn fetch_xor(&self, val: uint, order: Ordering) -> uint {
        unsafe { atomic_xor(self.v.get(), val, order) }
    }
}

impl<T> AtomicPtr<T> {
    /// Create a new `AtomicPtr`
    pub fn new(p: *mut T) -> AtomicPtr<T> {
        AtomicPtr { p: Unsafe::new(p as uint), nocopy: marker::NoCopy }
    }

    /// Load the value
    #[inline]
    pub fn load(&self, order: Ordering) -> *mut T {
        unsafe {
            atomic_load(self.p.get() as **mut T, order) as *mut T
        }
    }

    /// Store the value
    #[inline]
    pub fn store(&self, ptr: *mut T, order: Ordering) {
        unsafe { atomic_store(self.p.get(), ptr as uint, order); }
    }

    /// Store a value, returning the old value
    #[inline]
    pub fn swap(&self, ptr: *mut T, order: Ordering) -> *mut T {
        unsafe { atomic_swap(self.p.get(), ptr as uint, order) as *mut T }
    }

    /// If the current value is the same as expected, store a new value
    ///
    /// Compare the current value with `old`; if they are the same then
    /// replace the current value with `new`. Return the previous value.
    /// If the return value is equal to `old` then the value was updated.
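    ///
    /// # Examples
    ///
    /// A minimal sketch using pointers to two stack slots:
    ///
    /// ```
    /// use std::sync::atomics::{AtomicPtr, SeqCst};
    ///
    /// let mut old_value = 1;
    /// let mut new_value = 2;
    /// let p_old = &mut old_value as *mut int;
    /// let p_new = &mut new_value as *mut int;
    ///
    /// let ptr = AtomicPtr::new(p_old);
    ///
    /// // The current value matches `p_old`, so the swap succeeds and
    /// // returns the previous pointer.
    /// assert!(ptr.compare_and_swap(p_old, p_new, SeqCst) == p_old);
    /// assert!(ptr.load(SeqCst) == p_new);
    /// ```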
    #[inline]
    pub fn compare_and_swap(&self, old: *mut T, new: *mut T, order: Ordering) -> *mut T {
        unsafe {
            atomic_compare_and_swap(self.p.get(), old as uint,
                                    new as uint, order) as *mut T
        }
    }
}

impl<T> AtomicOption<T> {
    /// Create a new `AtomicOption`
    pub fn new(p: Box<T>) -> AtomicOption<T> {
        unsafe { AtomicOption { p: Unsafe::new(cast::transmute(p)) } }
    }

    /// Create a new `AtomicOption` that doesn't contain a value
    pub fn empty() -> AtomicOption<T> { AtomicOption { p: Unsafe::new(0) } }

    /// Store a value, returning the old value
    #[inline]
    pub fn swap(&self, val: Box<T>, order: Ordering) -> Option<Box<T>> {
        unsafe {
            let val = cast::transmute(val);

            let p = atomic_swap(self.p.get(), val, order);
            if p as uint == 0 {
                None
            } else {
                Some(cast::transmute(p))
            }
        }
    }

    /// Remove the value, leaving the `AtomicOption` empty.
    #[inline]
    pub fn take(&self, order: Ordering) -> Option<Box<T>> {
        unsafe { self.swap(cast::transmute(0), order) }
    }

    /// Replace an empty value with a non-empty value.
    ///
    /// Succeeds (and returns `None`) if the option was empty. If the option
    /// already held a value, the new value is rejected and returned as
    /// `Some`.
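    ///
    /// # Examples
    ///
    /// A short illustration: filling succeeds only while the option is
    /// empty, and a rejected value is handed back to the caller:
    ///
    /// ```
    /// use std::sync::atomics::{AtomicOption, SeqCst};
    ///
    /// let option: AtomicOption<int> = AtomicOption::empty();
    ///
    /// // The option is empty, so the value is accepted.
    /// assert!(option.fill(box 1, SeqCst).is_none());
    ///
    /// // Now it is full, so the second value comes back to us.
    /// assert_eq!(option.fill(box 2, SeqCst), Some(box 2));
    /// ```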
    #[inline]
    pub fn fill(&self, val: Box<T>, order: Ordering) -> Option<Box<T>> {
        unsafe {
            let val = cast::transmute(val);
            let expected = cast::transmute(0);
            let oldval = atomic_compare_and_swap(self.p.get(), expected, val, order);
            if oldval == expected {
                None
            } else {
                Some(cast::transmute(val))
            }
        }
    }

    /// Returns `true` if the `AtomicOption` is empty.
    ///
    /// Be careful: The caller must have some external method of ensuring the
    /// result does not get invalidated by another task after this returns.
    #[inline]
    pub fn is_empty(&self, order: Ordering) -> bool {
        unsafe { atomic_load(self.p.get() as *uint, order) as uint == 0 }
    }
}

#[unsafe_destructor]
impl<T> Drop for AtomicOption<T> {
    fn drop(&mut self) {
        let _ = self.take(SeqCst);
    }
}

#[inline]
unsafe fn atomic_store<T>(dst: *mut T, val: T, order: Ordering) {
    match order {
        Release => intrinsics::atomic_store_rel(dst, val),
        Relaxed => intrinsics::atomic_store_relaxed(dst, val),
        _ => intrinsics::atomic_store(dst, val)
    }
}

#[inline]
unsafe fn atomic_load<T>(dst: *T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_load_acq(dst),
        Relaxed => intrinsics::atomic_load_relaxed(dst),
        _ => intrinsics::atomic_load(dst)
    }
}

#[inline]
unsafe fn atomic_swap<T>(dst: *mut T, val: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_xchg_acq(dst, val),
        Release => intrinsics::atomic_xchg_rel(dst, val),
        AcqRel => intrinsics::atomic_xchg_acqrel(dst, val),
        Relaxed => intrinsics::atomic_xchg_relaxed(dst, val),
        _ => intrinsics::atomic_xchg(dst, val)
    }
}

/// Returns the old value (like __sync_fetch_and_add).
#[inline]
unsafe fn atomic_add<T>(dst: *mut T, val: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_xadd_acq(dst, val),
        Release => intrinsics::atomic_xadd_rel(dst, val),
        AcqRel => intrinsics::atomic_xadd_acqrel(dst, val),
        Relaxed => intrinsics::atomic_xadd_relaxed(dst, val),
        _ => intrinsics::atomic_xadd(dst, val)
    }
}

/// Returns the old value (like __sync_fetch_and_sub).
#[inline]
unsafe fn atomic_sub<T>(dst: *mut T, val: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_xsub_acq(dst, val),
        Release => intrinsics::atomic_xsub_rel(dst, val),
        AcqRel => intrinsics::atomic_xsub_acqrel(dst, val),
        Relaxed => intrinsics::atomic_xsub_relaxed(dst, val),
        _ => intrinsics::atomic_xsub(dst, val)
    }
}

#[inline]
unsafe fn atomic_compare_and_swap<T>(dst: *mut T, old: T, new: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_cxchg_acq(dst, old, new),
        Release => intrinsics::atomic_cxchg_rel(dst, old, new),
        AcqRel => intrinsics::atomic_cxchg_acqrel(dst, old, new),
        Relaxed => intrinsics::atomic_cxchg_relaxed(dst, old, new),
        _ => intrinsics::atomic_cxchg(dst, old, new),
    }
}

#[inline]
unsafe fn atomic_and<T>(dst: *mut T, val: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_and_acq(dst, val),
        Release => intrinsics::atomic_and_rel(dst, val),
        AcqRel => intrinsics::atomic_and_acqrel(dst, val),
        Relaxed => intrinsics::atomic_and_relaxed(dst, val),
        _ => intrinsics::atomic_and(dst, val)
    }
}

#[inline]
unsafe fn atomic_nand<T>(dst: *mut T, val: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_nand_acq(dst, val),
        Release => intrinsics::atomic_nand_rel(dst, val),
        AcqRel => intrinsics::atomic_nand_acqrel(dst, val),
        Relaxed => intrinsics::atomic_nand_relaxed(dst, val),
        _ => intrinsics::atomic_nand(dst, val)
    }
}

#[inline]
unsafe fn atomic_or<T>(dst: *mut T, val: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_or_acq(dst, val),
        Release => intrinsics::atomic_or_rel(dst, val),
        AcqRel => intrinsics::atomic_or_acqrel(dst, val),
        Relaxed => intrinsics::atomic_or_relaxed(dst, val),
        _ => intrinsics::atomic_or(dst, val)
    }
}

#[inline]
unsafe fn atomic_xor<T>(dst: *mut T, val: T, order: Ordering) -> T {
    match order {
        Acquire => intrinsics::atomic_xor_acq(dst, val),
        Release => intrinsics::atomic_xor_rel(dst, val),
        AcqRel => intrinsics::atomic_xor_acqrel(dst, val),
        Relaxed => intrinsics::atomic_xor_relaxed(dst, val),
        _ => intrinsics::atomic_xor(dst, val)
    }
}

/// An atomic fence.
///
/// A fence 'A' which has `Release` ordering semantics, synchronizes with a
/// fence 'B' with (at least) `Acquire` semantics, if and only if there exist
/// atomic operations X and Y, both operating on some atomic object 'M', such
/// that A is sequenced before X, Y is sequenced before B, and Y observes
/// the change to M. This provides a happens-before dependence between A and B.
///
/// Atomic operations with `Release` or `Acquire` semantics can also synchronize
/// with a fence.
///
/// A fence which has `SeqCst` ordering, in addition to having both `Acquire`
/// and `Release` semantics, participates in the global program order of the
/// other `SeqCst` operations and/or fences.
///
/// Accepts `Acquire`, `Release`, `AcqRel` and `SeqCst` orderings.
///
/// # Failure
///
/// Fails if `order` is `Relaxed`
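///
/// # Examples
///
/// A minimal sketch of a release/acquire fence pair around a relaxed flag
/// (shown in one task for brevity; in real code the store and the load
/// would run on different tasks):
///
/// ```
/// use std::sync::atomics::{AtomicUint, Relaxed, Acquire, Release, fence};
///
/// let flag = AtomicUint::new(0);
///
/// fence(Release);         // writes before this fence are published...
/// flag.store(1, Relaxed);
///
/// if flag.load(Relaxed) == 1 {
///     fence(Acquire);     // ...and visible to reads after this one.
/// }
/// ```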
#[inline]
pub fn fence(order: Ordering) {
    unsafe {
        match order {
            Acquire => intrinsics::atomic_fence_acq(),
            Release => intrinsics::atomic_fence_rel(),
            AcqRel => intrinsics::atomic_fence_acqrel(),
            SeqCst => intrinsics::atomic_fence(),
            Relaxed => fail!("there is no such thing as a relaxed fence")
        }
    }
}

#[cfg(test)]
mod test {
    use option::*;
    use super::*;

    #[test]
    fn bool_() {
        let a = AtomicBool::new(false);
        assert_eq!(a.compare_and_swap(false, true, SeqCst), false);
        assert_eq!(a.compare_and_swap(false, true, SeqCst), true);

        a.store(false, SeqCst);
        assert_eq!(a.compare_and_swap(false, true, SeqCst), false);
    }

    #[test]
    fn option_empty() {
        let option: AtomicOption<()> = AtomicOption::empty();
        assert!(option.is_empty(SeqCst));
    }

    #[test]
    fn option_swap() {
        let p = AtomicOption::new(box 1);
        let a = box 2;

        let b = p.swap(a, SeqCst);

        assert_eq!(b, Some(box 1));
        assert_eq!(p.take(SeqCst), Some(box 2));
    }

    #[test]
    fn option_take() {
        let p = AtomicOption::new(box 1);

        assert_eq!(p.take(SeqCst), Some(box 1));
        assert_eq!(p.take(SeqCst), None);

        let p2 = box 2;
        p.swap(p2, SeqCst);

        assert_eq!(p.take(SeqCst), Some(box 2));
    }

    #[test]
    fn option_fill() {
        let p = AtomicOption::new(box 1);
        assert!(p.fill(box 2, SeqCst).is_some()); // should fail; shouldn't leak!
        assert_eq!(p.take(SeqCst), Some(box 1));

        assert!(p.fill(box 2, SeqCst).is_none()); // shouldn't fail
        assert_eq!(p.take(SeqCst), Some(box 2));
    }

    #[test]
    fn bool_and() {
        let a = AtomicBool::new(true);
        assert_eq!(a.fetch_and(false, SeqCst), true);
        assert_eq!(a.load(SeqCst), false);
    }

    #[test]
    fn uint_and() {
        let x = AtomicUint::new(0xf731);
        assert_eq!(x.fetch_and(0x137f, SeqCst), 0xf731);
        assert_eq!(x.load(SeqCst), 0xf731 & 0x137f);
    }

    #[test]
    fn uint_or() {
        let x = AtomicUint::new(0xf731);
        assert_eq!(x.fetch_or(0x137f, SeqCst), 0xf731);
        assert_eq!(x.load(SeqCst), 0xf731 | 0x137f);
    }

    #[test]
    fn uint_xor() {
        let x = AtomicUint::new(0xf731);
        assert_eq!(x.fetch_xor(0x137f, SeqCst), 0xf731);
        assert_eq!(x.load(SeqCst), 0xf731 ^ 0x137f);
    }

    #[test]
    fn int_and() {
        let x = AtomicInt::new(0xf731);
        assert_eq!(x.fetch_and(0x137f, SeqCst), 0xf731);
        assert_eq!(x.load(SeqCst), 0xf731 & 0x137f);
    }

    #[test]
    fn int_or() {
        let x = AtomicInt::new(0xf731);
        assert_eq!(x.fetch_or(0x137f, SeqCst), 0xf731);
        assert_eq!(x.load(SeqCst), 0xf731 | 0x137f);
    }

    #[test]
    fn int_xor() {
        let x = AtomicInt::new(0xf731);
        assert_eq!(x.fetch_xor(0x137f, SeqCst), 0xf731);
        assert_eq!(x.load(SeqCst), 0xf731 ^ 0x137f);
    }

    static mut S_BOOL: AtomicBool = INIT_ATOMIC_BOOL;
    static mut S_INT: AtomicInt = INIT_ATOMIC_INT;
    static mut S_UINT: AtomicUint = INIT_ATOMIC_UINT;

    #[test]
    fn static_init() {
        unsafe {
            assert!(!S_BOOL.load(SeqCst));
            assert!(S_INT.load(SeqCst) == 0);
            assert!(S_UINT.load(SeqCst) == 0);
        }
    }

    #[test]
    fn different_sizes() {
        unsafe {
            let mut slot = 0u16;
            assert_eq!(super::atomic_swap(&mut slot, 1, SeqCst), 0);

            let mut slot = 0u8;
            assert_eq!(super::atomic_compare_and_swap(&mut slot, 1, 2, SeqCst), 0);

            let slot = 0u32;
            assert_eq!(super::atomic_load(&slot, SeqCst), 0);

            let mut slot = 0u64;
            super::atomic_store(&mut slot, 2, SeqCst);
        }
    }
}