./libstd/sync/arc.rs
git branch: * master 5200215 auto merge of #14035 : alexcrichton/rust/experimental, r=huonw
modified: Fri May 9 13:02:28 2014
1 // Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
4 //
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
10
11 //! Atomically reference counted data
12 //!
13 //! This module contains the implementation of an atomically reference counted
14 //! pointer for the purpose of sharing data between tasks. This is obviously a
15 //! very unsafe primitive to use, but it has its use cases when implementing
16 //! concurrent data structures and similar tasks.
17 //!
18 //! Great care must be taken to ensure that data races do not arise through the
19 //! usage of `UnsafeArc`, and this often requires some form of external
20 //! synchronization. The only guarantee provided to you by this type is that
21 //! the underlying data will remain valid (not free'd) so long as the reference
22 //! count is greater than zero.
23
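// A minimal usage sketch, not taken from this file: it assumes the module is
// reachable as `std::sync::arc` and that `spawn`/`proc` are available as in the
// 2014 libstd. The inner value is an `AtomicUint`, which supplies its own
// synchronization; plain data would additionally need an external lock:
//
//     use std::sync::arc::UnsafeArc;
//     use std::sync::atomics::{AtomicUint, SeqCst};
//     use std::task::spawn;
//
//     let (arc1, arc2) = UnsafeArc::new2(AtomicUint::new(0));
//     spawn(proc() {
//         // `get` returns a raw pointer; the moved-in handle keeps the data alive.
//         unsafe { (*arc2.get()).fetch_add(1, SeqCst); }
//     });
//     unsafe { (*arc1.get()).fetch_add(1, SeqCst); }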
24 use cast;
25 use clone::Clone;
26 use iter::Iterator;
27 use kinds::Send;
28 use ops::Drop;
29 use owned::Box;
30 use ptr::RawPtr;
31 use sync::atomics::{fence, AtomicUint, Relaxed, Acquire, Release};
32 use ty::Unsafe;
33 use vec::Vec;
34
35 /// An atomically reference counted pointer.
36 ///
37 /// Enforces no shared-memory safety.
38 #[unsafe_no_drop_flag]
39 pub struct UnsafeArc<T> {
40 data: *mut ArcData<T>,
41 }
42
43 struct ArcData<T> {
44 count: AtomicUint,
45 data: Unsafe<T>,
46 }
47
48 unsafe fn new_inner<T: Send>(data: T, refcount: uint) -> *mut ArcData<T> {
49 let data = box ArcData {
50 count: AtomicUint::new(refcount),
51 data: Unsafe::new(data)
52 };
53 cast::transmute(data)
54 }
55
56 impl<T: Send> UnsafeArc<T> {
57 /// Creates a new `UnsafeArc` which wraps the given data.
58 pub fn new(data: T) -> UnsafeArc<T> {
59 unsafe { UnsafeArc { data: new_inner(data, 1) } }
60 }
61
62 /// As new(), but returns an extra pre-cloned handle.
63 pub fn new2(data: T) -> (UnsafeArc<T>, UnsafeArc<T>) {
64 unsafe {
65 let ptr = new_inner(data, 2);
66 (UnsafeArc { data: ptr }, UnsafeArc { data: ptr })
67 }
68 }
69
70 /// As new(), but returns a vector of as many pre-cloned handles as
71 /// requested.
72 pub fn newN(data: T, num_handles: uint) -> Vec<UnsafeArc<T>> {
73 unsafe {
74 if num_handles == 0 {
75 vec![] // need to free data here
76 } else {
77 let ptr = new_inner(data, num_handles);
78 let v = Vec::from_fn(num_handles, |_| UnsafeArc { data: ptr });
79 v
80 }
81 }
82 }
83
84 /// Gets a pointer to the inner shared data. Note that care must be taken to
85 /// ensure that the outer `UnsafeArc` does not fall out of scope while this
86 /// pointer is in use; otherwise dereferencing it may be a use-after-free (see the sketch after this impl block).
87 #[inline]
88 pub fn get(&self) -> *mut T {
89 unsafe {
90 debug_assert!((*self.data).count.load(Relaxed) > 0);
91 return (*self.data).data.get();
92 }
93 }
94
95 /// Gets an immutable pointer to the inner shared data. This has the same
96 /// caveats as the `get` method.
97 #[inline]
98 pub fn get_immut(&self) -> *T {
99 unsafe {
100 debug_assert!((*self.data).count.load(Relaxed) > 0);
101 return (*self.data).data.get() as *T;
102 }
103 }
104
105 /// Checks whether this is the only reference to the arc-protected data.
106 #[inline]
107 pub fn is_owned(&self) -> bool {
108 unsafe {
109 (*self.data).count.load(Relaxed) == 1
110 }
111 }
112 }
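// A small illustrative sketch of the `get` caveat referenced above (not from the
// original file): the raw pointer must not outlive the last handle.
//
//     let p;
//     {
//         let arc = UnsafeArc::new(5u);
//         p = arc.get();
//         unsafe { *p = 6; }   // fine: `arc` is still in scope, count > 0
//     }
//     // unsafe { *p = 7; }    // use-after-free: the last handle was dropped above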
113
114 impl<T: Send> Clone for UnsafeArc<T> {
115 fn clone(&self) -> UnsafeArc<T> {
116 unsafe {
117 // Using a relaxed ordering is alright here, as knowledge of the original reference
118 // prevents other threads from erroneously deleting the object.
119 //
120 // As explained in the [Boost documentation][1],
121 // Increasing the reference counter can always be done with memory_order_relaxed: New
122 // references to an object can only be formed from an existing reference, and passing
123 // an existing reference from one thread to another must already provide any required
124 // synchronization.
125 // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
126 let old_count = (*self.data).count.fetch_add(1, Relaxed);
127 debug_assert!(old_count >= 1);
128 return UnsafeArc { data: self.data };
129 }
130 }
131 }
132
133 #[unsafe_destructor]
134 impl<T> Drop for UnsafeArc<T> {
135 fn drop(&mut self) {
136 unsafe {
137 // The pointer can be null here: this happens when destroying an unwrapper's
138 // handle, and because `#[unsafe_no_drop_flag]` lets drop run on a zeroed-out value.
139 if self.data.is_null() {
140 return
141 }
142 // Because `fetch_sub` is already atomic, we do not need to synchronize with other
143 // threads unless we are going to delete the object.
144 let old_count = (*self.data).count.fetch_sub(1, Release);
145 debug_assert!(old_count >= 1);
146 if old_count == 1 {
147 // This fence is needed to prevent reordering of use of the data and deletion of
148 // the data. Because it is marked `Release`, the decreasing of the reference count
149 // synchronizes with this `Acquire` fence. This means that use of the data happens
150 // before decreasing the reference count, which happens before this fence, which
151 // happens before the deletion of the data.
152 //
153 // As explained in the [Boost documentation][1],
154 // It is important to enforce any possible access to the object in one thread
155 // (through an existing reference) to *happen before* deleting the object in a
156 // different thread. This is achieved by a "release" operation after dropping a
157 // reference (any access to the object through this reference must obviously
158 // happened before), and an "acquire" operation before deleting the object.
159 // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
160 fence(Acquire);
161 let _: Box<ArcData<T>> = cast::transmute(self.data);
162 }
163 }
164 }
165 }
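// The ordering argument above, restated as a two-handle timeline (a paraphrase of
// the comments in `drop`, not additional behaviour). Handles A and B share one
// `ArcData` with an initial count of 2:
//
//     A: uses data ... fetch_sub(1, Release) returns 2  -> count now 1, no free
//     B: uses data ... fetch_sub(1, Release) returns 1  -> count now 0
//     B: fence(Acquire)   // synchronizes-with A's Release decrement
//     B: frees ArcData    // hence all of A's uses happen-before the free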
166
167 #[cfg(test)]
168 mod tests {
169 use prelude::*;
170 use super::UnsafeArc;
171 use mem::size_of;
172
173 #[test]
174 fn test_size() {
175 assert_eq!(size_of::<UnsafeArc<[int, ..10]>>(), size_of::<*[int, ..10]>());
176 }
177
178 #[test]
179 fn arclike_newN() {
180 // Tests that the many-refcounts-at-once constructors don't leak.
181 let _ = UnsafeArc::new2("hello".to_owned().to_owned());
182 let x = UnsafeArc::newN("hello".to_owned().to_owned(), 0);
183 assert_eq!(x.len(), 0);
184 let x = UnsafeArc::newN("hello".to_owned().to_owned(), 1);
185 assert_eq!(x.len(), 1);
186 let x = UnsafeArc::newN("hello".to_owned().to_owned(), 10);
187 assert_eq!(x.len(), 10);
188 }
189 }
libstd/sync/arc.rs:42:1-42:1 -struct- definition:
struct ArcData<T> {
count: AtomicUint,
data: Unsafe<T>,
references:- 4
48: unsafe fn new_inner<T: Send>(data: T, refcount: uint) -> *mut ArcData<T> {
49: let data = box ArcData {
50: count: AtomicUint::new(refcount),
--
160: fence(Acquire);
161: let _: Box<ArcData<T>> = cast::transmute(self.data);
162: }
libstd/sync/arc.rs:47:1-47:1 -fn- definition:
unsafe fn new_inner<T: Send>(data: T, refcount: uint) -> *mut ArcData<T> {
let data = box ArcData {
count: AtomicUint::new(refcount),
references:- 3
58: pub fn new(data: T) -> UnsafeArc<T> {
59: unsafe { UnsafeArc { data: new_inner(data, 1) } }
60: }
--
76: } else {
77: let ptr = new_inner(data, num_handles);
78: let v = Vec::from_fn(num_handles, |_| UnsafeArc { data: ptr });
libstd/sync/arc.rs:38:23-38:23 -struct- definition:
pub struct UnsafeArc<T> {
data: *mut ArcData<T>,
}
references:- 26
65: let ptr = new_inner(data, 2);
66: (UnsafeArc { data: ptr }, UnsafeArc { data: ptr })
67: }
--
127: debug_assert!(old_count >= 1);
128: return UnsafeArc { data: self.data };
129: }
--
134: impl<T> Drop for UnsafeArc<T>{
135: fn drop(&mut self) {
libstd/sync/deque.rs:
90: pub struct Worker<T> {
91: deque: UnsafeArc<Deque<T>>,
92: }
--
97: pub struct Stealer<T> {
98: deque: UnsafeArc<Deque<T>>,
99: }
libstd/sync/mpmc_bounded_queue.rs:
56: pub struct Queue<T> {
57: state: UnsafeArc<State<T>>,
58: }
libstd/unstable/sync.rs:
32: pub struct Exclusive<T> {
33: x: UnsafeArc<ExData<T>>
34: }
libstd/rt/task.rs:
68: Owned(Box<Task>),
69: Shared(UnsafeArc<AtomicUint>),
70: }
--
378: } else {
379: let ptr: Box<UnsafeArc<AtomicUint>> =
380: cast::transmute(blocked_task_ptr & !1);
libstd/comm/mod.rs:
354: pub struct SyncSender<T> {
355: inner: UnsafeArc<sync::Packet<T>>,
356: // can't share in an arc
--
389: Oneshot(UnsafeArc<oneshot::Packet<T>>),
390: Stream(UnsafeArc<stream::Packet<T>>),
391: Shared(UnsafeArc<shared::Packet<T>>),
392: Sync(UnsafeArc<sync::Packet<T>>),
--
647: impl<T: Send> SyncSender<T> {
648: fn new(inner: UnsafeArc<sync::Packet<T>>) -> SyncSender<T> {
649: SyncSender { inner: inner, marker: marker::NoShare }