(index<- ) ./libsync/lock.rs
git branch: * master 5200215 auto merge of #14035 : alexcrichton/rust/experimental, r=huonw
modified: Fri Apr 25 22:40:04 2014
1 // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
4 //
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
10
11 //! Wrappers for safe, shared, mutable memory between tasks
12 //!
13 //! The wrappers in this module build on the primitives from `sync::raw` to
14 //! provide safe interfaces around using the primitive locks. These primitives
15 //! implement a technique called "poisoning" where when a task failed with a
16 //! held lock, all future attempts to use the lock will fail.
17 //!
18 //! For example, if two tasks are contending on a mutex and one of them fails
19 //! after grabbing the lock, the second task will immediately fail because the
20 //! lock is now poisoned.
21
22 use std::task;
23 use std::ty::Unsafe;
24
25 use raw;
26
27 /****************************************************************************
28 * Poisoning helpers
29 ****************************************************************************/
30
// RAII helper that poisons a lock if the owning task begins failing while
// the lock is held. The flag lives inside the lock's protected state.
struct PoisonOnFail<'a> {
    // Poison flag borrowed from the lock; set to true in drop() on failure.
    flag: &'a mut bool,
    // Whether the task was *already* failing when this helper was created,
    // so a pre-existing unwind does not poison the lock (see drop()).
    failed: bool,
}
35
impl<'a> PoisonOnFail<'a> {
    // Fail the current task if `flag` is set, i.e. another task failed while
    // holding the lock named `name`.
    fn check(flag: bool, name: &str) {
        if flag {
            fail!("Poisoned {} - another task failed inside!", name);
        }
    }

    // Check the poison flag (failing immediately if set) and build a helper
    // that will poison the lock if this task starts failing while it holds it.
    // NOTE(review): the `'a` here shadows the impl's lifetime parameter;
    // accepted by this era's compiler, but confusing to read.
    fn new<'a>(flag: &'a mut bool, name: &str) -> PoisonOnFail<'a> {
        PoisonOnFail::check(*flag, name);
        PoisonOnFail {
            flag: flag,
            // Record whether we were already unwinding at acquisition time.
            failed: task::failing()
        }
    }
}
51
#[unsafe_destructor]
impl<'a> Drop for PoisonOnFail<'a> {
    fn drop(&mut self) {
        // Poison only if the task *started* failing after this helper was
        // created; a task that was already unwinding when it acquired the
        // lock (self.failed == true) must not poison it.
        if !self.failed && task::failing() {
            *self.flag = true;
        }
    }
}
60
61 /****************************************************************************
62 * Condvar
63 ****************************************************************************/
64
// The raw lock guard a Condvar is tied to: either a mutex guard or an
// rwlock guard held in write mode. Both expose a `cond` field.
enum Inner<'a> {
    InnerMutex(raw::MutexGuard<'a>),
    InnerRWLock(raw::RWLockWriteGuard<'a>),
}
69
impl<'b> Inner<'b> {
    // Borrow the raw condvar belonging to whichever guard variant is held.
    fn cond<'a>(&'a self) -> &'a raw::Condvar<'b> {
        match *self {
            InnerMutex(ref m) => &m.cond,
            InnerRWLock(ref m) => &m.cond,
        }
    }
}
78
/// A condition variable, a mechanism for unlock-and-descheduling and
/// signaling, for use with the lock types.
pub struct Condvar<'a> {
    // Lock type name ("Mutex" / "RWLock"), used in poison failure messages.
    name: &'static str,
    // n.b. Inner must be after PoisonOnFail because we must set the poison flag
    // *inside* the mutex, and struct fields are destroyed top-to-bottom
    // (destroy the lock guard last).
    poison: PoisonOnFail<'a>,
    inner: Inner<'a>,
}
89
impl<'a> Condvar<'a> {
    /// Atomically exit the associated lock and block until a signal is sent.
    ///
    /// wait() is equivalent to wait_on(0).
    ///
    /// # Failure
    ///
    /// A task which is killed while waiting on a condition variable will wake
    /// up, fail, and unlock the associated lock as it unwinds.
    #[inline]
    pub fn wait(&self) { self.wait_on(0) }

    /// Atomically exit the associated lock and block on a specified condvar
    /// until a signal is sent on that same condvar.
    ///
    /// The associated lock must have been initialised with an appropriate
    /// number of condvars. The condvar_id must be between 0 and num_condvars-1
    /// or else this call will fail.
    #[inline]
    pub fn wait_on(&self, condvar_id: uint) {
        // Sanity check: the lock could not have been acquired while poisoned
        // (PoisonOnFail::new would have failed), so the flag must be clear.
        assert!(!*self.poison.flag);
        self.inner.cond().wait_on(condvar_id);
        // Another task may have failed (and poisoned) while we were asleep;
        // re-check on wakeup. This is why we need to wrap sync::condvar.
        PoisonOnFail::check(*self.poison.flag, self.name);
    }

    /// Wake up a blocked task. Returns false if there was no blocked task.
    #[inline]
    pub fn signal(&self) -> bool { self.signal_on(0) }

    /// Wake up a blocked task on a specified condvar (as
    /// sync::cond.signal_on). Returns false if there was no blocked task.
    #[inline]
    pub fn signal_on(&self, condvar_id: uint) -> bool {
        assert!(!*self.poison.flag);
        self.inner.cond().signal_on(condvar_id)
    }

    /// Wake up all blocked tasks. Returns the number of tasks woken.
    #[inline]
    pub fn broadcast(&self) -> uint { self.broadcast_on(0) }

    /// Wake up all blocked tasks on a specified condvar (as
    /// sync::cond.broadcast_on). Returns the number of tasks woken.
    #[inline]
    pub fn broadcast_on(&self, condvar_id: uint) -> uint {
        assert!(!*self.poison.flag);
        self.inner.cond().broadcast_on(condvar_id)
    }
}
140
141 /****************************************************************************
142 * Mutex
143 ****************************************************************************/
144
/// A wrapper type which provides synchronized access to the underlying data, of
/// type `T`. A mutex always provides exclusive access, and concurrent requests
/// will block while the mutex is already locked.
///
/// # Example
///
/// ```
/// use sync::{Mutex, Arc};
///
/// let mutex = Arc::new(Mutex::new(1));
/// let mutex2 = mutex.clone();
///
/// spawn(proc() {
///     let mut val = mutex2.lock();
///     *val += 1;
///     val.cond.signal();
/// });
///
/// let mut value = mutex.lock();
/// while *value != 2 {
///     value.cond.wait();
/// }
/// ```
pub struct Mutex<T> {
    // Underlying raw mutex providing the actual exclusion.
    lock: raw::Mutex,
    // Poison flag; only read/written while `lock` is held (see lock()).
    failed: Unsafe<bool>,
    // The user data protected by `lock`.
    data: Unsafe<T>,
}
173
/// A guard which is created by locking a mutex. Through this guard the
/// underlying data can be accessed.
pub struct MutexGuard<'a, T> {
    // Exclusive borrow of the protected data, valid while the lock is held.
    data: &'a mut T,
    /// Inner condition variable connected to the locked mutex that this guard
    /// was created from. This can be used for atomic-unlock-and-deschedule.
    pub cond: Condvar<'a>,
}
182
impl<T: Send> Mutex<T> {
    /// Creates a new mutex to protect the user-supplied data.
    pub fn new(user_data: T) -> Mutex<T> {
        Mutex::new_with_condvars(user_data, 1)
    }

    /// Create a new mutex, with a specified number of associated condvars.
    ///
    /// This will allow calling wait_on/signal_on/broadcast_on with condvar IDs
    /// between 0 and num_condvars-1. (If num_condvars is 0, locking will still
    /// be allowed but any operations on the condvar will fail.)
    pub fn new_with_condvars(user_data: T, num_condvars: uint) -> Mutex<T> {
        Mutex {
            lock: raw::Mutex::new_with_condvars(num_condvars),
            failed: Unsafe::new(false),
            data: Unsafe::new(user_data),
        }
    }

    /// Access the underlying mutable data with mutual exclusion from other
    /// tasks. The returned value is an RAII guard which will unlock the mutex
    /// when dropped. All concurrent tasks attempting to lock the mutex will
    /// block while the returned value is still alive.
    ///
    /// # Failure
    ///
    /// Failing while inside the Mutex will unlock the Mutex while unwinding, so
    /// that other tasks won't block forever. It will also poison the Mutex:
    /// any tasks that subsequently try to access it (including those already
    /// blocked on the mutex) will also fail immediately.
    #[inline]
    pub fn lock<'a>(&'a self) -> MutexGuard<'a, T> {
        let guard = self.lock.lock();

        // These two accesses are safe because we're guaranteed at this point
        // that we have exclusive access to this mutex. We are indeed able to
        // promote ourselves from &Mutex to `&mut T`
        let poison = unsafe { &mut *self.failed.get() };
        let data = unsafe { &mut *self.data.get() };

        MutexGuard {
            data: data,
            cond: Condvar {
                name: "Mutex",
                // Fails here if the mutex is already poisoned.
                poison: PoisonOnFail::new(poison, "Mutex"),
                inner: InnerMutex(guard),
            },
        }
    }
}
233
// Allow `*guard` to read the protected data while the lock is held.
impl<'a, T: Send> Deref<T> for MutexGuard<'a, T> {
    fn deref<'a>(&'a self) -> &'a T { &*self.data }
}
// Allow `*guard = ...` to mutate the protected data while the lock is held.
impl<'a, T: Send> DerefMut<T> for MutexGuard<'a, T> {
    fn deref_mut<'a>(&'a mut self) -> &'a mut T { &mut *self.data }
}
240
241 /****************************************************************************
242 * R/W lock protected lock
243 ****************************************************************************/
244
/// A dual-mode reader-writer lock. The data can be accessed mutably or
/// immutably, and immutably-accessing tasks may run concurrently.
///
/// # Example
///
/// ```
/// use sync::{RWLock, Arc};
///
/// let lock1 = Arc::new(RWLock::new(1));
/// let lock2 = lock1.clone();
///
/// spawn(proc() {
///     let mut val = lock2.write();
///     *val = 3;
///     let val = val.downgrade();
///     println!("{}", *val);
/// });
///
/// let val = lock1.read();
/// println!("{}", *val);
/// ```
pub struct RWLock<T> {
    // Underlying raw reader-writer lock.
    lock: raw::RWLock,
    // Poison flag; written only while the write lock is held (see write()).
    failed: Unsafe<bool>,
    // The user data protected by `lock`.
    data: Unsafe<T>,
}
271
/// A guard which is created by locking an rwlock in write mode. Through this
/// guard the underlying data can be accessed.
pub struct RWLockWriteGuard<'a, T> {
    // Exclusive borrow of the protected data, valid while write mode is held.
    data: &'a mut T,
    /// Inner condition variable that can be used to sleep on the write mode of
    /// this rwlock.
    pub cond: Condvar<'a>,
}
280
/// A guard which is created by locking an rwlock in read mode. Through this
/// guard the underlying data can be accessed.
pub struct RWLockReadGuard<'a, T> {
    // Shared borrow of the protected data, valid while read mode is held.
    data: &'a T,
    // Raw read guard; releases the read lock when dropped.
    guard: raw::RWLockReadGuard<'a>,
}
287
impl<T: Send + Share> RWLock<T> {
    /// Create a reader/writer lock with the supplied data.
    pub fn new(user_data: T) -> RWLock<T> {
        RWLock::new_with_condvars(user_data, 1)
    }

    /// Create a reader/writer lock with the supplied data and a specified number
    /// of condvars (as sync::RWLock::new_with_condvars).
    pub fn new_with_condvars(user_data: T, num_condvars: uint) -> RWLock<T> {
        RWLock {
            lock: raw::RWLock::new_with_condvars(num_condvars),
            failed: Unsafe::new(false),
            data: Unsafe::new(user_data),
        }
    }

    /// Access the underlying data mutably. Locks the rwlock in write mode;
    /// other readers and writers will block.
    ///
    /// # Failure
    ///
    /// Failing while inside the lock will unlock the lock while unwinding, so
    /// that other tasks won't block forever. As Mutex.lock, it will also poison
    /// the lock, so subsequent readers and writers will both also fail.
    #[inline]
    pub fn write<'a>(&'a self) -> RWLockWriteGuard<'a, T> {
        let guard = self.lock.write();

        // These two accesses are safe because we're guaranteed at this point
        // that we have exclusive access to this rwlock. We are indeed able to
        // promote ourselves from &RWLock to `&mut T`
        let poison = unsafe { &mut *self.failed.get() };
        let data = unsafe { &mut *self.data.get() };

        RWLockWriteGuard {
            data: data,
            cond: Condvar {
                name: "RWLock",
                // Fails here if the rwlock is already poisoned.
                poison: PoisonOnFail::new(poison, "RWLock"),
                inner: InnerRWLock(guard),
            },
        }
    }

    /// Access the underlying data immutably. May run concurrently with other
    /// reading tasks.
    ///
    /// # Failure
    ///
    /// Failing will unlock the lock while unwinding. However, unlike all other
    /// access modes, this will not poison the lock.
    pub fn read<'a>(&'a self) -> RWLockReadGuard<'a, T> {
        let guard = self.lock.read();
        // Readers check for poison but never set it (no PoisonOnFail helper).
        PoisonOnFail::check(unsafe { *self.failed.get() }, "RWLock");
        RWLockReadGuard {
            guard: guard,
            data: unsafe { &*self.data.get() },
        }
    }
}
348
impl<'a, T: Send + Share> RWLockWriteGuard<'a, T> {
    /// Consumes this write lock token, returning a new read lock token.
    ///
    /// This will allow pending readers to come into the lock.
    pub fn downgrade(self) -> RWLockReadGuard<'a, T> {
        let RWLockWriteGuard { data, cond } = self;
        // convert the data to read-only explicitly
        let data = &*data;
        let guard = match cond.inner {
            // A write guard's condvar always wraps an rwlock write guard,
            // never a mutex guard (see RWLock::write).
            InnerMutex(..) => unreachable!(),
            InnerRWLock(guard) => guard.downgrade()
        };
        RWLockReadGuard { guard: guard, data: data }
    }
}
364
// Allow `*guard` to read the data through either kind of rwlock guard;
// only the write guard additionally permits mutation via DerefMut.
impl<'a, T: Send + Share> Deref<T> for RWLockReadGuard<'a, T> {
    fn deref<'a>(&'a self) -> &'a T { self.data }
}
impl<'a, T: Send + Share> Deref<T> for RWLockWriteGuard<'a, T> {
    fn deref<'a>(&'a self) -> &'a T { &*self.data }
}
impl<'a, T: Send + Share> DerefMut<T> for RWLockWriteGuard<'a, T> {
    fn deref_mut<'a>(&'a mut self) -> &'a mut T { &mut *self.data }
}
374
375 /****************************************************************************
376 * Barrier
377 ****************************************************************************/
378
/// A barrier enables multiple tasks to synchronize the beginning
/// of some computation.
///
/// ```rust
/// use sync::{Arc, Barrier};
///
/// let barrier = Arc::new(Barrier::new(10));
/// for _ in range(0, 10) {
///     let c = barrier.clone();
///     // The same messages will be printed together.
///     // You will NOT see any interleaving.
///     spawn(proc() {
///         println!("before wait");
///         c.wait();
///         println!("after wait");
///     });
/// }
/// ```
pub struct Barrier {
    // Mutex + condvar protecting the barrier's shared state.
    lock: Mutex<BarrierState>,
    // Number of tasks that must call wait() before any are released.
    num_tasks: uint,
}
401
// The inner state of a double barrier
struct BarrierState {
    // Number of tasks currently waiting in this generation.
    count: uint,
    // Incremented each time the barrier trips; lets a waiter distinguish
    // its own generation from the next one (guards against reuse races).
    generation_id: uint,
}
407
impl Barrier {
    /// Create a new barrier that can block a given number of tasks.
    pub fn new(num_tasks: uint) -> Barrier {
        Barrier {
            lock: Mutex::new(BarrierState {
                count: 0,
                generation_id: 0,
            }),
            num_tasks: num_tasks,
        }
    }

    /// Block the current task until a certain number of tasks is waiting.
    pub fn wait(&self) {
        let mut lock = self.lock.lock();
        // Remember which generation we joined; the last arriver bumps it.
        let local_gen = lock.generation_id;
        lock.count += 1;
        if lock.count < self.num_tasks {
            // We need a while loop to guard against spurious wakeups.
            // http://en.wikipedia.org/wiki/Spurious_wakeup
            while local_gen == lock.generation_id &&
                  lock.count < self.num_tasks {
                lock.cond.wait();
            }
        } else {
            // Last task to arrive: reset for the next generation and
            // release everyone currently waiting.
            lock.count = 0;
            lock.generation_id += 1;
            lock.cond.broadcast();
        }
    }
}
439
440 /****************************************************************************
441 * Tests
442 ****************************************************************************/
443
#[cfg(test)]
mod tests {
    use std::comm::Empty;
    use std::task;
    use std::task::TaskBuilder;

    use arc::Arc;
    use super::{Mutex, Barrier, RWLock};

    // Parent waits on the condvar until a child task flips the flag.
    #[test]
    fn test_mutex_arc_condvar() {
        let arc = Arc::new(Mutex::new(false));
        let arc2 = arc.clone();
        let (tx, rx) = channel();
        task::spawn(proc() {
            // wait until parent gets in
            rx.recv();
            let mut lock = arc2.lock();
            *lock = true;
            lock.cond.signal();
        });

        let lock = arc.lock();
        tx.send(());
        assert!(!*lock);
        while !*lock {
            lock.cond.wait();
        }
    }

    // A task failing while we wait on its signal must poison the mutex,
    // causing our post-wait poison check to fail.
    #[test] #[should_fail]
    fn test_arc_condvar_poison() {
        let arc = Arc::new(Mutex::new(1));
        let arc2 = arc.clone();
        let (tx, rx) = channel();

        spawn(proc() {
            rx.recv();
            let lock = arc2.lock();
            lock.cond.signal();
            // Parent should fail when it wakes up.
            fail!();
        });

        let lock = arc.lock();
        tx.send(());
        while *lock == 1 {
            lock.cond.wait();
        }
    }

    // A failed assertion inside the lock poisons it; the later lock() fails.
    #[test] #[should_fail]
    fn test_mutex_arc_poison() {
        let arc = Arc::new(Mutex::new(1));
        let arc2 = arc.clone();
        let _ = task::try(proc() {
            let lock = arc2.lock();
            assert_eq!(*lock, 2);
        });
        let lock = arc.lock();
        assert_eq!(*lock, 1);
    }

    #[test]
    fn test_mutex_arc_nested() {
        // Tests nested mutexes and access
        // to underlying data.
        let arc = Arc::new(Mutex::new(1));
        let arc2 = Arc::new(Mutex::new(arc));
        task::spawn(proc() {
            let lock = arc2.lock();
            let lock2 = lock.deref().lock();
            assert_eq!(*lock2, 1);
        });
    }

    // Locking the mutex from a destructor during unwinding must still work.
    #[test]
    fn test_mutex_arc_access_in_unwind() {
        let arc = Arc::new(Mutex::new(1i));
        let arc2 = arc.clone();
        let _ = task::try::<()>(proc() {
            struct Unwinder {
                i: Arc<Mutex<int>>,
            }
            impl Drop for Unwinder {
                fn drop(&mut self) {
                    let mut lock = self.i.lock();
                    *lock += 1;
                }
            }
            let _u = Unwinder { i: arc2 };
            fail!();
        });
        let lock = arc.lock();
        assert_eq!(*lock, 2);
    }

    // Failure while writing poisons subsequent reads...
    #[test] #[should_fail]
    fn test_rw_arc_poison_wr() {
        let arc = Arc::new(RWLock::new(1));
        let arc2 = arc.clone();
        let _ = task::try(proc() {
            let lock = arc2.write();
            assert_eq!(*lock, 2);
        });
        let lock = arc.read();
        assert_eq!(*lock, 1);
    }
    // ...and subsequent writes.
    #[test] #[should_fail]
    fn test_rw_arc_poison_ww() {
        let arc = Arc::new(RWLock::new(1));
        let arc2 = arc.clone();
        let _ = task::try(proc() {
            let lock = arc2.write();
            assert_eq!(*lock, 2);
        });
        let lock = arc.write();
        assert_eq!(*lock, 1);
    }
    // Failure while reading does NOT poison (readers can't mutate state).
    #[test]
    fn test_rw_arc_no_poison_rr() {
        let arc = Arc::new(RWLock::new(1));
        let arc2 = arc.clone();
        let _ = task::try(proc() {
            let lock = arc2.read();
            assert_eq!(*lock, 2);
        });
        let lock = arc.read();
        assert_eq!(*lock, 1);
    }
    #[test]
    fn test_rw_arc_no_poison_rw() {
        let arc = Arc::new(RWLock::new(1));
        let arc2 = arc.clone();
        let _ = task::try(proc() {
            let lock = arc2.read();
            assert_eq!(*lock, 2);
        });
        let lock = arc.write();
        assert_eq!(*lock, 1);
    }
    // Failure while holding a downgraded (read) lock does not poison either.
    #[test]
    fn test_rw_arc_no_poison_dr() {
        let arc = Arc::new(RWLock::new(1));
        let arc2 = arc.clone();
        let _ = task::try(proc() {
            let lock = arc2.write().downgrade();
            assert_eq!(*lock, 2);
        });
        let lock = arc.write();
        assert_eq!(*lock, 1);
    }

    // A writer repeatedly leaves the value in a "bad" (-1) state mid-update;
    // concurrent readers must never observe it.
    #[test]
    fn test_rw_arc() {
        let arc = Arc::new(RWLock::new(0));
        let arc2 = arc.clone();
        let (tx, rx) = channel();

        task::spawn(proc() {
            let mut lock = arc2.write();
            for _ in range(0, 10) {
                let tmp = *lock;
                *lock = -1;
                task::deschedule();
                *lock = tmp + 1;
            }
            tx.send(());
        });

        // Readers try to catch the writer in the act
        let mut children = Vec::new();
        for _ in range(0, 5) {
            let arc3 = arc.clone();
            let mut builder = TaskBuilder::new();
            children.push(builder.future_result());
            builder.spawn(proc() {
                let lock = arc3.read();
                assert!(*lock >= 0);
            });
        }

        // Wait for children to pass their asserts
        for r in children.mut_iter() {
            assert!(r.recv().is_ok());
        }

        // Wait for writer to finish
        rx.recv();
        let lock = arc.read();
        assert_eq!(*lock, 10);
    }

    // Write-locking from a destructor during unwinding must still work.
    #[test]
    fn test_rw_arc_access_in_unwind() {
        let arc = Arc::new(RWLock::new(1i));
        let arc2 = arc.clone();
        let _ = task::try::<()>(proc() {
            struct Unwinder {
                i: Arc<RWLock<int>>,
            }
            impl Drop for Unwinder {
                fn drop(&mut self) {
                    let mut lock = self.i.write();
                    *lock += 1;
                }
            }
            let _u = Unwinder { i: arc2 };
            fail!();
        });
        let lock = arc.read();
        assert_eq!(*lock, 2);
    }

    #[test]
    fn test_rw_downgrade() {
        // (1) A downgrader gets in write mode and does cond.wait.
        // (2) A writer gets in write mode, sets state to 42, and does signal.
        // (3) Downgrader wakes, sets state to 31337.
        // (4) tells writer and all other readers to contend as it downgrades.
        // (5) Writer attempts to set state back to 42, while downgraded task
        //     and all reader tasks assert that it's 31337.
        let arc = Arc::new(RWLock::new(0));

        // Reader tasks
        let mut reader_convos = Vec::new();
        for _ in range(0, 10) {
            let ((tx1, rx1), (tx2, rx2)) = (channel(), channel());
            reader_convos.push((tx1, rx2));
            let arcn = arc.clone();
            task::spawn(proc() {
                rx1.recv(); // wait for downgrader to give go-ahead
                let lock = arcn.read();
                assert_eq!(*lock, 31337);
                tx2.send(());
            });
        }

        // Writer task
        let arc2 = arc.clone();
        let ((tx1, rx1), (tx2, rx2)) = (channel(), channel());
        task::spawn(proc() {
            rx1.recv();
            {
                let mut lock = arc2.write();
                assert_eq!(*lock, 0);
                *lock = 42;
                lock.cond.signal();
            }
            rx1.recv();
            {
                let mut lock = arc2.write();
                // This shouldn't happen until after the downgrade read
                // section, and all other readers, finish.
                assert_eq!(*lock, 31337);
                *lock = 42;
            }
            tx2.send(());
        });

        // Downgrader (us)
        let mut lock = arc.write();
        tx1.send(()); // send to another writer who will wake us up
        while *lock == 0 {
            lock.cond.wait();
        }
        assert_eq!(*lock, 42);
        *lock = 31337;
        // send to other readers
        for &(ref mut rc, _) in reader_convos.mut_iter() {
            rc.send(())
        }
        let lock = lock.downgrade();
        // complete handshake with other readers
        for &(_, ref mut rp) in reader_convos.mut_iter() {
            rp.recv()
        }
        tx1.send(()); // tell writer to try again
        assert_eq!(*lock, 31337);
        drop(lock);

        rx2.recv(); // complete handshake with writer
    }

    #[cfg(test)]
    fn test_rw_write_cond_downgrade_read_race_helper() {
        // Tests that when a downgrader hands off the "reader cloud" lock
        // because of a contending reader, a writer can't race to get it
        // instead, which would result in readers_and_writers. This tests
        // the raw module rather than this one, but it's here because an
        // rwarc gives us extra shared state to help check for the race.
        let x = Arc::new(RWLock::new(true));
        let (tx, rx) = channel();

        // writer task
        let xw = x.clone();
        task::spawn(proc() {
            let mut lock = xw.write();
            tx.send(()); // tell downgrader it's ok to go
            lock.cond.wait();
            // The core of the test is here: the condvar reacquire path
            // must involve order_lock, so that it cannot race with a reader
            // trying to receive the "reader cloud lock hand-off".
            *lock = false;
        });

        rx.recv(); // wait for writer to get in

        let lock = x.write();
        assert!(*lock);
        // make writer contend in the cond-reacquire path
        lock.cond.signal();
        // make a reader task to trigger the "reader cloud lock" handoff
        let xr = x.clone();
        let (tx, rx) = channel();
        task::spawn(proc() {
            tx.send(());
            drop(xr.read());
        });
        rx.recv(); // wait for reader task to exist

        let lock = lock.downgrade();
        // if writer mistakenly got in, make sure it mutates state
        // before we assert on it
        for _ in range(0, 5) { task::deschedule(); }
        // make sure writer didn't get in.
        assert!(*lock);
    }
    #[test]
    fn test_rw_write_cond_downgrade_read_race() {
        // Ideally the above test case would have deschedule statements in it
        // that helped to expose the race nearly 100% of the time... but adding
        // deschedules in the intuitively-right locations made it even less
        // likely, and I wasn't sure why :( . This is a mediocre "next best"
        // option.
        for _ in range(0, 8) {
            test_rw_write_cond_downgrade_read_race_helper();
        }
    }

    /************************************************************************
     * Barrier tests
     ************************************************************************/
    #[test]
    fn test_barrier() {
        let barrier = Arc::new(Barrier::new(10));
        let (tx, rx) = channel();

        for _ in range(0, 9) {
            let c = barrier.clone();
            let tx = tx.clone();
            spawn(proc() {
                c.wait();
                tx.send(true);
            });
        }

        // At this point, all spawned tasks should be blocked,
        // so we shouldn't get anything from the port
        assert!(match rx.try_recv() {
            Err(Empty) => true,
            _ => false,
        });

        barrier.wait();
        // Now, the barrier is cleared and we should get data.
        for _ in range(0, 9) {
            rx.recv();
        }
    }
}
815
libsync/lock.rs:175:37-175:37 -struct- definition:
/// underlying data can be accessed.
pub struct MutexGuard<'a, T> {
data: &'a mut T,
references:- 11223: MutexGuard {
224: data: data,
--
433: lock.count = 0;
434: lock.generation_id += 1;
435: lock.cond.broadcast();
libsync/lock.rs:265:8-265:8 -struct- definition:
/// ```
pub struct RWLock<T> {
lock: raw::RWLock,
references:- 4296: pub fn new_with_condvars(user_data: T, num_condvars: uint) -> RWLock<T> {
297: RWLock {
298: lock: raw::RWLock::new_with_condvars(num_condvars),
libsync/lock.rs:64:1-64:1 -enum- definition:
enum Inner<'a> {
InnerMutex(raw::MutexGuard<'a>),
InnerRWLock(raw::RWLockWriteGuard<'a>),
references:- 270: impl<'b> Inner<'b> {
71: fn cond<'a>(&'a self) -> &'a raw::Condvar<'b> {
--
86: poison: PoisonOnFail<'a>,
87: inner: Inner<'a>,
88: }
libsync/lock.rs:167:8-167:8 -struct- definition:
/// ```
pub struct Mutex<T> {
lock: raw::Mutex,
references:- 5194: pub fn new_with_condvars(user_data: T, num_condvars: uint) -> Mutex<T> {
195: Mutex {
196: lock: raw::Mutex::new_with_condvars(num_condvars),
--
397: pub struct Barrier {
398: lock: Mutex<BarrierState>,
399: num_tasks: uint,
libsync/lock.rs:80:44-80:44 -struct- definition:
/// signaling, for use with the lock types.
pub struct Condvar<'a> {
name: &'static str,
references:- 5224: data: data,
225: cond: Condvar {
226: name: "Mutex",
--
323: data: data,
324: cond: Condvar {
325: name: "RWLock",
libsync/lock.rs:30:1-30:1 -struct- definition:
struct PoisonOnFail<'a> {
flag: &'a mut bool,
failed: bool,
references:- 544: PoisonOnFail::check(*flag, name);
45: PoisonOnFail {
46: flag: flag,
--
53: impl<'a> Drop for PoisonOnFail<'a> {
54: fn drop(&mut self) {
--
85: // (destroy the lock guard last).
86: poison: PoisonOnFail<'a>,
87: inner: Inner<'a>,
libsync/lock.rs:273:47-273:47 -struct- definition:
/// guard the underlying data can be accessed.
pub struct RWLockWriteGuard<'a, T> {
data: &'a mut T,
references:- 6322: RWLockWriteGuard {
323: data: data,
--
367: }
368: impl<'a, T: Send + Share> Deref<T> for RWLockWriteGuard<'a, T> {
369: fn deref<'a>(&'a self) -> &'a T { &*self.data }
370: }
371: impl<'a, T: Send + Share> DerefMut<T> for RWLockWriteGuard<'a, T> {
372: fn deref_mut<'a>(&'a mut self) -> &'a mut T { &mut *self.data }
libsync/lock.rs:282:47-282:47 -struct- definition:
/// guard the underlying data can be accessed.
pub struct RWLockReadGuard<'a, T> {
data: &'a T,
references:- 5341: PoisonOnFail::check(unsafe { *self.failed.get() }, "RWLock");
342: RWLockReadGuard {
343: guard: guard,
--
365: impl<'a, T: Send + Share> Deref<T> for RWLockReadGuard<'a, T> {
366: fn deref<'a>(&'a self) -> &'a T { self.data }
libsync/lock.rs:396:8-396:8 -struct- definition:
/// ```
pub struct Barrier {
lock: Mutex<BarrierState>,
references:- 3408: impl Barrier {
409: /// Create a new barrier that can block a given number of tasks.
410: pub fn new(num_tasks: uint) -> Barrier {
411: Barrier {
412: lock: Mutex::new(BarrierState {
libsync/lock.rs:402:39-402:39 -struct- definition:
// The inner state of a double barrier
struct BarrierState {
count: uint,
references:- 2411: Barrier {
412: lock: Mutex::new(BarrierState {
413: count: 0,