// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

//! A native mutex and condition variable type.
//!
//! This module contains bindings to the platform's native mutex/condition
//! variable primitives. It provides two types: `StaticNativeMutex`, which can
//! be statically initialized via the `NATIVE_MUTEX_INIT` value, and a simple
//! wrapper `NativeMutex` that has a destructor to clean up after itself. These
//! objects serve as both mutexes and condition variables simultaneously.
//!
//! The static lock is lazily initialized, but it can only be unsafely
//! destroyed. A statically initialized lock doesn't necessarily have a time at
//! which it can get deallocated. For this reason, there is no `Drop`
//! implementation of the static mutex, but rather the `destroy()` method must
//! be invoked manually if destruction of the mutex is desired.
//!
//! The non-static `NativeMutex` type does have a destructor, but cannot be
//! statically initialized.
//!
//! These types are not recommended for idiomatic Rust code; they are
//! appropriate only where no other option is available. Prefer the other Rust
//! concurrency primitives: the `sync` crate defines `StaticMutex` and `Mutex`
//! types.
//!
//! # Example
//!
//! ```rust
//! use std::unstable::mutex::{NativeMutex, StaticNativeMutex, NATIVE_MUTEX_INIT};
//!
//! // Use a statically initialized mutex
//! static mut LOCK: StaticNativeMutex = NATIVE_MUTEX_INIT;
//!
//! unsafe {
//!     let _guard = LOCK.lock();
//! } // automatically unlocked here
//!
//! // Use a normally initialized mutex
//! unsafe {
//!     let mut lock = NativeMutex::new();
//!
//!     {
//!         let _guard = lock.lock();
//!     } // unlocked here
//!
//!     // sometimes the RAII guard isn't appropriate
//!     lock.lock_noguard();
//!     lock.unlock_noguard();
//! } // `lock` is deallocated here
//! ```
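//!
//! Because each mutex also embeds a condition variable, a `LockGuard` can
//! `wait` for a `signal` from another thread. A minimal sketch, modeled on
//! this module's own tests (which spawn the signalling side with
//! `std::rt::thread::Thread`):
//!
//! ```rust
//! use std::unstable::mutex::{StaticNativeMutex, NATIVE_MUTEX_INIT};
//! use std::rt::thread::Thread;
//!
//! static mut LOCK: StaticNativeMutex = NATIVE_MUTEX_INIT;
//!
//! unsafe {
//!     let guard = LOCK.lock();
//!     let t = Thread::start(proc() {
//!         let guard = LOCK.lock();
//!         guard.signal();
//!     });
//!     guard.wait(); // releases the lock while blocked, reacquires on wakeup
//!     drop(guard);
//!     t.join();
//! }
//! ```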

#![allow(non_camel_case_types)]

use option::{Option, None, Some};
use ops::Drop;

/// A native mutex suitable for storing in statics (that is, it has
/// the `destroy` method rather than a destructor).
///
/// Prefer the `NativeMutex` type where possible, since that does not
/// require manual deallocation.
pub struct StaticNativeMutex {
    inner: imp::Mutex,
}

/// A native mutex with a destructor for clean-up.
///
/// See `StaticNativeMutex` for a version that is suitable for storing in
/// statics.
pub struct NativeMutex {
    inner: StaticNativeMutex
}

/// Automatically unlocks the mutex that it was created from on
/// destruction.
///
/// Using this makes lock-based code resilient to unwinding/task
/// failure, because the lock will be automatically unlocked even
/// then.
#[must_use]
pub struct LockGuard<'a> {
    lock: &'a StaticNativeMutex
}

pub static NATIVE_MUTEX_INIT: StaticNativeMutex = StaticNativeMutex {
    inner: imp::MUTEX_INIT,
};

impl StaticNativeMutex {
    /// Creates a new mutex.
    ///
    /// Note that a mutex created in this way needs to be explicitly freed
    /// with a call to `destroy` or it will leak.
    pub unsafe fn new() -> StaticNativeMutex {
        StaticNativeMutex { inner: imp::Mutex::new() }
    }

    /// Acquires this lock. This assumes that the current thread does not
    /// already hold the lock.
    ///
    /// # Example
    ///
    /// ```rust
    /// use std::unstable::mutex::{StaticNativeMutex, NATIVE_MUTEX_INIT};
    /// static mut LOCK: StaticNativeMutex = NATIVE_MUTEX_INIT;
    /// unsafe {
    ///     let _guard = LOCK.lock();
    ///     // critical section...
    /// } // automatically unlocked in `_guard`'s destructor
    /// ```
    pub unsafe fn lock<'a>(&'a self) -> LockGuard<'a> {
        self.inner.lock();

        LockGuard { lock: self }
    }

    /// Attempts to acquire the lock. The value returned is `Some` if
    /// the attempt succeeded.
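    ///
    /// # Example
    ///
    /// A minimal sketch of handling both outcomes (assuming a statically
    /// initialized lock):
    ///
    /// ```rust
    /// use std::unstable::mutex::{StaticNativeMutex, NATIVE_MUTEX_INIT};
    /// static mut LOCK: StaticNativeMutex = NATIVE_MUTEX_INIT;
    /// unsafe {
    ///     match LOCK.trylock() {
    ///         Some(_guard) => {
    ///             // critical section...
    ///         } // automatically unlocked here
    ///         None => { /* the lock was contended */ }
    ///     }
    /// }
    /// ```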
    pub unsafe fn trylock<'a>(&'a self) -> Option<LockGuard<'a>> {
        if self.inner.trylock() {
            Some(LockGuard { lock: self })
        } else {
            None
        }
    }

    /// Acquires the lock without creating a `LockGuard`.
    ///
    /// This needs to be paired with a call to `.unlock_noguard`. Prefer using
    /// `.lock`.
    pub unsafe fn lock_noguard(&self) { self.inner.lock() }

    /// Attempts to acquire the lock without creating a
    /// `LockGuard`. The value returned is whether the lock was
    /// acquired or not.
    ///
    /// If `true` is returned, this needs to be paired with a call to
    /// `.unlock_noguard`. Prefer using `.trylock`.
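    ///
    /// # Example
    ///
    /// A minimal sketch of the required pairing:
    ///
    /// ```rust
    /// use std::unstable::mutex::{StaticNativeMutex, NATIVE_MUTEX_INIT};
    /// static mut LOCK: StaticNativeMutex = NATIVE_MUTEX_INIT;
    /// unsafe {
    ///     if LOCK.trylock_noguard() {
    ///         // critical section...
    ///         LOCK.unlock_noguard();
    ///     }
    /// }
    /// ```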
    pub unsafe fn trylock_noguard(&self) -> bool {
        self.inner.trylock()
    }

    /// Unlocks the lock. This assumes that the current thread already holds the
    /// lock.
    pub unsafe fn unlock_noguard(&self) { self.inner.unlock() }

    /// Blocks on the internal condition variable.
    ///
    /// This function assumes that the lock is already held. Prefer
    /// using `LockGuard.wait` since that guarantees that the lock is
    /// held.
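    ///
    /// # Example
    ///
    /// A sketch modeled on this module's `smoke_cond_noguard` test; the
    /// signalling side runs on a second thread, spawned here with
    /// `std::rt::thread::Thread` as the tests themselves do:
    ///
    /// ```rust
    /// use std::unstable::mutex::{StaticNativeMutex, NATIVE_MUTEX_INIT};
    /// use std::rt::thread::Thread;
    ///
    /// static mut LOCK: StaticNativeMutex = NATIVE_MUTEX_INIT;
    /// unsafe {
    ///     LOCK.lock_noguard();
    ///     let t = Thread::start(proc() {
    ///         LOCK.lock_noguard();
    ///         LOCK.signal_noguard();
    ///         LOCK.unlock_noguard();
    ///     });
    ///     LOCK.wait_noguard(); // wakes once the other thread signals
    ///     LOCK.unlock_noguard();
    ///     t.join();
    /// }
    /// ```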
    pub unsafe fn wait_noguard(&self) { self.inner.wait() }

    /// Signals a thread in `wait` to wake up.
    pub unsafe fn signal_noguard(&self) { self.inner.signal() }

    /// This function is especially unsafe because there are no guarantees made
    /// that no other thread is currently holding the lock or waiting on the
    /// condition variable contained inside.
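    ///
    /// # Example
    ///
    /// A minimal sketch, mirroring this module's `destroy_immediately` test:
    ///
    /// ```rust
    /// use std::unstable::mutex::StaticNativeMutex;
    /// unsafe {
    ///     let m = StaticNativeMutex::new();
    ///     // ... use the mutex while no other thread touches it ...
    ///     m.destroy();
    /// }
    /// ```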
    pub unsafe fn destroy(&self) { self.inner.destroy() }
}

impl NativeMutex {
    /// Creates a new mutex.
    ///
    /// The user must be careful to ensure the mutex is not locked when it is
    /// being destroyed.
    pub unsafe fn new() -> NativeMutex {
        NativeMutex { inner: StaticNativeMutex::new() }
    }

    /// Acquires this lock. This assumes that the current thread does not
    /// already hold the lock.
    ///
    /// # Example
    ///
    /// ```rust
    /// use std::unstable::mutex::NativeMutex;
    /// unsafe {
    ///     let mut lock = NativeMutex::new();
    ///
    ///     {
    ///         let _guard = lock.lock();
    ///         // critical section...
    ///     } // automatically unlocked in `_guard`'s destructor
    /// }
    /// ```
    pub unsafe fn lock<'a>(&'a self) -> LockGuard<'a> {
        self.inner.lock()
    }

    /// Attempts to acquire the lock. The value returned is `Some` if
    /// the attempt succeeded.
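    ///
    /// # Example
    ///
    /// A minimal sketch, mirroring `StaticNativeMutex::trylock`:
    ///
    /// ```rust
    /// use std::unstable::mutex::NativeMutex;
    /// unsafe {
    ///     let lock = NativeMutex::new();
    ///     match lock.trylock() {
    ///         Some(_guard) => {
    ///             // critical section...
    ///         } // automatically unlocked here
    ///         None => {}
    ///     }
    /// }
    /// ```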
    pub unsafe fn trylock<'a>(&'a self) -> Option<LockGuard<'a>> {
        self.inner.trylock()
    }

    /// Acquires the lock without creating a `LockGuard`.
    ///
    /// This needs to be paired with a call to `.unlock_noguard`. Prefer using
    /// `.lock`.
    pub unsafe fn lock_noguard(&self) { self.inner.lock_noguard() }

    /// Attempts to acquire the lock without creating a
    /// `LockGuard`. The value returned is whether the lock was
    /// acquired or not.
    ///
    /// If `true` is returned, this needs to be paired with a call to
    /// `.unlock_noguard`. Prefer using `.trylock`.
    pub unsafe fn trylock_noguard(&self) -> bool {
        self.inner.trylock_noguard()
    }

    /// Unlocks the lock. This assumes that the current thread already holds the
    /// lock.
    pub unsafe fn unlock_noguard(&self) { self.inner.unlock_noguard() }

    /// Blocks on the internal condition variable.
    ///
    /// This function assumes that the lock is already held. Prefer
    /// using `LockGuard.wait` since that guarantees that the lock is
    /// held.
    pub unsafe fn wait_noguard(&self) { self.inner.wait_noguard() }

    /// Signals a thread in `wait` to wake up.
    pub unsafe fn signal_noguard(&self) { self.inner.signal_noguard() }
}

impl Drop for NativeMutex {
    fn drop(&mut self) {
        unsafe { self.inner.destroy() }
    }
}

impl<'a> LockGuard<'a> {
    /// Blocks on the internal condition variable.
    pub unsafe fn wait(&self) {
        self.lock.wait_noguard()
    }

    /// Signals a thread in `wait` to wake up.
    pub unsafe fn signal(&self) {
        self.lock.signal_noguard()
    }
}

#[unsafe_destructor]
impl<'a> Drop for LockGuard<'a> {
    fn drop(&mut self) {
        unsafe { self.lock.unlock_noguard() }
    }
}

#[cfg(unix)]
mod imp {
    use libc;
    use self::os::{PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER,
                   pthread_mutex_t, pthread_cond_t};
    use mem;
    use ty::Unsafe;
    use kinds::marker;

    type pthread_mutexattr_t = libc::c_void;
    type pthread_condattr_t = libc::c_void;

    #[cfg(target_os = "freebsd")]
    mod os {
        use libc;

        pub type pthread_mutex_t = *libc::c_void;
        pub type pthread_cond_t = *libc::c_void;

        pub static PTHREAD_MUTEX_INITIALIZER: pthread_mutex_t =
            0 as pthread_mutex_t;
        pub static PTHREAD_COND_INITIALIZER: pthread_cond_t =
            0 as pthread_cond_t;
    }

    #[cfg(target_os = "macos")]
    mod os {
        use libc;

        #[cfg(target_arch = "x86_64")]
        static __PTHREAD_MUTEX_SIZE__: uint = 56;
        #[cfg(target_arch = "x86_64")]
        static __PTHREAD_COND_SIZE__: uint = 40;
        #[cfg(target_arch = "x86")]
        static __PTHREAD_MUTEX_SIZE__: uint = 40;
        #[cfg(target_arch = "x86")]
        static __PTHREAD_COND_SIZE__: uint = 24;
        static _PTHREAD_MUTEX_SIG_init: libc::c_long = 0x32AAABA7;
        static _PTHREAD_COND_SIG_init: libc::c_long = 0x3CB0B1BB;

        pub struct pthread_mutex_t {
            __sig: libc::c_long,
            __opaque: [u8, ..__PTHREAD_MUTEX_SIZE__],
        }
        pub struct pthread_cond_t {
            __sig: libc::c_long,
            __opaque: [u8, ..__PTHREAD_COND_SIZE__],
        }

        pub static PTHREAD_MUTEX_INITIALIZER: pthread_mutex_t = pthread_mutex_t {
            __sig: _PTHREAD_MUTEX_SIG_init,
            __opaque: [0, ..__PTHREAD_MUTEX_SIZE__],
        };
        pub static PTHREAD_COND_INITIALIZER: pthread_cond_t = pthread_cond_t {
            __sig: _PTHREAD_COND_SIG_init,
            __opaque: [0, ..__PTHREAD_COND_SIZE__],
        };
    }

    #[cfg(target_os = "linux")]
    mod os {
        use libc;

        // minus 8 because we have an 'align' field
        #[cfg(target_arch = "x86_64")]
        static __SIZEOF_PTHREAD_MUTEX_T: uint = 40 - 8;
        #[cfg(target_arch = "x86")]
        static __SIZEOF_PTHREAD_MUTEX_T: uint = 24 - 8;
        #[cfg(target_arch = "arm")]
        static __SIZEOF_PTHREAD_MUTEX_T: uint = 24 - 8;
        #[cfg(target_arch = "mips")]
        static __SIZEOF_PTHREAD_MUTEX_T: uint = 24 - 8;
        #[cfg(target_arch = "x86_64")]
        static __SIZEOF_PTHREAD_COND_T: uint = 48 - 8;
        #[cfg(target_arch = "x86")]
        static __SIZEOF_PTHREAD_COND_T: uint = 48 - 8;
        #[cfg(target_arch = "arm")]
        static __SIZEOF_PTHREAD_COND_T: uint = 48 - 8;
        #[cfg(target_arch = "mips")]
        static __SIZEOF_PTHREAD_COND_T: uint = 48 - 8;

        pub struct pthread_mutex_t {
            __align: libc::c_longlong,
            size: [u8, ..__SIZEOF_PTHREAD_MUTEX_T],
        }
        pub struct pthread_cond_t {
            __align: libc::c_longlong,
            size: [u8, ..__SIZEOF_PTHREAD_COND_T],
        }

        pub static PTHREAD_MUTEX_INITIALIZER: pthread_mutex_t = pthread_mutex_t {
            __align: 0,
            size: [0, ..__SIZEOF_PTHREAD_MUTEX_T],
        };
        pub static PTHREAD_COND_INITIALIZER: pthread_cond_t = pthread_cond_t {
            __align: 0,
            size: [0, ..__SIZEOF_PTHREAD_COND_T],
        };
    }

    #[cfg(target_os = "android")]
    mod os {
        use libc;

        pub struct pthread_mutex_t { value: libc::c_int }
        pub struct pthread_cond_t { value: libc::c_int }

        pub static PTHREAD_MUTEX_INITIALIZER: pthread_mutex_t = pthread_mutex_t {
            value: 0,
        };
        pub static PTHREAD_COND_INITIALIZER: pthread_cond_t = pthread_cond_t {
            value: 0,
        };
    }

    pub struct Mutex {
        lock: Unsafe<pthread_mutex_t>,
        cond: Unsafe<pthread_cond_t>,
    }

    pub static MUTEX_INIT: Mutex = Mutex {
        lock: Unsafe {
            value: PTHREAD_MUTEX_INITIALIZER,
            marker1: marker::InvariantType,
        },
        cond: Unsafe {
            value: PTHREAD_COND_INITIALIZER,
            marker1: marker::InvariantType,
        },
    };

    impl Mutex {
        pub unsafe fn new() -> Mutex {
            let m = Mutex {
                lock: Unsafe::new(mem::init()),
                cond: Unsafe::new(mem::init()),
            };

            pthread_mutex_init(m.lock.get(), 0 as *libc::c_void);
            pthread_cond_init(m.cond.get(), 0 as *libc::c_void);

            return m;
        }

        pub unsafe fn lock(&self) { pthread_mutex_lock(self.lock.get()); }
        pub unsafe fn unlock(&self) { pthread_mutex_unlock(self.lock.get()); }
        pub unsafe fn signal(&self) { pthread_cond_signal(self.cond.get()); }
        pub unsafe fn wait(&self) {
            pthread_cond_wait(self.cond.get(), self.lock.get());
        }
        pub unsafe fn trylock(&self) -> bool {
            pthread_mutex_trylock(self.lock.get()) == 0
        }
        pub unsafe fn destroy(&self) {
            pthread_mutex_destroy(self.lock.get());
            pthread_cond_destroy(self.cond.get());
        }
    }

    extern {
        fn pthread_mutex_init(lock: *mut pthread_mutex_t,
                              attr: *pthread_mutexattr_t) -> libc::c_int;
        fn pthread_mutex_destroy(lock: *mut pthread_mutex_t) -> libc::c_int;
        fn pthread_cond_init(cond: *mut pthread_cond_t,
                             attr: *pthread_condattr_t) -> libc::c_int;
        fn pthread_cond_destroy(cond: *mut pthread_cond_t) -> libc::c_int;
        fn pthread_mutex_lock(lock: *mut pthread_mutex_t) -> libc::c_int;
        fn pthread_mutex_trylock(lock: *mut pthread_mutex_t) -> libc::c_int;
        fn pthread_mutex_unlock(lock: *mut pthread_mutex_t) -> libc::c_int;

        fn pthread_cond_wait(cond: *mut pthread_cond_t,
                             lock: *mut pthread_mutex_t) -> libc::c_int;
        fn pthread_cond_signal(cond: *mut pthread_cond_t) -> libc::c_int;
    }
}

#[cfg(windows)]
mod imp {
    use rt::global_heap::malloc_raw;
    use libc::{HANDLE, BOOL, LPSECURITY_ATTRIBUTES, c_void, DWORD, LPCSTR};
    use libc;
    use ptr;
    use sync::atomics;

    type LPCRITICAL_SECTION = *mut c_void;
    static SPIN_COUNT: DWORD = 4000;
    #[cfg(target_arch = "x86")]
    static CRIT_SECTION_SIZE: uint = 24;
    #[cfg(target_arch = "x86_64")]
    static CRIT_SECTION_SIZE: uint = 40;

    pub struct Mutex {
        // pointers for the lock/cond handles, atomically updated
        lock: atomics::AtomicUint,
        cond: atomics::AtomicUint,
    }

    pub static MUTEX_INIT: Mutex = Mutex {
        lock: atomics::INIT_ATOMIC_UINT,
        cond: atomics::INIT_ATOMIC_UINT,
    };

    impl Mutex {
        pub unsafe fn new() -> Mutex {
            Mutex {
                lock: atomics::AtomicUint::new(init_lock()),
                cond: atomics::AtomicUint::new(init_cond()),
            }
        }
        pub unsafe fn lock(&self) {
            EnterCriticalSection(self.getlock() as LPCRITICAL_SECTION)
        }
        pub unsafe fn trylock(&self) -> bool {
            TryEnterCriticalSection(self.getlock() as LPCRITICAL_SECTION) != 0
        }
        pub unsafe fn unlock(&self) {
            LeaveCriticalSection(self.getlock() as LPCRITICAL_SECTION)
        }

        pub unsafe fn wait(&self) {
            self.unlock();
            WaitForSingleObject(self.getcond() as HANDLE, libc::INFINITE);
            self.lock();
        }

        pub unsafe fn signal(&self) {
            assert!(SetEvent(self.getcond() as HANDLE) != 0);
        }

        /// This function is especially unsafe because there are no guarantees made
        /// that no other thread is currently holding the lock or waiting on the
        /// condition variable contained inside.
        pub unsafe fn destroy(&self) {
            let lock = self.lock.swap(0, atomics::SeqCst);
            let cond = self.cond.swap(0, atomics::SeqCst);
            if lock != 0 { free_lock(lock) }
            if cond != 0 { free_cond(cond) }
        }

        unsafe fn getlock(&self) -> *mut c_void {
            // fast path: the critical section has already been allocated
            match self.lock.load(atomics::SeqCst) {
                0 => {}
                n => return n as *mut c_void
            }
            // slow path: allocate one and race to install it
            let lock = init_lock();
            match self.lock.compare_and_swap(0, lock, atomics::SeqCst) {
                0 => return lock as *mut c_void,
                _ => {}
            }
            // another thread won the race; discard ours and use theirs
            free_lock(lock);
            return self.lock.load(atomics::SeqCst) as *mut c_void;
        }

        unsafe fn getcond(&self) -> *mut c_void {
            // same lazy-initialization dance as `getlock`, for the event
            match self.cond.load(atomics::SeqCst) {
                0 => {}
                n => return n as *mut c_void
            }
            let cond = init_cond();
            match self.cond.compare_and_swap(0, cond, atomics::SeqCst) {
                0 => return cond as *mut c_void,
                _ => {}
            }
            free_cond(cond);
            return self.cond.load(atomics::SeqCst) as *mut c_void;
        }
    }

    pub unsafe fn init_lock() -> uint {
        let block = malloc_raw(CRIT_SECTION_SIZE as uint) as *mut c_void;
        InitializeCriticalSectionAndSpinCount(block, SPIN_COUNT);
        return block as uint;
    }

    pub unsafe fn init_cond() -> uint {
        return CreateEventA(ptr::mut_null(), libc::FALSE, libc::FALSE,
                            ptr::null()) as uint;
    }

    pub unsafe fn free_lock(h: uint) {
        DeleteCriticalSection(h as LPCRITICAL_SECTION);
        libc::free(h as *mut c_void);
    }

    pub unsafe fn free_cond(h: uint) {
        let block = h as HANDLE;
        libc::CloseHandle(block);
    }

    extern "system" {
        fn CreateEventA(lpSecurityAttributes: LPSECURITY_ATTRIBUTES,
                        bManualReset: BOOL,
                        bInitialState: BOOL,
                        lpName: LPCSTR) -> HANDLE;
        fn InitializeCriticalSectionAndSpinCount(
            lpCriticalSection: LPCRITICAL_SECTION,
            dwSpinCount: DWORD) -> BOOL;
        fn DeleteCriticalSection(lpCriticalSection: LPCRITICAL_SECTION);
        fn EnterCriticalSection(lpCriticalSection: LPCRITICAL_SECTION);
        fn LeaveCriticalSection(lpCriticalSection: LPCRITICAL_SECTION);
        fn TryEnterCriticalSection(lpCriticalSection: LPCRITICAL_SECTION) -> BOOL;
        fn SetEvent(hEvent: HANDLE) -> BOOL;
        fn WaitForSingleObject(hHandle: HANDLE, dwMilliseconds: DWORD) -> DWORD;
    }
}

#[cfg(test)]
mod test {
    use prelude::*;

    use mem::drop;
    use super::{StaticNativeMutex, NATIVE_MUTEX_INIT};
    use rt::thread::Thread;

    #[test]
    fn smoke_lock() {
        static mut lock: StaticNativeMutex = NATIVE_MUTEX_INIT;
        unsafe {
            let _guard = lock.lock();
        }
    }

    #[test]
    fn smoke_cond() {
        static mut lock: StaticNativeMutex = NATIVE_MUTEX_INIT;
        unsafe {
            let guard = lock.lock();
            let t = Thread::start(proc() {
                let guard = lock.lock();
                guard.signal();
            });
            guard.wait();
            drop(guard);

            t.join();
        }
    }

    #[test]
    fn smoke_lock_noguard() {
        static mut lock: StaticNativeMutex = NATIVE_MUTEX_INIT;
        unsafe {
            lock.lock_noguard();
            lock.unlock_noguard();
        }
    }

    #[test]
    fn smoke_cond_noguard() {
        static mut lock: StaticNativeMutex = NATIVE_MUTEX_INIT;
        unsafe {
            lock.lock_noguard();
            let t = Thread::start(proc() {
                lock.lock_noguard();
                lock.signal_noguard();
                lock.unlock_noguard();
            });
            lock.wait_noguard();
            lock.unlock_noguard();

            t.join();
        }
    }

    #[test]
    fn destroy_immediately() {
        unsafe {
            let m = StaticNativeMutex::new();
            m.destroy();
        }
    }
}