./libgreen/stack.rs
git branch: * master 5200215 auto merge of #14035 : alexcrichton/rust/experimental, r=huonw
modified: Sat Apr 19 11:22:39 2014
1 // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
4 //
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
10
11 use std::rt::env::max_cached_stacks;
12 use std::os::{errno, page_size, MemoryMap, MapReadable, MapWritable,
13 MapNonStandardFlags, MapVirtual};
14 use libc;
15
16 /// A task's stack. The name "Stack" is a vestige of segmented stacks.
17 pub struct Stack {
18 buf: MemoryMap,
19 min_size: uint,
20 valgrind_id: libc::c_uint,
21 }
22
23 // Try to use MAP_STACK on platforms that support it (it's what we're doing
24 // anyway), but some platforms don't support it at all. For example, it appears
25 // that there's a bug in freebsd that MAP_STACK implies MAP_FIXED (so it always
26 // fails): http://lists.freebsd.org/pipermail/freebsd-bugs/2011-July/044840.html
27 #[cfg(not(windows), not(target_os = "freebsd"))]
28 static STACK_FLAGS: libc::c_int = libc::MAP_STACK | libc::MAP_PRIVATE |
29 libc::MAP_ANON;
30 #[cfg(target_os = "freebsd")]
31 static STACK_FLAGS: libc::c_int = libc::MAP_PRIVATE | libc::MAP_ANON;
32 #[cfg(windows)]
33 static STACK_FLAGS: libc::c_int = 0;
34
35 impl Stack {
36 /// Allocate a new stack of `size`. If size = 0, this will fail. Use
37 /// `dummy_stack` if you want a zero-sized stack.
38 pub fn new(size: uint) -> Stack {
39 // Map in a stack. Eventually we might be able to handle stack
40 // allocation failure, which would fail to spawn the task. But there aren't
41 // many sensible things to do on OOM. Failure seems fine (and is
42 // what the old stack allocation did).
43 let stack = match MemoryMap::new(size, [MapReadable, MapWritable,
44 MapNonStandardFlags(STACK_FLAGS)]) {
45 Ok(map) => map,
46 Err(e) => fail!("mmap for stack of size {} failed: {}", size, e)
47 };
48
49 // Change the last page to be inaccessible. This is to provide safety;
50 // when an FFI function overflows it will (hopefully) hit this guard
51 // page. It isn't guaranteed, but that's why FFI is unsafe. buf.data is
52 // guaranteed to be aligned properly.
53 if !protect_last_page(&stack) {
54 fail!("Could not memory-protect guard page. stack={:?}, errno={}",
55 stack, errno());
56 }
57
58 let mut stk = Stack {
59 buf: stack,
60 min_size: size,
61 valgrind_id: 0
62 };
63
64 // FIXME: Using the FFI to call a C macro. Slow
65 stk.valgrind_id = unsafe {
66 rust_valgrind_stack_register(stk.start(), stk.end())
67 };
68 return stk;
69 }
70
71 /// Create a 0-length stack which starts (and ends) at 0.
72 pub unsafe fn dummy_stack() -> Stack {
73 Stack {
74 buf: MemoryMap { data: 0 as *mut u8, len: 0, kind: MapVirtual },
75 min_size: 0,
76 valgrind_id: 0
77 }
78 }
79
80 /// Point to the low end of the allocated stack
81 pub fn start(&self) -> *uint {
82 self.buf.data as *uint
83 }
84
85 /// Point one uint beyond the high end of the allocated stack
86 pub fn end(&self) -> *uint {
87 unsafe {
88 self.buf.data.offset(self.buf.len as int) as *uint
89 }
90 }
91 }
92
93 #[cfg(unix)]
94 fn protect_last_page(stack: &MemoryMap) -> bool {
95 unsafe {
96 // This may seem backwards: the start of the segment is the last page?
97 // Yes! The stack grows from higher addresses (the end of the allocated
98 // block) to lower addresses (the start of the allocated block).
99 let last_page = stack.data as *libc::c_void;
100 libc::mprotect(last_page, page_size() as libc::size_t,
101 libc::PROT_NONE) != -1
102 }
103 }
104
105 #[cfg(windows)]
106 fn protect_last_page(stack: &MemoryMap) -> bool {
107 unsafe {
108 // see above
109 let last_page = stack.data as *mut libc::c_void;
110 let mut old_prot: libc::DWORD = 0;
111 libc::VirtualProtect(last_page, page_size() as libc::SIZE_T,
112 libc::PAGE_NOACCESS,
113 &mut old_prot as libc::LPDWORD) != 0
114 }
115 }
116
117 impl Drop for Stack {
118 fn drop(&mut self) {
119 unsafe {
120 // FIXME: Using the FFI to call a C macro. Slow
121 rust_valgrind_stack_deregister(self.valgrind_id);
122 }
123 }
124 }
125
126 pub struct StackPool {
127 // Ideally this would be some datastructure that preserved ordering on
128 // Stack.min_size.
129 stacks: Vec<Stack>,
130 }
131
132 impl StackPool {
133 pub fn new() -> StackPool {
134 StackPool {
135 stacks: vec![],
136 }
137 }
138
139 pub fn take_stack(&mut self, min_size: uint) -> Stack {
140 // Ideally this would be a binary search
141 match self.stacks.iter().position(|s| min_size <= s.min_size) {
142 Some(idx) => self.stacks.swap_remove(idx).unwrap(),
143 None => Stack::new(min_size)
144 }
145 }
146
147 pub fn give_stack(&mut self, stack: Stack) {
148 if self.stacks.len() <= max_cached_stacks() {
149 self.stacks.push(stack)
150 }
151 }
152 }
153
154 extern {
155 fn rust_valgrind_stack_register(start: *libc::uintptr_t,
156 end: *libc::uintptr_t) -> libc::c_uint;
157 fn rust_valgrind_stack_deregister(id: libc::c_uint);
158 }
159
160 #[cfg(test)]
161 mod tests {
162 use super::StackPool;
163
164 #[test]
165 fn stack_pool_caches() {
166 let mut p = StackPool::new();
167 let s = p.take_stack(10);
168 p.give_stack(s);
169 let s = p.take_stack(4);
170 assert_eq!(s.min_size, 10);
171 p.give_stack(s);
172 let s = p.take_stack(14);
173 assert_eq!(s.min_size, 14);
174 p.give_stack(s);
175 }
176
177 #[test]
178 fn stack_pool_caches_exact() {
179 let mut p = StackPool::new();
180 let mut s = p.take_stack(10);
181 s.valgrind_id = 100;
182 p.give_stack(s);
183
184 let s = p.take_stack(10);
185 assert_eq!(s.min_size, 10);
186 assert_eq!(s.valgrind_id, 100);
187 }
188 }
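
For orientation (a sketch, not part of the file above): the layout that Stack::new sets up, written in the same 2014-era dialect as the file. The 16 KiB request and the page size are illustrative assumptions.

    let stk = Stack::new(16 * 1024);
    // [stk.start(), stk.start() + page_size())  -- guard page, mprotect'd PROT_NONE
    //                                              by protect_last_page
    // [stk.start() + page_size(), stk.end())    -- usable stack; the stack pointer is
    //                                              installed near end() and grows downward,
    //                                              so a deep overflow faults on the guard
    //                                              page instead of clobbering nearby memory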
libgreen/stack.rs:125:1-125:1 -struct- definition:
pub struct StackPool {
// Ideally this would be some datastructure that preserved ordering on
// Stack.min_size.
references:- 9
133: pub fn new() -> StackPool {
134: StackPool {
135: stacks: vec![],
libgreen/task.rs:
175: /// new stack for this task.
176: pub fn configure(pool: &mut StackPool,
177: opts: TaskOpts,
libgreen/coroutine.rs:
39: /// Destroy coroutine and try to reuse std::stack segment.
40: pub fn recycle(self, stack_pool: &mut StackPool) {
41: let Coroutine { current_stack_segment, .. } = self;
libgreen/sched.rs:
43: /// The pool of stacks that this scheduler has cached
44: pub stack_pool: StackPool,
45: /// Bookkeeping for the number of tasks which are currently running around
libgreen/lib.rs:
364: next_friend: uint,
365: stack_pool: StackPool,
366: deque_pool: deque::BufferPool<Box<task::GreenTask>>,
libgreen/stack.rs:16:71-16:71 -struct- definition:
/// A task's stack. The name "Stack" is a vestige of segmented stacks.
pub struct Stack {
buf: MemoryMap,
references:- 11
58: let mut stk = Stack {
59: buf: stack,
--
117: impl Drop for Stack {
118: fn drop(&mut self) {
--
147: pub fn give_stack(&mut self, stack: Stack) {
148: if self.stacks.len() <= max_cached_stacks() {
libgreen/context.rs:
49: pub fn new(init: InitFn, arg: uint, start: proc():Send,
50: stack: &mut Stack) -> Context {
libgreen/coroutine.rs:
24: /// about the stack bounds.
25: pub current_stack_segment: Stack,
libgreen/stack.rs:
72: pub unsafe fn dummy_stack() -> Stack {
73: Stack {
74: buf: MemoryMap { data: 0 as *mut u8, len: 0, kind: MapVirtual },
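
For context, a minimal sketch of how the callers listed above (sched.rs, coroutine.rs) drive the pool; the 2 MiB size and the task body are illustrative, not taken from those files.

    let mut pool = StackPool::new();
    // Reuse a cached stack whose min_size is at least the request, else mmap a new one.
    let stack = pool.take_stack(2 * 1024 * 1024);
    // ... run a green task / coroutine on `stack` ...
    // Hand it back: it is kept for reuse only while the pool holds no more than
    // max_cached_stacks() entries; otherwise it is dropped and its mapping freed.
    pool.give_stack(stack);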