// libstd/rt/test.rs
1 // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
4 //
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
10
11 use libc;
12 use option::{Some, None};
13 use cell::Cell;
14 use clone::Clone;
15 use container::Container;
16 use iter::{Iterator, range};
17 use super::io::net::ip::{SocketAddr, Ipv4Addr, Ipv6Addr};
18 use vec::{OwnedVector, MutableVector, ImmutableVector};
19 use path::GenericPath;
20 use rt::sched::Scheduler;
21 use unstable::{run_in_bare_thread};
22 use rt::thread::Thread;
23 use rt::task::Task;
24 use rt::uv::uvio::UvEventLoop;
25 use rt::work_queue::WorkQueue;
26 use rt::sleeper_list::SleeperList;
27 use rt::comm::oneshot;
28 use result::{Result, Ok, Err};
29
/// Build a single-threaded `Scheduler` backed by a libuv event loop,
/// for use in runtime tests. The scheduler's `no_sleep` flag is set so
/// it exits when it runs out of work instead of waiting for a
/// `Shutdown` message.
pub fn new_test_uv_sched() -> Scheduler {

    let queue = WorkQueue::new();
    // A one-scheduler "cluster": the shared queue list contains only
    // this scheduler's own queue.
    let queues = ~[queue.clone()];

    let mut sched = Scheduler::new(~UvEventLoop::new(),
                                   queue,
                                   queues,
                                   SleeperList::new());

    // Don't wait for the Shutdown message
    sched.no_sleep = true;
    return sched;

}
45
/// Run `f` as the root task of a fresh single-threaded scheduler, on a
/// bare (non-runtime) thread. Blocks until the task completes.
pub fn run_in_newsched_task(f: ~fn()) {
    // Cell lets the once-callable closure below take ownership of `f`.
    let f = Cell::new(f);
    do run_in_bare_thread {
        run_in_newsched_task_core(f.take());
    }
}
52
/// Core of `run_in_newsched_task`: assumes the caller is already on a
/// bare thread. Creates a test scheduler, wraps `f` in a root task
/// whose exit handler shuts the scheduler down, then bootstraps it.
pub fn run_in_newsched_task_core(f: ~fn()) {

    use rt::sched::Shutdown;

    let mut sched = ~new_test_uv_sched();
    // Handle used to send Shutdown once the root task exits.
    let exit_handle = Cell::new(sched.make_handle());

    let on_exit: ~fn(bool) = |exit_status| {
        exit_handle.take().send(Shutdown);
        // Test tasks must exit successfully; abort otherwise.
        rtassert!(exit_status);
    };
    let mut task = ~Task::new_root(&mut sched.stack_pool, None, f);
    task.death.on_exit = Some(on_exit);

    sched.bootstrap(task);
}
69
#[cfg(target_os="macos")]
#[allow(non_camel_case_types)]
mod darwin_fd_limit {
    /*!
     * darwin_fd_limit exists to work around an issue where launchctl on Mac OS X defaults the
     * rlimit maxfiles to 256/unlimited. The default soft limit of 256 ends up being far too low
     * for our multithreaded scheduler testing, depending on the number of cores available.
     *
     * This fixes issue #7772.
     */

    use libc;
    // NOTE(review): 64-bit rlim_t — presumably matches Darwin's
    // <sys/resource.h>; confirm against the target headers.
    type rlim_t = libc::uint64_t;
    // Mirror of the C `struct rlimit`.
    struct rlimit {
        rlim_cur: rlim_t,   // current (soft) limit
        rlim_max: rlim_t    // maximum (hard) limit
    }
    #[nolink]
    extern {
        // name probably doesn't need to be mut, but the C function doesn't specify const
        fn sysctl(name: *mut libc::c_int, namelen: libc::c_uint,
                  oldp: *mut libc::c_void, oldlenp: *mut libc::size_t,
                  newp: *mut libc::c_void, newlen: libc::size_t) -> libc::c_int;
        fn getrlimit(resource: libc::c_int, rlp: *mut rlimit) -> libc::c_int;
        fn setrlimit(resource: libc::c_int, rlp: *rlimit) -> libc::c_int;
    }
    // Constant values hard-coded from the Darwin headers
    // (<sys/sysctl.h>, <sys/resource.h>).
    static CTL_KERN: libc::c_int = 1;
    static KERN_MAXFILESPERPROC: libc::c_int = 29;
    static RLIMIT_NOFILE: libc::c_int = 8;

    /// Raise the soft open-file-descriptor limit toward the per-process
    /// maximum. Failures are logged via `error2!` and otherwise ignored.
    pub unsafe fn raise_fd_limit() {
        #[fixed_stack_segment]; #[inline(never)];

        // The strategy here is to fetch the current resource limits, read the kern.maxfilesperproc
        // sysctl value, and bump the soft resource limit for maxfiles up to the sysctl value.
        use ptr::{to_unsafe_ptr, to_mut_unsafe_ptr, mut_null};
        use sys::size_of_val;
        use os::last_os_error;

        // Fetch the kern.maxfilesperproc value
        let mut mib: [libc::c_int, ..2] = [CTL_KERN, KERN_MAXFILESPERPROC];
        let mut maxfiles: libc::c_int = 0;
        let mut size: libc::size_t = size_of_val(&maxfiles) as libc::size_t;
        if sysctl(to_mut_unsafe_ptr(&mut mib[0]), 2,
                  to_mut_unsafe_ptr(&mut maxfiles) as *mut libc::c_void,
                  to_mut_unsafe_ptr(&mut size),
                  mut_null(), 0) != 0 {
            let err = last_os_error();
            error2!("raise_fd_limit: error calling sysctl: {}", err);
            return;
        }

        // Fetch the current resource limits
        let mut rlim = rlimit{rlim_cur: 0, rlim_max: 0};
        if getrlimit(RLIMIT_NOFILE, to_mut_unsafe_ptr(&mut rlim)) != 0 {
            let err = last_os_error();
            error2!("raise_fd_limit: error calling getrlimit: {}", err);
            return;
        }

        // Bump the soft limit to the smaller of kern.maxfilesperproc and the hard limit
        rlim.rlim_cur = ::cmp::min(maxfiles as rlim_t, rlim.rlim_max);

        // Set our newly-increased resource limit
        if setrlimit(RLIMIT_NOFILE, to_unsafe_ptr(&rlim)) != 0 {
            let err = last_os_error();
            error2!("raise_fd_limit: error calling setrlimit: {}", err);
            return;
        }
    }
}
141
// Non-macOS platforms don't need the fd-limit workaround; provide a
// no-op with the same interface so callers stay platform-agnostic.
#[cfg(not(target_os="macos"))]
mod darwin_fd_limit {
    pub unsafe fn raise_fd_limit() {}
}
146
#[doc(hidden)]
pub fn prepare_for_lots_of_tests() {
    // Bump the fd limit on OS X. See darwin_fd_limit for an explanation.
    // On other platforms this is a no-op (stub module above).
    unsafe { darwin_fd_limit::raise_fd_limit() }
}
152
/// Create more than one scheduler and run a function in a task
/// in one of the schedulers. The schedulers will stay alive
/// until the function `f` returns.
pub fn run_in_mt_newsched_task(f: ~fn()) {
    use os;
    use from_str::FromStr;
    use rt::sched::Shutdown;
    use rt::util;

    // see comment in other function (raising fd limits)
    prepare_for_lots_of_tests();

    let f = Cell::new(f);

    do run_in_bare_thread {
        // Thread count: RUST_RT_TEST_THREADS overrides; otherwise 2x
        // cores, or 1 when OS X / valgrind limits thread creation.
        let nthreads = match os::getenv("RUST_RT_TEST_THREADS") {
            Some(nstr) => FromStr::from_str(nstr).unwrap(),
            None => {
                if util::limit_thread_creation_due_to_osx_and_valgrind() {
                    1
                } else {
                    // Using more threads than cores in test code
                    // to force the OS to preempt them frequently.
                    // Assuming that this help stress test concurrent types.
                    util::num_cpus() * 2
                }
            }
        };

        let sleepers = SleeperList::new();

        let mut handles = ~[];
        let mut scheds = ~[];
        let mut work_queues = ~[];

        // Create all work queues up front so every scheduler can be
        // given the full list of queues (shared via clone below).
        for _ in range(0u, nthreads) {
            let work_queue = WorkQueue::new();
            work_queues.push(work_queue);
        }

        // One scheduler per thread, each with its own event loop but a
        // shared view of the work queues and sleeper list.
        for i in range(0u, nthreads) {
            let loop_ = ~UvEventLoop::new();
            let mut sched = ~Scheduler::new(loop_,
                                            work_queues[i].clone(),
                                            work_queues.clone(),
                                            sleepers.clone());
            let handle = sched.make_handle();

            handles.push(handle);
            scheds.push(sched);
        }

        // When the main task exits, broadcast Shutdown to every
        // scheduler so their threads can terminate.
        let handles = Cell::new(handles);
        let on_exit: ~fn(bool) = |exit_status| {
            let mut handles = handles.take();
            // Tell schedulers to exit
            for handle in handles.mut_iter() {
                handle.send(Shutdown);
            }

            rtassert!(exit_status);
        };
        // NOTE(review): the main task's stack pool comes from scheds[0],
        // but the task is bootstrapped on the scheduler popped from the
        // END of the list below — identical only when nthreads == 1;
        // confirm this mismatch is intentional.
        let mut main_task = ~Task::new_root(&mut scheds[0].stack_pool, None, f.take());
        main_task.death.on_exit = Some(on_exit);

        let mut threads = ~[];
        let main_task = Cell::new(main_task);

        // The last scheduler in the list bootstraps the main task on
        // its own thread.
        let main_thread = {
            let sched = scheds.pop();
            let sched_cell = Cell::new(sched);
            do Thread::start {
                let sched = sched_cell.take();
                sched.bootstrap(main_task.take());
            }
        };
        threads.push(main_thread);

        // Each remaining scheduler gets a trivial bootstrap task and
        // its own thread.
        while !scheds.is_empty() {
            let mut sched = scheds.pop();
            let bootstrap_task = ~do Task::new_root(&mut sched.stack_pool, None) || {
                rtdebug!("bootstrapping non-primary scheduler");
            };
            let bootstrap_task_cell = Cell::new(bootstrap_task);
            let sched_cell = Cell::new(sched);
            let thread = do Thread::start {
                let sched = sched_cell.take();
                sched.bootstrap(bootstrap_task_cell.take());
            };

            threads.push(thread);
        }

        // Wait for schedulers
        for thread in threads.move_iter() {
            thread.join();
        }
    }

}
253
/// Test tasks will abort on failure instead of unwinding
pub fn spawntask(f: ~fn()) {
    // Build a child task around `f` and run it immediately.
    Scheduler::run_task(Task::build_child(None, f));
}
258
/// Create a new task and run it right now. Aborts on failure
pub fn spawntask_later(f: ~fn()) {
    // Enqueue the child task for later execution rather than running
    // it immediately (contrast with `spawntask`).
    Scheduler::run_task_later(Task::build_child(None, f));
}
263
/// Spawn a child task running `f`, choosing at random between running
/// it immediately (`spawntask`) or deferring it (`spawntask_later`),
/// to exercise both scheduling paths in tests.
pub fn spawntask_random(f: ~fn()) {
    use rand::{Rand, rng};

    let mut rng = rng();
    let run_now: bool = Rand::rand(&mut rng);

    if run_now {
        spawntask(f)
    } else {
        spawntask_later(f)
    }
}
276
/// Run `f` in a new root task and report its exit status:
/// `Ok(())` if the task exited successfully, `Err(())` otherwise.
pub fn spawntask_try(f: ~fn()) -> Result<(),()> {

    // One-shot channel carries the exit status out of the death callback.
    let (port, chan) = oneshot();
    let chan = Cell::new(chan);
    let on_exit: ~fn(bool) = |exit_status| chan.take().send(exit_status);

    let mut new_task = Task::build_root(None, f);
    new_task.death.on_exit = Some(on_exit);

    Scheduler::run_task(new_task);

    // Block until the task's on_exit callback has fired.
    let exit_status = port.recv();
    if exit_status { Ok(()) } else { Err(()) }

}
292
/// Spawn a new task in a new scheduler and return a thread handle.
pub fn spawntask_thread(f: ~fn()) -> Thread {

    let f = Cell::new(f);

    // The caller owns the returned Thread and is responsible for
    // joining it.
    let thread = do Thread::start {
        run_in_newsched_task_core(f.take());
    };

    return thread;
}
304
/// Get a ~Task for testing purposes other than actually scheduling it.
pub fn with_test_task(blk: ~fn(~Task) -> ~Task) {
    do run_in_bare_thread {
        let mut sched = ~new_test_uv_sched();
        // Hand `blk` a fresh no-op root task; `blk` returns it
        // (possibly mutated) so it can be cleaned up without running.
        let task = blk(~Task::new_root(&mut sched.stack_pool, None, ||{}));
        cleanup_task(task);
    }
}
313
/// Use to cleanup tasks created for testing but not "run".
pub fn cleanup_task(mut task: ~Task) {
    // NOTE(review): marking `destroyed` presumably satisfies a
    // drop-time check in rt::task — confirm against Task's destructor.
    task.destroyed = true;
}
318
/// Get a port number, starting at 9600, for use in tests
#[fixed_stack_segment] #[inline(never)]
pub fn next_test_port() -> u16 {
    unsafe {
        // Delegate to the C runtime helper, which hands out ports
        // relative to the workspace-specific base (see base_port()).
        return rust_dbg_next_port(base_port() as libc::uintptr_t) as u16;
    }
    extern {
        fn rust_dbg_next_port(base: libc::uintptr_t) -> libc::uintptr_t;
    }
}
329
/// Get a unique IPv4 localhost:port pair starting at 9600
pub fn next_test_ip4() -> SocketAddr {
    // 127.0.0.1 paired with a fresh test port.
    SocketAddr { ip: Ipv4Addr(127, 0, 0, 1), port: next_test_port() }
}
334
/// Get a unique IPv6 localhost:port pair starting at 9600
pub fn next_test_ip6() -> SocketAddr {
    // ::1 (IPv6 loopback) paired with a fresh test port.
    SocketAddr { ip: Ipv6Addr(0, 0, 0, 0, 0, 0, 0, 1), port: next_test_port() }
}
339
/*
XXX: Welcome to MegaHack City.

The bots run multiple builds at the same time, and these builds
all want to use ports. This function figures out which workspace
it is running in and assigns a port range based on it.
*/
fn base_port() -> uint {
    use os;
    use str::StrSlice;
    use vec::ImmutableVector;

    let base = 9600u;
    let range = 1000;

    // Each known buildbot workspace name gets its own 1000-port band
    // above the 9600 base so concurrent builds don't collide.
    let bases = [
        ("32-opt", base + range * 1),
        ("32-noopt", base + range * 2),
        ("64-opt", base + range * 3),
        ("64-noopt", base + range * 4),
        ("64-opt-vg", base + range * 5),
        ("all-opt", base + range * 6),
        ("snap3", base + range * 7),
        ("dist", base + range * 8)
    ];

    // FIXME (#9639): This needs to handle non-utf8 paths
    let path = os::getcwd();
    let path_s = path.as_str().unwrap();

    let mut final_base = base;

    // First workspace name found in the cwd wins; fall back to `base`
    // when none match.
    for &(dir, base) in bases.iter() {
        if path_s.contains(dir) {
            final_base = base;
            break;
        }
    }

    return final_base;
}
381
/// Get a constant that represents the number of times to repeat
/// stress tests. Default 1.
pub fn stress_factor() -> uint {
    use os::getenv;
    use from_str::from_str;

    // RUST_RT_STRESS must parse as a uint when set (unwrap aborts on
    // a malformed value); absent means a factor of 1.
    match getenv("RUST_RT_STRESS") {
        Some(val) => from_str::<uint>(val).unwrap(),
        None => 1
    }
}
libstd/rt/test.rs:147:15-147:15 -fn- definition:
#[doc(hidden)]
pub fn prepare_for_lots_of_tests() {
references:-163: prepare_for_lots_of_tests();
libstd/rt/test.rs:29:1-29:1 -fn- definition:
pub fn new_test_uv_sched() -> Scheduler {
references:-57: let mut sched = ~new_test_uv_sched();
308: let mut sched = ~new_test_uv_sched();
libstd/rt/test.rs:254:58-254:58 -fn- definition:
/// Test tasks will abort on failure instead of unwinding
pub fn spawntask(f: ~fn()) {
references:-271: spawntask(f)
libstd/rt/test.rs:320:40-320:40 -fn- definition:
#[fixed_stack_segment] #[inline(never)]
pub fn next_test_port() -> u16 {
references:-337: SocketAddr { ip: Ipv6Addr(0, 0, 0, 0, 0, 0, 0, 1), port: next_test_port() }
332: SocketAddr { ip: Ipv4Addr(127, 0, 0, 1), port: next_test_port() }
libstd/rt/test.rs:314:60-314:60 -fn- definition:
/// Use to cleanup tasks created for testing but not "run".
pub fn cleanup_task(mut task: ~Task) {
references:-310: cleanup_task(task);
libstd/rt/test.rs:346:3-346:3 -fn- definition:
*/
fn base_port() -> uint {
references:-323: return rust_dbg_next_port(base_port() as libc::uintptr_t) as u16;
libstd/rt/test.rs:52:1-52:1 -fn- definition:
pub fn run_in_newsched_task_core(f: ~fn()) {
references:-49: run_in_newsched_task_core(f.take());
299: run_in_newsched_task_core(f.take());
libstd/rt/test.rs:144:4-144:4 -fn- definition:
pub unsafe fn raise_fd_limit() {}
}
references:-150: unsafe { darwin_fd_limit::raise_fd_limit() }
libstd/rt/test.rs:259:62-259:62 -fn- definition:
/// Create a new task and run it right now. Aborts on failure
pub fn spawntask_later(f: ~fn()) {
references:-273: spawntask_later(f)