./libtest/lib.rs
git branch: * master 5200215 auto merge of #14035 : alexcrichton/rust/experimental, r=huonw
modified: Fri May 9 13:02:28 2014
1 // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
4 //
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
10
11 //! Support code for rustc's built-in unit-test and micro-benchmarking
12 //! framework.
13 //!
14 //! Almost all user code will only be interested in `Bencher` and
15 //! `black_box`. All other interactions (such as writing tests and
16 //! benchmarks themselves) should be done via the `#[test]` and
17 //! `#[bench]` attributes.
18 //!
19 //! See the [Testing Guide](../guide-testing.html) for more details.
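//!
//! As an illustrative sketch only (the function names below are hypothetical,
//! and this assumes the crate is built with the test harness), typical use
//! looks like:
//!
//! ```rust
//! extern crate test;
//!
//! use test::Bencher;
//!
//! #[test]
//! fn xor_roundtrips() {
//!     assert_eq!(0xAAu8 ^ 0xFF ^ 0xFF, 0xAA);
//! }
//!
//! #[bench]
//! fn bench_xor(b: &mut Bencher) {
//!     b.iter(|| test::black_box(0xAAu8 ^ 0xFF));
//! }
//! ```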
20
21 // Currently, not much of this is meant for users. It is intended to
22 // support the simplest interface possible for representing and
23 // running tests while providing a base that other test frameworks may
24 // build off of.
25
26 #![crate_id = "test#0.11-pre"]
27 #![comment = "Rust internal test library only used by rustc"]
28 #![license = "MIT/ASL2"]
29 #![crate_type = "rlib"]
30 #![crate_type = "dylib"]
31 #![doc(html_logo_url = "http://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
32 html_favicon_url = "http://www.rust-lang.org/favicon.ico",
33 html_root_url = "http://static.rust-lang.org/doc/master")]
34
35 #![feature(asm, macro_rules)]
36 #![deny(deprecated_owned_vector)]
37
38 extern crate collections;
39 extern crate getopts;
40 extern crate serialize;
41 extern crate term;
42 extern crate time;
43
44 use collections::TreeMap;
45 use stats::Stats;
46 use time::precise_time_ns;
47 use getopts::{OptGroup, optflag, optopt};
48 use serialize::{json, Decodable};
49 use serialize::json::{Json, ToJson};
50 use term::Terminal;
51 use term::color::{Color, RED, YELLOW, GREEN, CYAN};
52
53 use std::cmp;
54 use std::f64;
55 use std::fmt;
56 use std::from_str::FromStr;
57 use std::io::stdio::StdWriter;
58 use std::io::{File, ChanReader, ChanWriter};
59 use std::io;
60 use std::os;
61 use std::str;
62 use std::strbuf::StrBuf;
63 use std::task::TaskBuilder;
64
65 // to be used by rustc to compile tests in libtest
66 pub mod test {
67 pub use {Bencher, TestName, TestResult, TestDesc,
68 TestDescAndFn, TestOpts, TrFailed, TrIgnored, TrOk,
69 Metric, MetricMap, MetricAdded, MetricRemoved,
70 MetricChange, Improvement, Regression, LikelyNoise,
71 StaticTestFn, StaticTestName, DynTestName, DynTestFn,
72 run_test, test_main, test_main_static, filter_tests,
73 parse_opts, StaticBenchFn};
74 }
75
76 pub mod stats;
77
78 // The name of a test. By convention this follows the rules for rust
79 // paths; i.e. it should be a series of identifiers separated by double
80 // colons. This way if some test runner wants to arrange the tests
81 // hierarchically it may.
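// For example (illustrative name only), "collections::treemap::test_insert"
// could be grouped under "collections" and then "treemap" by such a runner.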
82
83 #[deriving(Clone)]
84 pub enum TestName {
85 StaticTestName(&'static str),
86 DynTestName(~str)
87 }
88 impl fmt::Show for TestName {
89 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
90 match *self {
91 StaticTestName(s) => f.buf.write_str(s),
92 DynTestName(ref s) => f.buf.write_str(s.as_slice()),
93 }
94 }
95 }
96
97 #[deriving(Clone)]
98 enum NamePadding { PadNone, PadOnLeft, PadOnRight }
99
100 impl TestDesc {
101 fn padded_name(&self, column_count: uint, align: NamePadding) -> ~str {
102 use std::num::Saturating;
103 let mut name = StrBuf::from_str(self.name.to_str());
104 let fill = column_count.saturating_sub(name.len());
105 let mut pad = StrBuf::from_owned_str(" ".repeat(fill));
106 match align {
107 PadNone => name.into_owned(),
108 PadOnLeft => {
109 pad.push_str(name.as_slice());
110 pad.into_owned()
111 }
112 PadOnRight => {
113 name.push_str(pad.as_slice());
114 name.into_owned()
115 }
116 }
117 }
118 }
119
120 /// Represents a benchmark function.
121 pub trait TDynBenchFn {
122 fn run(&self, harness: &mut Bencher);
123 }
124
125 // A function that runs a test. If the function returns successfully,
126 // the test succeeds; if the function fails then the test fails. We
127 // may need to come up with a more clever definition of test in order
128 // to support isolation of tests into tasks.
129 pub enum TestFn {
130 StaticTestFn(fn()),
131 StaticBenchFn(fn(&mut Bencher)),
132 StaticMetricFn(proc(&mut MetricMap)),
133 DynTestFn(proc():Send),
134 DynMetricFn(proc(&mut MetricMap)),
135 DynBenchFn(Box<TDynBenchFn>)
136 }
137
138 impl TestFn {
139 fn padding(&self) -> NamePadding {
140 match self {
141 &StaticTestFn(..) => PadNone,
142 &StaticBenchFn(..) => PadOnRight,
143 &StaticMetricFn(..) => PadOnRight,
144 &DynTestFn(..) => PadNone,
145 &DynMetricFn(..) => PadOnRight,
146 &DynBenchFn(..) => PadOnRight,
147 }
148 }
149 }
150
151 /// Manager of the benchmarking runs.
152 ///
153 /// This is fed into functions marked with `#[bench]` to allow for
154 /// set-up & tear-down before running a piece of code repeatedly via a
155 /// call to `iter`.
156 pub struct Bencher {
157 iterations: u64,
158 ns_start: u64,
159 ns_end: u64,
160 pub bytes: u64,
161 }
162
163 // The definition of a single test. A test runner will run a list of
164 // these.
165 #[deriving(Clone)]
166 pub struct TestDesc {
167 pub name: TestName,
168 pub ignore: bool,
169 pub should_fail: bool,
170 }
171
172 pub struct TestDescAndFn {
173 pub desc: TestDesc,
174 pub testfn: TestFn,
175 }
176
177 #[deriving(Clone, Encodable, Decodable, Eq, Show)]
178 pub struct Metric {
179 value: f64,
180 noise: f64
181 }
182
183 impl Metric {
184 pub fn new(value: f64, noise: f64) -> Metric {
185 Metric {value: value, noise: noise}
186 }
187 }
188
189 #[deriving(Eq)]
190 pub struct MetricMap(TreeMap<~str,Metric>);
191
192 impl Clone for MetricMap {
193 fn clone(&self) -> MetricMap {
194 let MetricMap(ref map) = *self;
195 MetricMap(map.clone())
196 }
197 }
198
199 /// Analysis of a single change in metric
200 #[deriving(Eq, Show)]
201 pub enum MetricChange {
202 LikelyNoise,
203 MetricAdded,
204 MetricRemoved,
205 Improvement(f64),
206 Regression(f64)
207 }
208
209 pub type MetricDiff = TreeMap<~str,MetricChange>;
210
211 // The default console test runner. It accepts the command line
212 // arguments and a vector of test_descs.
213 pub fn test_main(args: &[~str], tests: Vec<TestDescAndFn> ) {
214 let opts =
215 match parse_opts(args) {
216 Some(Ok(o)) => o,
217 Some(Err(msg)) => fail!("{}", msg),
218 None => return
219 };
220 match run_tests_console(&opts, tests) {
221 Ok(true) => {}
222 Ok(false) => fail!("Some tests failed"),
223 Err(e) => fail!("io error when running tests: {}", e),
224 }
225 }
226
227 // A variant optimized for invocation with a static test vector.
228 // This will fail (intentionally) when fed any dynamic tests, because
229 // it is copying the static values out into a dynamic vector and cannot
230 // copy dynamic values. It is doing this because from this point on
231 // a Vec<TestDescAndFn> is used in order to effect ownership-transfer
232 // semantics into parallel test runners, which in turn requires an owned
233 // vector rather than a &[].
234 pub fn test_main_static(args: &[~str], tests: &[TestDescAndFn]) {
235 let owned_tests = tests.iter().map(|t| {
236 match t.testfn {
237 StaticTestFn(f) =>
238 TestDescAndFn { testfn: StaticTestFn(f), desc: t.desc.clone() },
239
240 StaticBenchFn(f) =>
241 TestDescAndFn { testfn: StaticBenchFn(f), desc: t.desc.clone() },
242
243 _ => {
244 fail!("non-static tests passed to test::test_main_static");
245 }
246 }
247 }).collect();
248 test_main(args, owned_tests)
249 }
250
251 pub struct TestOpts {
252 pub filter: Option<~str>,
253 pub run_ignored: bool,
254 pub run_tests: bool,
255 pub run_benchmarks: bool,
256 pub ratchet_metrics: Option<Path>,
257 pub ratchet_noise_percent: Option<f64>,
258 pub save_metrics: Option<Path>,
259 pub test_shard: Option<(uint,uint)>,
260 pub logfile: Option<Path>,
261 pub nocapture: bool,
262 }
263
264 impl TestOpts {
265 #[cfg(test)]
266 fn new() -> TestOpts {
267 TestOpts {
268 filter: None,
269 run_ignored: false,
270 run_tests: false,
271 run_benchmarks: false,
272 ratchet_metrics: None,
273 ratchet_noise_percent: None,
274 save_metrics: None,
275 test_shard: None,
276 logfile: None,
277 nocapture: false,
278 }
279 }
280 }
281
282 /// Result of parsing the options.
283 pub type OptRes = Result<TestOpts, ~str>;
284
285 fn optgroups() -> Vec<getopts::OptGroup> {
286 vec!(getopts::optflag("", "ignored", "Run ignored tests"),
287 getopts::optflag("", "test", "Run tests and not benchmarks"),
288 getopts::optflag("", "bench", "Run benchmarks instead of tests"),
289 getopts::optflag("h", "help", "Display this message (longer with --help)"),
290 getopts::optopt("", "save-metrics", "Location to save bench metrics",
291 "PATH"),
292 getopts::optopt("", "ratchet-metrics",
293 "Location to load and save metrics from. The metrics \
294 loaded are used to cause benchmarks to fail if they run too \
295 slowly", "PATH"),
296 getopts::optopt("", "ratchet-noise-percent",
297 "Tests within N% of the recorded metrics will be \
298 considered as passing", "PERCENTAGE"),
299 getopts::optopt("", "logfile", "Write logs to the specified file instead \
300 of stdout", "PATH"),
301 getopts::optopt("", "test-shard", "run shard A, of B shards, worth of the testsuite",
302 "A.B"),
303 getopts::optflag("", "nocapture", "don't capture stdout/stderr of each \
304 task, allow printing directly"))
305 }
306
307 fn usage(binary: &str, helpstr: &str) {
308 let message = format!("Usage: {} [OPTIONS] [FILTER]", binary);
309 println!("{}", getopts::usage(message, optgroups().as_slice()));
310 println!("");
311 if helpstr == "help" {
312 println!("{}", "\
313 The FILTER is matched against the names of all tests to run, and if any tests
314 have a substring match, only those tests are run.
315
316 By default, all tests are run in parallel. This can be altered with the
317 RUST_TEST_TASKS environment variable when running tests (set it to 1).
318
319 All tests have their standard output and standard error captured by default.
320 This can be overridden with the --nocapture flag or the RUST_TEST_NOCAPTURE=1
321 environment variable. Logging is not captured by default.
322
323 Test Attributes:
324
325 #[test] - Indicates a function is a test to be run. This function
326 takes no arguments.
327 #[bench] - Indicates a function is a benchmark to be run. This
328 function takes one argument (test::Bencher).
329 #[should_fail] - This function (also labeled with #[test]) will only pass if
330 the code causes a failure (an assertion failure or fail!)
331 #[ignore] - When applied to a function which is already attributed as a
332 test, then the test runner will ignore these tests during
333 normal test runs. Running with --ignored will run these
334 tests. This may also be written as #[ignore(cfg(...))] to
335 ignore the test on certain configurations.");
336 }
337 }
338
339 // Parses command line arguments into test options
340 pub fn parse_opts(args: &[~str]) -> Option<OptRes> {
341 let args_ = args.tail();
342 let matches =
343 match getopts::getopts(args_, optgroups().as_slice()) {
344 Ok(m) => m,
345 Err(f) => return Some(Err(f.to_err_msg()))
346 };
347
348 if matches.opt_present("h") { usage(args[0], "h"); return None; }
349 if matches.opt_present("help") { usage(args[0], "help"); return None; }
350
351 let filter =
352 if matches.free.len() > 0 {
353 Some((*matches.free.get(0)).clone())
354 } else {
355 None
356 };
357
358 let run_ignored = matches.opt_present("ignored");
359
360 let logfile = matches.opt_str("logfile");
361 let logfile = logfile.map(|s| Path::new(s));
362
363 let run_benchmarks = matches.opt_present("bench");
364 let run_tests = ! run_benchmarks ||
365 matches.opt_present("test");
366
367 let ratchet_metrics = matches.opt_str("ratchet-metrics");
368 let ratchet_metrics = ratchet_metrics.map(|s| Path::new(s));
369
370 let ratchet_noise_percent = matches.opt_str("ratchet-noise-percent");
371 let ratchet_noise_percent = ratchet_noise_percent.map(|s| from_str::<f64>(s).unwrap());
372
373 let save_metrics = matches.opt_str("save-metrics");
374 let save_metrics = save_metrics.map(|s| Path::new(s));
375
376 let test_shard = matches.opt_str("test-shard");
377 let test_shard = opt_shard(test_shard);
378
379 let mut nocapture = matches.opt_present("nocapture");
380 if !nocapture {
381 nocapture = os::getenv("RUST_TEST_NOCAPTURE").is_some();
382 }
383
384 let test_opts = TestOpts {
385 filter: filter,
386 run_ignored: run_ignored,
387 run_tests: run_tests,
388 run_benchmarks: run_benchmarks,
389 ratchet_metrics: ratchet_metrics,
390 ratchet_noise_percent: ratchet_noise_percent,
391 save_metrics: save_metrics,
392 test_shard: test_shard,
393 logfile: logfile,
394 nocapture: nocapture,
395 };
396
397 Some(Ok(test_opts))
398 }
399
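// Parses the argument of `--test-shard` ("A.B"). As an illustrative sketch of
// the expected behaviour: `opt_shard(Some("2.7".to_owned()))` yields
// `Some((2, 7))`, while malformed input such as "2" or "2.7.1" yields `None`.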
400 pub fn opt_shard(maybestr: Option<~str>) -> Option<(uint,uint)> {
401 match maybestr {
402 None => None,
403 Some(s) => {
404 let mut it = s.split('.');
405 match (it.next().and_then(from_str), it.next().and_then(from_str), it.next()) {
406 (Some(a), Some(b), None) => Some((a, b)),
407 _ => None,
408 }
409 }
410 }
411 }
412
413
414 #[deriving(Clone, Eq)]
415 pub struct BenchSamples {
416 ns_iter_summ: stats::Summary<f64>,
417 mb_s: uint,
418 }
419
420 #[deriving(Clone, Eq)]
421 pub enum TestResult {
422 TrOk,
423 TrFailed,
424 TrIgnored,
425 TrMetrics(MetricMap),
426 TrBench(BenchSamples),
427 }
428
429 enum OutputLocation<T> {
430 Pretty(term::Terminal<T>),
431 Raw(T),
432 }
433
434 struct ConsoleTestState<T> {
435 log_out: Option<File>,
436 out: OutputLocation<T>,
437 use_color: bool,
438 total: uint,
439 passed: uint,
440 failed: uint,
441 ignored: uint,
442 measured: uint,
443 metrics: MetricMap,
444 failures: Vec<(TestDesc, Vec<u8> )> ,
445 max_name_len: uint, // number of columns to fill when aligning names
446 }
447
448 impl<T: Writer> ConsoleTestState<T> {
449 pub fn new(opts: &TestOpts,
450 _: Option<T>) -> io::IoResult<ConsoleTestState<StdWriter>> {
451 let log_out = match opts.logfile {
452 Some(ref path) => Some(try!(File::create(path))),
453 None => None
454 };
455 let out = match term::Terminal::new(io::stdio::stdout_raw()) {
456 Err(_) => Raw(io::stdio::stdout_raw()),
457 Ok(t) => Pretty(t)
458 };
459 Ok(ConsoleTestState {
460 out: out,
461 log_out: log_out,
462 use_color: use_color(),
463 total: 0u,
464 passed: 0u,
465 failed: 0u,
466 ignored: 0u,
467 measured: 0u,
468 metrics: MetricMap::new(),
469 failures: Vec::new(),
470 max_name_len: 0u,
471 })
472 }
473
474 pub fn write_ok(&mut self) -> io::IoResult<()> {
475 self.write_pretty("ok", term::color::GREEN)
476 }
477
478 pub fn write_failed(&mut self) -> io::IoResult<()> {
479 self.write_pretty("FAILED", term::color::RED)
480 }
481
482 pub fn write_ignored(&mut self) -> io::IoResult<()> {
483 self.write_pretty("ignored", term::color::YELLOW)
484 }
485
486 pub fn write_metric(&mut self) -> io::IoResult<()> {
487 self.write_pretty("metric", term::color::CYAN)
488 }
489
490 pub fn write_bench(&mut self) -> io::IoResult<()> {
491 self.write_pretty("bench", term::color::CYAN)
492 }
493
494 pub fn write_added(&mut self) -> io::IoResult<()> {
495 self.write_pretty("added", term::color::GREEN)
496 }
497
498 pub fn write_improved(&mut self) -> io::IoResult<()> {
499 self.write_pretty("improved", term::color::GREEN)
500 }
501
502 pub fn write_removed(&mut self) -> io::IoResult<()> {
503 self.write_pretty("removed", term::color::YELLOW)
504 }
505
506 pub fn write_regressed(&mut self) -> io::IoResult<()> {
507 self.write_pretty("regressed", term::color::RED)
508 }
509
510 pub fn write_pretty(&mut self,
511 word: &str,
512 color: term::color::Color) -> io::IoResult<()> {
513 match self.out {
514 Pretty(ref mut term) => {
515 if self.use_color {
516 try!(term.fg(color));
517 }
518 try!(term.write(word.as_bytes()));
519 if self.use_color {
520 try!(term.reset());
521 }
522 Ok(())
523 }
524 Raw(ref mut stdout) => stdout.write(word.as_bytes())
525 }
526 }
527
528 pub fn write_plain(&mut self, s: &str) -> io::IoResult<()> {
529 match self.out {
530 Pretty(ref mut term) => term.write(s.as_bytes()),
531 Raw(ref mut stdout) => stdout.write(s.as_bytes())
532 }
533 }
534
535 pub fn write_run_start(&mut self, len: uint) -> io::IoResult<()> {
536 self.total = len;
537 let noun = if len != 1 { "tests" } else { "test" };
538 self.write_plain(format!("\nrunning {} {}\n", len, noun))
539 }
540
541 pub fn write_test_start(&mut self, test: &TestDesc,
542 align: NamePadding) -> io::IoResult<()> {
543 let name = test.padded_name(self.max_name_len, align);
544 self.write_plain(format!("test {} ... ", name))
545 }
546
547 pub fn write_result(&mut self, result: &TestResult) -> io::IoResult<()> {
548 try!(match *result {
549 TrOk => self.write_ok(),
550 TrFailed => self.write_failed(),
551 TrIgnored => self.write_ignored(),
552 TrMetrics(ref mm) => {
553 try!(self.write_metric());
554 self.write_plain(format!(": {}", fmt_metrics(mm)))
555 }
556 TrBench(ref bs) => {
557 try!(self.write_bench());
558 self.write_plain(format!(": {}", fmt_bench_samples(bs)))
559 }
560 });
561 self.write_plain("\n")
562 }
563
564 pub fn write_log(&mut self, test: &TestDesc,
565 result: &TestResult) -> io::IoResult<()> {
566 match self.log_out {
567 None => Ok(()),
568 Some(ref mut o) => {
569 let s = format!("{} {}\n", match *result {
570 TrOk => "ok".to_owned(),
571 TrFailed => "failed".to_owned(),
572 TrIgnored => "ignored".to_owned(),
573 TrMetrics(ref mm) => fmt_metrics(mm),
574 TrBench(ref bs) => fmt_bench_samples(bs)
575 }, test.name.to_str());
576 o.write(s.as_bytes())
577 }
578 }
579 }
580
581 pub fn write_failures(&mut self) -> io::IoResult<()> {
582 try!(self.write_plain("\nfailures:\n"));
583 let mut failures = Vec::new();
584 let mut fail_out = StrBuf::new();
585 for &(ref f, ref stdout) in self.failures.iter() {
586 failures.push(f.name.to_str());
587 if stdout.len() > 0 {
588 fail_out.push_str(format!("---- {} stdout ----\n\t",
589 f.name.to_str()));
590 let output = str::from_utf8_lossy(stdout.as_slice());
591 fail_out.push_str(output.as_slice().replace("\n", "\n\t"));
592 fail_out.push_str("\n");
593 }
594 }
595 if fail_out.len() > 0 {
596 try!(self.write_plain("\n"));
597 try!(self.write_plain(fail_out.as_slice()));
598 }
599
600 try!(self.write_plain("\nfailures:\n"));
601 failures.as_mut_slice().sort();
602 for name in failures.iter() {
603 try!(self.write_plain(format!(" {}\n", name.to_str())));
604 }
605 Ok(())
606 }
607
608 pub fn write_metric_diff(&mut self, diff: &MetricDiff) -> io::IoResult<()> {
609 let mut noise = 0;
610 let mut improved = 0;
611 let mut regressed = 0;
612 let mut added = 0;
613 let mut removed = 0;
614
615 for (k, v) in diff.iter() {
616 match *v {
617 LikelyNoise => noise += 1,
618 MetricAdded => {
619 added += 1;
620 try!(self.write_added());
621 try!(self.write_plain(format!(": {}\n", *k)));
622 }
623 MetricRemoved => {
624 removed += 1;
625 try!(self.write_removed());
626 try!(self.write_plain(format!(": {}\n", *k)));
627 }
628 Improvement(pct) => {
629 improved += 1;
630 try!(self.write_plain(format!(": {}", *k)));
631 try!(self.write_improved());
632 try!(self.write_plain(format!(" by {:.2f}%\n", pct as f64)));
633 }
634 Regression(pct) => {
635 regressed += 1;
636 try!(self.write_plain(format!(": {}", *k)));
637 try!(self.write_regressed());
638 try!(self.write_plain(format!(" by {:.2f}%\n", pct as f64)));
639 }
640 }
641 }
642 try!(self.write_plain(format!("result of ratchet: {} metrics added, \
643 {} removed, {} improved, {} regressed, \
644 {} noise\n",
645 added, removed, improved, regressed,
646 noise)));
647 if regressed == 0 {
648 try!(self.write_plain("updated ratchet file\n"));
649 } else {
650 try!(self.write_plain("left ratchet file untouched\n"));
651 }
652 Ok(())
653 }
654
655 pub fn write_run_finish(&mut self,
656 ratchet_metrics: &Option<Path>,
657 ratchet_pct: Option<f64>) -> io::IoResult<bool> {
658 assert!(self.passed + self.failed + self.ignored + self.measured == self.total);
659
660 let ratchet_success = match *ratchet_metrics {
661 None => true,
662 Some(ref pth) => {
663 try!(self.write_plain(format!("\nusing metrics ratchet: {}\n",
664 pth.display())));
665 match ratchet_pct {
666 None => (),
667 Some(pct) =>
668 try!(self.write_plain(format!("with noise-tolerance \
669 forced to: {}%\n",
670 pct)))
671 }
672 let (diff, ok) = self.metrics.ratchet(pth, ratchet_pct);
673 try!(self.write_metric_diff(&diff));
674 ok
675 }
676 };
677
678 let test_success = self.failed == 0u;
679 if !test_success {
680 try!(self.write_failures());
681 }
682
683 let success = ratchet_success && test_success;
684
685 try!(self.write_plain("\ntest result: "));
686 if success {
687 // There's no parallelism at this point so it's safe to use color
688 try!(self.write_ok());
689 } else {
690 try!(self.write_failed());
691 }
692 let s = format!(". {} passed; {} failed; {} ignored; {} measured\n\n",
693 self.passed, self.failed, self.ignored, self.measured);
694 try!(self.write_plain(s));
695 return Ok(success);
696 }
697 }
698
699 pub fn fmt_metrics(mm: &MetricMap) -> ~str {
700 let MetricMap(ref mm) = *mm;
701 let v : Vec<~str> = mm.iter()
702 .map(|(k,v)| format!("{}: {} (+/- {})",
703 *k,
704 v.value as f64,
705 v.noise as f64))
706 .collect();
707 v.connect(", ")
708 }
709
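// Formats one benchmark result line. Illustrative output only (numbers made
// up): `      123 ns/iter (+/- 4) = 512 MB/s`, or without the `= ... MB/s`
// suffix when `mb_s` is zero.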
710 pub fn fmt_bench_samples(bs: &BenchSamples) -> ~str {
711 if bs.mb_s != 0 {
712 format!("{:>9} ns/iter (+/- {}) = {} MB/s",
713 bs.ns_iter_summ.median as uint,
714 (bs.ns_iter_summ.max - bs.ns_iter_summ.min) as uint,
715 bs.mb_s)
716 } else {
717 format!("{:>9} ns/iter (+/- {})",
718 bs.ns_iter_summ.median as uint,
719 (bs.ns_iter_summ.max - bs.ns_iter_summ.min) as uint)
720 }
721 }
722
723 // A simple console test runner
724 pub fn run_tests_console(opts: &TestOpts,
725 tests: Vec<TestDescAndFn> ) -> io::IoResult<bool> {
726 fn callback<T: Writer>(event: &TestEvent,
727 st: &mut ConsoleTestState<T>) -> io::IoResult<()> {
728 match (*event).clone() {
729 TeFiltered(ref filtered_tests) => st.write_run_start(filtered_tests.len()),
730 TeWait(ref test, padding) => st.write_test_start(test, padding),
731 TeResult(test, result, stdout) => {
732 try!(st.write_log(&test, &result));
733 try!(st.write_result(&result));
734 match result {
735 TrOk => st.passed += 1,
736 TrIgnored => st.ignored += 1,
737 TrMetrics(mm) => {
738 let tname = test.name.to_str();
739 let MetricMap(mm) = mm;
740 for (k,v) in mm.iter() {
741 st.metrics.insert_metric(tname + "." + *k,
742 v.value, v.noise);
743 }
744 st.measured += 1
745 }
746 TrBench(bs) => {
747 st.metrics.insert_metric(test.name.to_str(),
748 bs.ns_iter_summ.median,
749 bs.ns_iter_summ.max - bs.ns_iter_summ.min);
750 st.measured += 1
751 }
752 TrFailed => {
753 st.failed += 1;
754 st.failures.push((test, stdout));
755 }
756 }
757 Ok(())
758 }
759 }
760 }
761 let mut st = try!(ConsoleTestState::new(opts, None::<StdWriter>));
762 fn len_if_padded(t: &TestDescAndFn) -> uint {
763 match t.testfn.padding() {
764 PadNone => 0u,
765 PadOnLeft | PadOnRight => t.desc.name.to_str().len(),
766 }
767 }
768 match tests.iter().max_by(|t|len_if_padded(*t)) {
769 Some(t) => {
770 let n = t.desc.name.to_str();
771 st.max_name_len = n.len();
772 },
773 None => {}
774 }
775 try!(run_tests(opts, tests, |x| callback(&x, &mut st)));
776 match opts.save_metrics {
777 None => (),
778 Some(ref pth) => {
779 try!(st.metrics.save(pth));
780 try!(st.write_plain(format!("\nmetrics saved to: {}",
781 pth.display())));
782 }
783 }
784 return st.write_run_finish(&opts.ratchet_metrics, opts.ratchet_noise_percent);
785 }
786
787 #[test]
788 fn should_sort_failures_before_printing_them() {
789 use std::io::MemWriter;
790 use std::str;
791
792 let test_a = TestDesc {
793 name: StaticTestName("a"),
794 ignore: false,
795 should_fail: false
796 };
797
798 let test_b = TestDesc {
799 name: StaticTestName("b"),
800 ignore: false,
801 should_fail: false
802 };
803
804 let mut st = ConsoleTestState {
805 log_out: None,
806 out: Raw(MemWriter::new()),
807 use_color: false,
808 total: 0u,
809 passed: 0u,
810 failed: 0u,
811 ignored: 0u,
812 measured: 0u,
813 max_name_len: 10u,
814 metrics: MetricMap::new(),
815 failures: vec!((test_b, Vec::new()), (test_a, Vec::new()))
816 };
817
818 st.write_failures().unwrap();
819 let s = match st.out {
820 Raw(ref m) => str::from_utf8_lossy(m.get_ref()),
821 Pretty(_) => unreachable!()
822 };
823
824 let apos = s.as_slice().find_str("a").unwrap();
825 let bpos = s.as_slice().find_str("b").unwrap();
826 assert!(apos < bpos);
827 }
828
829 fn use_color() -> bool { return get_concurrency() == 1; }
830
831 #[deriving(Clone)]
832 enum TestEvent {
833 TeFiltered(Vec<TestDesc> ),
834 TeWait(TestDesc, NamePadding),
835 TeResult(TestDesc, TestResult, Vec<u8> ),
836 }
837
838 pub type MonitorMsg = (TestDesc, TestResult, Vec<u8> );
839
840 fn run_tests(opts: &TestOpts,
841 tests: Vec<TestDescAndFn> ,
842 callback: |e: TestEvent| -> io::IoResult<()>) -> io::IoResult<()> {
843 let filtered_tests = filter_tests(opts, tests);
844 let filtered_descs = filtered_tests.iter()
845 .map(|t| t.desc.clone())
846 .collect();
847
848 try!(callback(TeFiltered(filtered_descs)));
849
850 let (filtered_tests, filtered_benchs_and_metrics) =
851 filtered_tests.partition(|e| {
852 match e.testfn {
853 StaticTestFn(_) | DynTestFn(_) => true,
854 _ => false
855 }
856 });
857
858 // It's tempting to just spawn all the tests at once, but since we have
859 // many tests that run in other processes we would be making a big mess.
860 let concurrency = get_concurrency();
861
862 let mut remaining = filtered_tests;
863 remaining.reverse();
864 let mut pending = 0;
865
866 let (tx, rx) = channel::<MonitorMsg>();
867
868 while pending > 0 || !remaining.is_empty() {
869 while pending < concurrency && !remaining.is_empty() {
870 let test = remaining.pop().unwrap();
871 if concurrency == 1 {
872 // We are doing one test at a time so we can print the name
873 // of the test before we run it. Useful for debugging tests
874 // that hang forever.
875 try!(callback(TeWait(test.desc.clone(), test.testfn.padding())));
876 }
877 run_test(opts, !opts.run_tests, test, tx.clone());
878 pending += 1;
879 }
880
881 let (desc, result, stdout) = rx.recv();
882 if concurrency != 1 {
883 try!(callback(TeWait(desc.clone(), PadNone)));
884 }
885 try!(callback(TeResult(desc, result, stdout)));
886 pending -= 1;
887 }
888
889 // All benchmarks run at the end, in serial.
890 // (this includes metric fns)
891 for b in filtered_benchs_and_metrics.move_iter() {
892 try!(callback(TeWait(b.desc.clone(), b.testfn.padding())));
893 run_test(opts, !opts.run_benchmarks, b, tx.clone());
894 let (test, result, stdout) = rx.recv();
895 try!(callback(TeResult(test, result, stdout)));
896 }
897 Ok(())
898 }
899
900 fn get_concurrency() -> uint {
901 use std::rt;
902 match os::getenv("RUST_TEST_TASKS") {
903 Some(s) => {
904 let opt_n: Option<uint> = FromStr::from_str(s);
905 match opt_n {
906 Some(n) if n > 0 => n,
907 _ => fail!("RUST_TEST_TASKS is `{}`, should be a positive integer.", s)
908 }
909 }
910 None => {
911 rt::default_sched_threads()
912 }
913 }
914 }
915
916 pub fn filter_tests(
917 opts: &TestOpts,
918 tests: Vec<TestDescAndFn> ) -> Vec<TestDescAndFn> {
919 let mut filtered = tests;
920
921 // Remove tests that don't match the test filter
922 filtered = if opts.filter.is_none() {
923 filtered
924 } else {
925 let filter_str = match opts.filter {
926 Some(ref f) => (*f).clone(),
927 None => "".to_owned()
928 };
929
930 fn filter_fn(test: TestDescAndFn, filter_str: &str) ->
931 Option<TestDescAndFn> {
932 if test.desc.name.to_str().contains(filter_str) {
933 return Some(test);
934 } else {
935 return None;
936 }
937 }
938
939 filtered.move_iter().filter_map(|x| filter_fn(x, filter_str)).collect()
940 };
941
942 // Maybe pull out the ignored tests and unignore them
943 filtered = if !opts.run_ignored {
944 filtered
945 } else {
946 fn filter(test: TestDescAndFn) -> Option<TestDescAndFn> {
947 if test.desc.ignore {
948 let TestDescAndFn {desc, testfn} = test;
949 Some(TestDescAndFn {
950 desc: TestDesc {ignore: false, ..desc},
951 testfn: testfn
952 })
953 } else {
954 None
955 }
956 };
957 filtered.move_iter().filter_map(|x| filter(x)).collect()
958 };
959
960 // Sort the tests alphabetically
961 filtered.sort_by(|t1, t2| t1.desc.name.to_str().cmp(&t2.desc.name.to_str()));
962
963 // Shard the remaining tests, if sharding requested.
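// For example (illustrative), `--test-shard=1.3` keeps the tests at indices
// 1, 4, 7, ... of the sorted `filtered` list.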
964 match opts.test_shard {
965 None => filtered,
966 Some((a,b)) => {
967 filtered.move_iter().enumerate()
968 .filter(|&(i,_)| i % b == a)
969 .map(|(_,t)| t)
970 .collect()
971 }
972 }
973 }
974
975 pub fn run_test(opts: &TestOpts,
976 force_ignore: bool,
977 test: TestDescAndFn,
978 monitor_ch: Sender<MonitorMsg>) {
979
980 let TestDescAndFn {desc, testfn} = test;
981
982 if force_ignore || desc.ignore {
983 monitor_ch.send((desc, TrIgnored, Vec::new()));
984 return;
985 }
986
987 #[allow(deprecated_owned_vector)]
988 fn run_test_inner(desc: TestDesc,
989 monitor_ch: Sender<MonitorMsg>,
990 nocapture: bool,
991 testfn: proc():Send) {
992 spawn(proc() {
993 let (tx, rx) = channel();
994 let mut reader = ChanReader::new(rx);
995 let stdout = ChanWriter::new(tx.clone());
996 let stderr = ChanWriter::new(tx);
997 let mut task = TaskBuilder::new().named(match desc.name {
998 DynTestName(ref name) => name.clone().into_maybe_owned(),
999 StaticTestName(name) => name.into_maybe_owned(),
1000 });
1001 if nocapture {
1002 drop((stdout, stderr));
1003 } else {
1004 task.opts.stdout = Some(box stdout as Box<Writer:Send>);
1005 task.opts.stderr = Some(box stderr as Box<Writer:Send>);
1006 }
1007 let result_future = task.future_result();
1008 task.spawn(testfn);
1009
1010 let stdout = reader.read_to_end().unwrap().move_iter().collect();
1011 let task_result = result_future.recv();
1012 let test_result = calc_result(&desc, task_result.is_ok());
1013 monitor_ch.send((desc.clone(), test_result, stdout));
1014 })
1015 }
1016
1017 match testfn {
1018 DynBenchFn(bencher) => {
1019 let bs = ::bench::benchmark(|harness| bencher.run(harness));
1020 monitor_ch.send((desc, TrBench(bs), Vec::new()));
1021 return;
1022 }
1023 StaticBenchFn(benchfn) => {
1024 let bs = ::bench::benchmark(|harness| benchfn(harness));
1025 monitor_ch.send((desc, TrBench(bs), Vec::new()));
1026 return;
1027 }
1028 DynMetricFn(f) => {
1029 let mut mm = MetricMap::new();
1030 f(&mut mm);
1031 monitor_ch.send((desc, TrMetrics(mm), Vec::new()));
1032 return;
1033 }
1034 StaticMetricFn(f) => {
1035 let mut mm = MetricMap::new();
1036 f(&mut mm);
1037 monitor_ch.send((desc, TrMetrics(mm), Vec::new()));
1038 return;
1039 }
1040 DynTestFn(f) => run_test_inner(desc, monitor_ch, opts.nocapture, f),
1041 StaticTestFn(f) => run_test_inner(desc, monitor_ch, opts.nocapture,
1042 proc() f())
1043 }
1044 }
1045
1046 fn calc_result(desc: &TestDesc, task_succeeded: bool) -> TestResult {
1047 if task_succeeded {
1048 if desc.should_fail { TrFailed }
1049 else { TrOk }
1050 } else {
1051 if desc.should_fail { TrOk }
1052 else { TrFailed }
1053 }
1054 }
1055
1056
1057 impl ToJson for Metric {
1058 fn to_json(&self) -> json::Json {
1059 let mut map = box TreeMap::new();
1060 map.insert("value".to_owned(), json::Number(self.value));
1061 map.insert("noise".to_owned(), json::Number(self.noise));
1062 json::Object(map)
1063 }
1064 }
1065
1066
1067 impl MetricMap {
1068
1069 pub fn new() -> MetricMap {
1070 MetricMap(TreeMap::new())
1071 }
1072
1073 /// Load a MetricMap from a file.
1074 ///
1075 /// # Failure
1076 ///
1077 /// This function will fail if the path does not exist or the path does not
1078 /// contain a valid metric map.
1079 pub fn load(p: &Path) -> MetricMap {
1080 assert!(p.exists());
1081 let mut f = File::open(p).unwrap();
1082 let value = json::from_reader(&mut f as &mut io::Reader).unwrap();
1083 let mut decoder = json::Decoder::new(value);
1084 MetricMap(match Decodable::decode(&mut decoder) {
1085 Ok(t) => t,
1086 Err(e) => fail!("failure decoding JSON: {}", e)
1087 })
1088 }
1089
1090 /// Write a MetricMap to a file.
1091 pub fn save(&self, p: &Path) -> io::IoResult<()> {
1092 let mut file = try!(File::create(p));
1093 let MetricMap(ref map) = *self;
1094 map.to_json().to_pretty_writer(&mut file)
1095 }
1096
1097 /// Compare against another MetricMap. Optionally compare all
1098 /// measurements in the maps using the provided `noise_pct` as a
1099 /// percentage of each value to consider noise. If `None`, each
1100 /// measurement's noise threshold is independently chosen as the
1101 /// maximum of that measurement's recorded noise quantity in either
1102 /// map.
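///
/// Illustrative example: with `noise_pct = Some(10.0)`, an old value of
/// 100.0 (recorded with non-negative noise, i.e. "smaller is better") and a
/// new value of 105.0 differ by 5.0, within the 10.0 threshold, so the entry
/// is `LikelyNoise`; a new value of 120.0 would instead be `Regression(20.0)`.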
1103 pub fn compare_to_old(&self, old: &MetricMap,
1104 noise_pct: Option<f64>) -> MetricDiff {
1105 let mut diff : MetricDiff = TreeMap::new();
1106 let MetricMap(ref selfmap) = *self;
1107 let MetricMap(ref old) = *old;
1108 for (k, vold) in old.iter() {
1109 let r = match selfmap.find(k) {
1110 None => MetricRemoved,
1111 Some(v) => {
1112 let delta = v.value - vold.value;
1113 let noise = match noise_pct {
1114 None => vold.noise.abs().max(v.noise.abs()),
1115 Some(pct) => vold.value * pct / 100.0
1116 };
1117 if delta.abs() <= noise {
1118 LikelyNoise
1119 } else {
1120 let pct = delta.abs() / vold.value.max(f64::EPSILON) * 100.0;
1121 if vold.noise < 0.0 {
1122 // When 'noise' is negative, it means we want
1123 // to see deltas that go up over time, and can
1124 // only tolerate slight negative movement.
1125 if delta < 0.0 {
1126 Regression(pct)
1127 } else {
1128 Improvement(pct)
1129 }
1130 } else {
1131 // When 'noise' is positive, it means we want
1132 // to see deltas that go down over time, and
1133 // can only tolerate slight positive movements.
1134 if delta < 0.0 {
1135 Improvement(pct)
1136 } else {
1137 Regression(pct)
1138 }
1139 }
1140 }
1141 }
1142 };
1143 diff.insert((*k).clone(), r);
1144 }
1145 let MetricMap(ref map) = *self;
1146 for (k, _) in map.iter() {
1147 if !diff.contains_key(k) {
1148 diff.insert((*k).clone(), MetricAdded);
1149 }
1150 }
1151 diff
1152 }
1153
1154 /// Insert a named `value` (+/- `noise`) metric into the map. The value
1155 /// must be non-negative. The `noise` indicates the uncertainty of the
1156 /// metric, which doubles as the "noise range" of acceptable
1157 /// pairwise-regressions on this named value, when comparing from one
1158 /// metric to the next using `compare_to_old`.
1159 ///
1160 /// If `noise` is positive, then it means this metric is of a value
1161 /// you want to see grow smaller, so a change larger than `noise` in the
1162 /// positive direction represents a regression.
1163 ///
1164 /// If `noise` is negative, then it means this metric is of a value
1165 /// you want to see grow larger, so a change larger than `noise` in the
1166 /// negative direction represents a regression.
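///
/// Illustrative sketch (metric names are hypothetical):
/// `mm.insert_metric("runtime-ns", 1200.0, 50.0)` records a value that should
/// shrink over time, while `mm.insert_metric("throughput", 80.0, -2.0)` uses a
/// negative noise to mark a value that should grow.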
1167 pub fn insert_metric(&mut self, name: &str, value: f64, noise: f64) {
1168 let m = Metric {
1169 value: value,
1170 noise: noise
1171 };
1172 let MetricMap(ref mut map) = *self;
1173 map.insert(name.to_owned(), m);
1174 }
1175
1176 /// Attempt to "ratchet" an external metric file. This involves loading
1177 /// metrics from a metric file (if it exists), comparing against
1178 /// the metrics in `self` using `compare_to_old`, and rewriting the
1179 /// file to contain the metrics in `self` if none of the
1180 /// `MetricChange`s are `Regression`. Returns the diff as well
1181 /// as a boolean indicating whether the ratchet succeeded.
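///
/// Illustrative sketch (the file name is hypothetical):
/// `let (diff, ok) = mm.ratchet(&Path::new("metrics.json"), None);` leaves
/// `metrics.json` untouched whenever `ok` is `false`.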
1182 pub fn ratchet(&self, p: &Path, pct: Option<f64>) -> (MetricDiff, bool) {
1183 let old = if p.exists() {
1184 MetricMap::load(p)
1185 } else {
1186 MetricMap::new()
1187 };
1188
1189 let diff : MetricDiff = self.compare_to_old(&old, pct);
1190 let ok = diff.iter().all(|(_, v)| {
1191 match *v {
1192 Regression(_) => false,
1193 _ => true
1194 }
1195 });
1196
1197 if ok {
1198 self.save(p).unwrap();
1199 }
1200 return (diff, ok)
1201 }
1202 }
1203
1204
1205 // Benchmarking
1206
1207 /// A function that is opaque to the optimizer, to allow benchmarks to
1208 /// pretend to use outputs to assist in avoiding dead-code
1209 /// elimination.
1210 ///
1211 /// This function is a no-op, and does not even read from `dummy`.
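///
/// Illustrative use inside a benchmark (where `my_fn` is a placeholder):
/// `b.iter(|| black_box(my_fn()))`, so that LLVM cannot optimize the
/// computation away as dead code.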
1212 pub fn black_box<T>(dummy: T) {
1213 // we need to "use" the argument in some way LLVM can't
1214 // introspect.
1215 unsafe {asm!("" : : "r"(&dummy))}
1216 }
1217
1218
1219 impl Bencher {
1220 /// Callback for benchmark functions to run in their body.
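///
/// Only the work inside the closure is timed, so any set-up can be done
/// before calling `iter`; an illustrative call is
/// `b.iter(|| black_box(expensive_step()))`, with `expensive_step` a
/// placeholder for the code under measurement.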
1221 pub fn iter<T>(&mut self, inner: || -> T) {
1222 self.ns_start = precise_time_ns();
1223 let k = self.iterations;
1224 for _ in range(0u64, k) {
1225 black_box(inner());
1226 }
1227 self.ns_end = precise_time_ns();
1228 }
1229
1230 pub fn ns_elapsed(&mut self) -> u64 {
1231 if self.ns_start == 0 || self.ns_end == 0 {
1232 0
1233 } else {
1234 self.ns_end - self.ns_start
1235 }
1236 }
1237
1238 pub fn ns_per_iter(&mut self) -> u64 {
1239 if self.iterations == 0 {
1240 0
1241 } else {
1242 self.ns_elapsed() / cmp::max(self.iterations, 1)
1243 }
1244 }
1245
1246 pub fn bench_n(&mut self, n: u64, f: |&mut Bencher|) {
1247 self.iterations = n;
1248 f(self);
1249 }
1250
1251 // This is a more statistics-driven benchmark algorithm
1252 pub fn auto_bench(&mut self, f: |&mut Bencher|) -> stats::Summary<f64> {
1253
1254 // Initial bench run to get ballpark figure.
1255 let mut n = 1_u64;
1256 self.bench_n(n, |x| f(x));
1257
1258 // Try to estimate iter count for 1ms falling back to 1m
1259 // iterations if first run took < 1ns.
1260 if self.ns_per_iter() == 0 {
1261 n = 1_000_000;
1262 } else {
1263 n = 1_000_000 / cmp::max(self.ns_per_iter(), 1);
1264 }
1265 // if the first run took more than 1ms we don't want to just
1266 // be left doing 0 iterations on every loop. The unfortunate
1267 // side effect of not being able to do as many runs is
1268 // automatically handled by the statistical analysis below
1269 // (i.e. larger error bars).
1270 if n == 0 { n = 1; }
1271
1272 let mut total_run = 0;
1273 let samples : &mut [f64] = [0.0_f64, ..50];
1274 loop {
1275 let loop_start = precise_time_ns();
1276
1277 for p in samples.mut_iter() {
1278 self.bench_n(n, |x| f(x));
1279 *p = self.ns_per_iter() as f64;
1280 };
1281
1282 stats::winsorize(samples, 5.0);
1283 let summ = stats::Summary::new(samples);
1284
1285 for p in samples.mut_iter() {
1286 self.bench_n(5 * n, |x| f(x));
1287 *p = self.ns_per_iter() as f64;
1288 };
1289
1290 stats::winsorize(samples, 5.0);
1291 let summ5 = stats::Summary::new(samples);
1292
1293 let now = precise_time_ns();
1294 let loop_run = now - loop_start;
1295
1296 // If we've run for 100ms and seem to have converged to a
1297 // stable median.
1298 if loop_run > 100_000_000 &&
1299 summ.median_abs_dev_pct < 1.0 &&
1300 summ.median - summ5.median < summ5.median_abs_dev {
1301 return summ5;
1302 }
1303
1304 total_run += loop_run;
1305 // Longest we ever run for is 3s.
1306 if total_run > 3_000_000_000 {
1307 return summ5;
1308 }
1309
1310 n *= 2;
1311 }
1312 }
1313 }
1314
1315 pub mod bench {
1316 use std::cmp;
1317 use super::{Bencher, BenchSamples};
1318
1319 pub fn benchmark(f: |&mut Bencher|) -> BenchSamples {
1320 let mut bs = Bencher {
1321 iterations: 0,
1322 ns_start: 0,
1323 ns_end: 0,
1324 bytes: 0
1325 };
1326
1327 let ns_iter_summ = bs.auto_bench(f);
1328
1329 let ns_iter = cmp::max(ns_iter_summ.median as u64, 1);
1330 let iter_s = 1_000_000_000 / ns_iter;
1331 let mb_s = (bs.bytes * iter_s) / 1_000_000;
1332
1333 BenchSamples {
1334 ns_iter_summ: ns_iter_summ,
1335 mb_s: mb_s as uint
1336 }
1337 }
1338 }
1339
1340 #[cfg(test)]
1341 mod tests {
1342 use test::{TrFailed, TrIgnored, TrOk, filter_tests, parse_opts,
1343 TestDesc, TestDescAndFn, TestOpts, run_test,
1344 Metric, MetricMap, MetricAdded, MetricRemoved,
1345 Improvement, Regression, LikelyNoise,
1346 StaticTestName, DynTestName, DynTestFn};
1347 use std::io::TempDir;
1348
1349 #[test]
1350 pub fn do_not_run_ignored_tests() {
1351 fn f() { fail!(); }
1352 let desc = TestDescAndFn {
1353 desc: TestDesc {
1354 name: StaticTestName("whatever"),
1355 ignore: true,
1356 should_fail: false
1357 },
1358 testfn: DynTestFn(proc() f()),
1359 };
1360 let (tx, rx) = channel();
1361 run_test(&TestOpts::new(), false, desc, tx);
1362 let (_, res, _) = rx.recv();
1363 assert!(res != TrOk);
1364 }
1365
1366 #[test]
1367 pub fn ignored_tests_result_in_ignored() {
1368 fn f() { }
1369 let desc = TestDescAndFn {
1370 desc: TestDesc {
1371 name: StaticTestName("whatever"),
1372 ignore: true,
1373 should_fail: false
1374 },
1375 testfn: DynTestFn(proc() f()),
1376 };
1377 let (tx, rx) = channel();
1378 run_test(&TestOpts::new(), false, desc, tx);
1379 let (_, res, _) = rx.recv();
1380 assert!(res == TrIgnored);
1381 }
1382
1383 #[test]
1384 fn test_should_fail() {
1385 fn f() { fail!(); }
1386 let desc = TestDescAndFn {
1387 desc: TestDesc {
1388 name: StaticTestName("whatever"),
1389 ignore: false,
1390 should_fail: true
1391 },
1392 testfn: DynTestFn(proc() f()),
1393 };
1394 let (tx, rx) = channel();
1395 run_test(&TestOpts::new(), false, desc, tx);
1396 let (_, res, _) = rx.recv();
1397 assert!(res == TrOk);
1398 }
1399
1400 #[test]
1401 fn test_should_fail_but_succeeds() {
1402 fn f() { }
1403 let desc = TestDescAndFn {
1404 desc: TestDesc {
1405 name: StaticTestName("whatever"),
1406 ignore: false,
1407 should_fail: true
1408 },
1409 testfn: DynTestFn(proc() f()),
1410 };
1411 let (tx, rx) = channel();
1412 run_test(&TestOpts::new(), false, desc, tx);
1413 let (_, res, _) = rx.recv();
1414 assert!(res == TrFailed);
1415 }
1416
1417 #[test]
1418 fn first_free_arg_should_be_a_filter() {
1419 let args = vec!("progname".to_owned(), "filter".to_owned());
1420 let opts = match parse_opts(args.as_slice()) {
1421 Some(Ok(o)) => o,
1422 _ => fail!("Malformed arg in first_free_arg_should_be_a_filter")
1423 };
1424 assert!("filter" == opts.filter.clone().unwrap());
1425 }
1426
1427 #[test]
1428 fn parse_ignored_flag() {
1429 let args = vec!("progname".to_owned(), "filter".to_owned(), "--ignored".to_owned());
1430 let opts = match parse_opts(args.as_slice()) {
1431 Some(Ok(o)) => o,
1432 _ => fail!("Malformed arg in parse_ignored_flag")
1433 };
1434 assert!((opts.run_ignored));
1435 }
1436
1437 #[test]
1438 pub fn filter_for_ignored_option() {
1439 // When we run ignored tests the test filter should filter out all the
1440 // unignored tests and flip the ignore flag on the rest to false
1441
1442 let mut opts = TestOpts::new();
1443 opts.run_tests = true;
1444 opts.run_ignored = true;
1445
1446 let tests = vec!(
1447 TestDescAndFn {
1448 desc: TestDesc {
1449 name: StaticTestName("1"),
1450 ignore: true,
1451 should_fail: false,
1452 },
1453 testfn: DynTestFn(proc() {}),
1454 },
1455 TestDescAndFn {
1456 desc: TestDesc {
1457 name: StaticTestName("2"),
1458 ignore: false,
1459 should_fail: false
1460 },
1461 testfn: DynTestFn(proc() {}),
1462 });
1463 let filtered = filter_tests(&opts, tests);
1464
1465 assert_eq!(filtered.len(), 1);
1466 assert_eq!(filtered.get(0).desc.name.to_str(), "1".to_owned());
1467 assert!(filtered.get(0).desc.ignore == false);
1468 }
1469
1470 #[test]
1471 pub fn sort_tests() {
1472 let mut opts = TestOpts::new();
1473 opts.run_tests = true;
1474
1475 let names =
1476 vec!("sha1::test".to_owned(), "int::test_to_str".to_owned(), "int::test_pow".to_owned(),
1477 "test::do_not_run_ignored_tests".to_owned(),
1478 "test::ignored_tests_result_in_ignored".to_owned(),
1479 "test::first_free_arg_should_be_a_filter".to_owned(),
1480 "test::parse_ignored_flag".to_owned(), "test::filter_for_ignored_option".to_owned(),
1481 "test::sort_tests".to_owned());
1482 let tests =
1483 {
1484 fn testfn() { }
1485 let mut tests = Vec::new();
1486 for name in names.iter() {
1487 let test = TestDescAndFn {
1488 desc: TestDesc {
1489 name: DynTestName((*name).clone()),
1490 ignore: false,
1491 should_fail: false
1492 },
1493 testfn: DynTestFn(testfn),
1494 };
1495 tests.push(test);
1496 }
1497 tests
1498 };
1499 let filtered = filter_tests(&opts, tests);
1500
1501 let expected =
1502 vec!("int::test_pow".to_owned(), "int::test_to_str".to_owned(), "sha1::test".to_owned(),
1503 "test::do_not_run_ignored_tests".to_owned(),
1504 "test::filter_for_ignored_option".to_owned(),
1505 "test::first_free_arg_should_be_a_filter".to_owned(),
1506 "test::ignored_tests_result_in_ignored".to_owned(),
1507 "test::parse_ignored_flag".to_owned(),
1508 "test::sort_tests".to_owned());
1509
1510 for (a, b) in expected.iter().zip(filtered.iter()) {
1511 assert!(*a == b.desc.name.to_str());
1512 }
1513 }
1514
1515 #[test]
1516 pub fn test_metricmap_compare() {
1517 let mut m1 = MetricMap::new();
1518 let mut m2 = MetricMap::new();
1519 m1.insert_metric("in-both-noise", 1000.0, 200.0);
1520 m2.insert_metric("in-both-noise", 1100.0, 200.0);
1521
1522 m1.insert_metric("in-first-noise", 1000.0, 2.0);
1523 m2.insert_metric("in-second-noise", 1000.0, 2.0);
1524
1525 m1.insert_metric("in-both-want-downwards-but-regressed", 1000.0, 10.0);
1526 m2.insert_metric("in-both-want-downwards-but-regressed", 2000.0, 10.0);
1527
1528 m1.insert_metric("in-both-want-downwards-and-improved", 2000.0, 10.0);
1529 m2.insert_metric("in-both-want-downwards-and-improved", 1000.0, 10.0);
1530
1531 m1.insert_metric("in-both-want-upwards-but-regressed", 2000.0, -10.0);
1532 m2.insert_metric("in-both-want-upwards-but-regressed", 1000.0, -10.0);
1533
1534 m1.insert_metric("in-both-want-upwards-and-improved", 1000.0, -10.0);
1535 m2.insert_metric("in-both-want-upwards-and-improved", 2000.0, -10.0);
1536
1537 let diff1 = m2.compare_to_old(&m1, None);
1538
1539 assert_eq!(*(diff1.find(&"in-both-noise".to_owned()).unwrap()), LikelyNoise);
1540 assert_eq!(*(diff1.find(&"in-first-noise".to_owned()).unwrap()), MetricRemoved);
1541 assert_eq!(*(diff1.find(&"in-second-noise".to_owned()).unwrap()), MetricAdded);
1542 assert_eq!(*(diff1.find(&"in-both-want-downwards-but-regressed".to_owned()).unwrap()),
1543 Regression(100.0));
1544 assert_eq!(*(diff1.find(&"in-both-want-downwards-and-improved".to_owned()).unwrap()),
1545 Improvement(50.0));
1546 assert_eq!(*(diff1.find(&"in-both-want-upwards-but-regressed".to_owned()).unwrap()),
1547 Regression(50.0));
1548 assert_eq!(*(diff1.find(&"in-both-want-upwards-and-improved".to_owned()).unwrap()),
1549 Improvement(100.0));
1550 assert_eq!(diff1.len(), 7);
1551
1552 let diff2 = m2.compare_to_old(&m1, Some(200.0));
1553
1554 assert_eq!(*(diff2.find(&"in-both-noise".to_owned()).unwrap()), LikelyNoise);
1555 assert_eq!(*(diff2.find(&"in-first-noise".to_owned()).unwrap()), MetricRemoved);
1556 assert_eq!(*(diff2.find(&"in-second-noise".to_owned()).unwrap()), MetricAdded);
1557 assert_eq!(*(diff2.find(&"in-both-want-downwards-but-regressed".to_owned()).unwrap()),
1558 LikelyNoise);
1559 assert_eq!(*(diff2.find(&"in-both-want-downwards-and-improved".to_owned()).unwrap()),
1560 LikelyNoise);
1561 assert_eq!(*(diff2.find(&"in-both-want-upwards-but-regressed".to_owned()).unwrap()),
1562 LikelyNoise);
1563 assert_eq!(*(diff2.find(&"in-both-want-upwards-and-improved".to_owned()).unwrap()),
1564 LikelyNoise);
1565 assert_eq!(diff2.len(), 7);
1566 }
1567
1568 #[test]
1569 pub fn ratchet_test() {
1570
1571 let dpth = TempDir::new("test-ratchet").expect("missing test for ratchet");
1572 let pth = dpth.path().join("ratchet.json");
1573
1574 let mut m1 = MetricMap::new();
1575 m1.insert_metric("runtime", 1000.0, 2.0);
1576 m1.insert_metric("throughput", 50.0, 2.0);
1577
1578 let mut m2 = MetricMap::new();
1579 m2.insert_metric("runtime", 1100.0, 2.0);
1580 m2.insert_metric("throughput", 50.0, 2.0);
1581
1582 m1.save(&pth).unwrap();
1583
1584 // Ask for a ratchet that should fail to advance.
1585 let (diff1, ok1) = m2.ratchet(&pth, None);
1586 assert_eq!(ok1, false);
1587 assert_eq!(diff1.len(), 2);
1588 assert_eq!(*(diff1.find(&"runtime".to_owned()).unwrap()), Regression(10.0));
1589 assert_eq!(*(diff1.find(&"throughput".to_owned()).unwrap()), LikelyNoise);
1590
1591 // Check that it was not rewritten.
1592 let m3 = MetricMap::load(&pth);
1593 let MetricMap(m3) = m3;
1594 assert_eq!(m3.len(), 2);
1595 assert_eq!(*(m3.find(&"runtime".to_owned()).unwrap()), Metric::new(1000.0, 2.0));
1596 assert_eq!(*(m3.find(&"throughput".to_owned()).unwrap()), Metric::new(50.0, 2.0));
1597
1598 // Ask for a ratchet with an explicit noise-percentage override,
1599 // that should advance.
1600 let (diff2, ok2) = m2.ratchet(&pth, Some(10.0));
1601 assert_eq!(ok2, true);
1602 assert_eq!(diff2.len(), 2);
1603 assert_eq!(*(diff2.find(&"runtime".to_owned()).unwrap()), LikelyNoise);
1604 assert_eq!(*(diff2.find(&"throughput".to_owned()).unwrap()), LikelyNoise);
1605
1606 // Check that it was rewritten.
1607 let m4 = MetricMap::load(&pth);
1608 let MetricMap(m4) = m4;
1609 assert_eq!(m4.len(), 2);
1610 assert_eq!(*(m4.find(&"runtime".to_owned()).unwrap()), Metric::new(1100.0, 2.0));
1611 assert_eq!(*(m4.find(&"throughput".to_owned()).unwrap()), Metric::new(50.0, 2.0));
1612 }
1613 }