./libextra/test.rs
1 // Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
4 //
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
10
11 #[doc(hidden)];
12
13 // Support code for rustc's built-in test runner generator. Currently,
14 // none of this is meant for users. It is intended to support the
15 // simplest interface possible for representing and running tests
16 // while providing a base that other test frameworks may build off of.
17
18
19 use getopts;
20 use getopts::groups;
21 use json::ToJson;
22 use json;
23 use serialize::Decodable;
24 use sort;
25 use stats::Stats;
26 use stats;
27 use term;
28 use time::precise_time_ns;
29 use treemap::TreeMap;
30
31 use std::clone::Clone;
32 use std::comm::{stream, SharedChan, GenericPort, GenericChan};
33 use std::libc;
34 use std::io;
35 use std::result;
36 use std::task;
37 use std::to_str::ToStr;
38 use std::f64;
39 use std::os;
40
41
42 // The name of a test. By convention this follows the rules for rust
43 // paths; i.e. it should be a series of identifiers separated by double
44 // colons. This way if some test runner wants to arrange the tests
45 // hierarchically it may.
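// For example (illustrative names only): StaticTestName("str::test_split")
// for a test compiled into the crate, or DynTestName(~"generated::case_3")
// for one constructed at runtime.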
46
47 #[deriving(Clone)]
48 pub enum TestName {
49 StaticTestName(&'static str),
50 DynTestName(~str)
51 }
52 impl ToStr for TestName {
53 fn to_str(&self) -> ~str {
54 match (*self).clone() {
55 StaticTestName(s) => s.to_str(),
56 DynTestName(s) => s.to_str()
57 }
58 }
59 }
60
61 // A function that runs a test. If the function returns successfully,
62 // the test succeeds; if the function fails then the test fails. We
63 // may need to come up with a more clever definition of test in order
64 // to support isolation of tests into tasks.
65 pub enum TestFn {
66 StaticTestFn(extern fn()),
67 StaticBenchFn(extern fn(&mut BenchHarness)),
68 StaticMetricFn(~fn(&mut MetricMap)),
69 DynTestFn(~fn()),
70 DynMetricFn(~fn(&mut MetricMap)),
71 DynBenchFn(~fn(&mut BenchHarness))
72 }
73
74 // Structure passed to BenchFns
75 pub struct BenchHarness {
76 iterations: u64,
77 ns_start: u64,
78 ns_end: u64,
79 bytes: u64
80 }
81
82 // The definition of a single test. A test runner will run a list of
83 // these.
84 #[deriving(Clone)]
85 pub struct TestDesc {
86 name: TestName,
87 ignore: bool,
88 should_fail: bool
89 }
90
91 pub struct TestDescAndFn {
92 desc: TestDesc,
93 testfn: TestFn,
94 }
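// A complete test case pairs a description with a function to run; a minimal
// sketch (mirroring the #[cfg(test)] module at the bottom of this file):
//
//     TestDescAndFn {
//         desc: TestDesc {
//             name: StaticTestName("whatever"),
//             ignore: false,
//             should_fail: false
//         },
//         testfn: DynTestFn(|| assert!(true)),
//     }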
95
96 #[deriving(Clone, Encodable, Decodable, Eq)]
97 pub struct Metric {
98 value: f64,
99 noise: f64
100 }
101
102 #[deriving(Eq)]
103 pub struct MetricMap(TreeMap<~str,Metric>);
104
105 impl Clone for MetricMap {
106 fn clone(&self) -> MetricMap {
107 MetricMap((**self).clone())
108 }
109 }
110
111 /// Analysis of a single change in metric
112 #[deriving(Eq)]
113 pub enum MetricChange {
114 LikelyNoise,
115 MetricAdded,
116 MetricRemoved,
117 Improvement(f64),
118 Regression(f64)
119 }
120
121 pub type MetricDiff = TreeMap<~str,MetricChange>;
122
123 // The default console test runner. It accepts the command line
124 // arguments and a vector of test_descs.
125 pub fn test_main(args: &[~str], tests: ~[TestDescAndFn]) {
126 let opts =
127 match parse_opts(args) {
128 Ok(o) => o,
129 Err(msg) => fail!(msg)
130 };
131 if !run_tests_console(&opts, tests) { fail!("Some tests failed"); }
132 }
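// In the usual case the compiler-generated harness just forwards the process
// arguments; a minimal sketch (assuming std::os::args() and a generated test
// vector):
//
//     fn main() {
//         test_main(std::os::args(), ~[/* generated TestDescAndFn values */])
//     }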
133
134 // A variant optimized for invocation with a static test vector.
135 // This will fail (intentionally) when fed any dynamic tests, because
136 // it is copying the static values out into a dynamic vector and cannot
137 // copy dynamic values. It is doing this because from this point on
138 // a ~[TestDescAndFn] is used in order to effect ownership-transfer
139 // semantics into parallel test runners, which in turn requires a ~[]
140 // rather than a &[].
141 pub fn test_main_static(args: &[~str], tests: &[TestDescAndFn]) {
142 let owned_tests = do tests.map |t| {
143 match t.testfn {
144 StaticTestFn(f) =>
145 TestDescAndFn { testfn: StaticTestFn(f), desc: t.desc.clone() },
146
147 StaticBenchFn(f) =>
148 TestDescAndFn { testfn: StaticBenchFn(f), desc: t.desc.clone() },
149
150 _ => {
151 fail!("non-static tests passed to test::test_main_static");
152 }
153 }
154 };
155 test_main(args, owned_tests)
156 }
157
158 pub struct TestOpts {
159 filter: Option<~str>,
160 run_ignored: bool,
161 run_tests: bool,
162 run_benchmarks: bool,
163 ratchet_metrics: Option<Path>,
164 ratchet_noise_percent: Option<f64>,
165 save_metrics: Option<Path>,
166 test_shard: Option<(uint,uint)>,
167 logfile: Option<Path>
168 }
169
170 type OptRes = Result<TestOpts, ~str>;
171
172 fn optgroups() -> ~[getopts::groups::OptGroup] {
173 ~[groups::optflag("", "ignored", "Run ignored tests"),
174 groups::optflag("", "test", "Run tests and not benchmarks"),
175 groups::optflag("", "bench", "Run benchmarks instead of tests"),
176 groups::optflag("h", "help", "Display this message (longer with --help)"),
177 groups::optopt("", "save-metrics", "Location to save bench metrics",
178 "PATH"),
179 groups::optopt("", "ratchet-metrics",
180 "Location to load and save metrics from. The metrics \
181 loaded are cause benchmarks to fail if they run too \
182 slowly", "PATH"),
183 groups::optopt("", "ratchet-noise-percent",
184 "Tests within N% of the recorded metrics will be \
185 considered as passing", "PERCENTAGE"),
186 groups::optopt("", "logfile", "Write logs to the specified file instead \
187 of stdout", "PATH"),
188 groups::optopt("", "test-shard", "run shard A, of B shards, worth of the testsuite",
189 "A.B")]
190 }
191
192 fn usage(binary: &str, helpstr: &str) -> ! {
193 #[fixed_stack_segment]; #[inline(never)];
194
195 let message = fmt!("Usage: %s [OPTIONS] [FILTER]", binary);
196 println(groups::usage(message, optgroups()));
197 println("");
198 if helpstr == "help" {
199 println("\
200 The FILTER is matched against the name of all tests to run, and if any tests
201 have a substring match, only those tests are run.
202
203 By default, all tests are run in parallel. This can be altered with the
204 RUST_TEST_TASKS environment variable when running tests (set it to 1).
205
206 Test Attributes:
207
208 #[test] - Indicates a function is a test to be run. This function
209 takes no arguments.
210 #[bench] - Indicates a function is a benchmark to be run. This
211 function takes one argument (extra::test::BenchHarness).
212 #[should_fail] - This function (also labeled with #[test]) will only pass if
213 the code causes a failure (an assertion failure or fail!)
214 #[ignore] - When applied to a function which is already attributed as a
215 test, then the test runner will ignore these tests during
216 normal test runs. Running with --ignored will run these
217 tests. This may also be written as #[ignore(cfg(...))] to
218 ignore the test on certain configurations.");
219 }
220 unsafe { libc::exit(0) }
221 }
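// For reference, the attributes described above are applied like this
// (sketch; the function names are made up):
//
//     #[test]
//     fn arithmetic_works() { assert_eq!(2 + 2, 4); }
//
//     #[bench]
//     fn bench_nothing(bh: &mut extra::test::BenchHarness) {
//         bh.iter(|| ());
//     }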
222
223 // Parses command line arguments into test options
224 pub fn parse_opts(args: &[~str]) -> OptRes {
225 let args_ = args.tail();
226 let matches =
227 match groups::getopts(args_, optgroups()) {
228 Ok(m) => m,
229 Err(f) => return Err(f.to_err_msg())
230 };
231
232 if matches.opt_present("h") { usage(args[0], "h"); }
233 if matches.opt_present("help") { usage(args[0], "help"); }
234
235 let filter =
236 if matches.free.len() > 0 {
237 Some((matches).free[0].clone())
238 } else {
239 None
240 };
241
242 let run_ignored = matches.opt_present("ignored");
243
244 let logfile = matches.opt_str("logfile");
245 let logfile = logfile.map_move(|s| Path(s));
246
247 let run_benchmarks = matches.opt_present("bench");
248 let run_tests = ! run_benchmarks ||
249 matches.opt_present("test");
250
251 let ratchet_metrics = matches.opt_str("ratchet-metrics");
252 let ratchet_metrics = ratchet_metrics.map_move(|s| Path(s));
253
254 let ratchet_noise_percent = matches.opt_str("ratchet-noise-percent");
255 let ratchet_noise_percent = ratchet_noise_percent.map_move(|s| from_str::<f64>(s).unwrap());
256
257 let save_metrics = matches.opt_str("save-metrics");
258 let save_metrics = save_metrics.map_move(|s| Path(s));
259
260 let test_shard = matches.opt_str("test-shard");
261 let test_shard = opt_shard(test_shard);
262
263 let test_opts = TestOpts {
264 filter: filter,
265 run_ignored: run_ignored,
266 run_tests: run_tests,
267 run_benchmarks: run_benchmarks,
268 ratchet_metrics: ratchet_metrics,
269 ratchet_noise_percent: ratchet_noise_percent,
270 save_metrics: save_metrics,
271 test_shard: test_shard,
272 logfile: logfile
273 };
274
275 Ok(test_opts)
276 }
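// For example (cf. the #[cfg(test)] module below), parsing
// ~[~"progname", ~"somefilter", ~"--ignored"] yields a TestOpts whose
// filter is Some(~"somefilter") and whose run_ignored flag is true.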
277
278 pub fn opt_shard(maybestr: Option<~str>) -> Option<(uint,uint)> {
279 match maybestr {
280 None => None,
281 Some(s) => {
282 match s.split_iter('.').to_owned_vec() {
283 [a, b] => match (from_str::<uint>(a), from_str::<uint>(b)) {
284 (Some(a), Some(b)) => Some((a,b)),
285 _ => None
286 },
287 _ => None
288 }
289 }
290 }
291 }
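// e.g. opt_shard(Some(~"2.4")) == Some((2u, 4u)); input that is not two
// dot-separated unsigned integers (such as ~"2" or ~"a.b") yields None.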
292
293
294 #[deriving(Clone, Eq)]
295 pub struct BenchSamples {
296 ns_iter_summ: stats::Summary,
297 mb_s: uint
298 }
299
300 #[deriving(Clone, Eq)]
301 pub enum TestResult {
302 TrOk,
303 TrFailed,
304 TrIgnored,
305 TrMetrics(MetricMap),
306 TrBench(BenchSamples),
307 }
308
309 struct ConsoleTestState {
310 out: @io::Writer,
311 log_out: Option<@io::Writer>,
312 term: Option<term::Terminal>,
313 use_color: bool,
314 total: uint,
315 passed: uint,
316 failed: uint,
317 ignored: uint,
318 measured: uint,
319 metrics: MetricMap,
320 failures: ~[TestDesc]
321 }
322
323 impl ConsoleTestState {
324 pub fn new(opts: &TestOpts) -> ConsoleTestState {
325 let log_out = match opts.logfile {
326 Some(ref path) => match io::file_writer(path,
327 [io::Create,
328 io::Truncate]) {
329 result::Ok(w) => Some(w),
330 result::Err(ref s) => {
331 fail!("can't open output file: %s", *s)
332 }
333 },
334 None => None
335 };
336 let out = io::stdout();
337 let term = match term::Terminal::new(out) {
338 Err(_) => None,
339 Ok(t) => Some(t)
340 };
341 ConsoleTestState {
342 out: out,
343 log_out: log_out,
344 use_color: use_color(),
345 term: term,
346 total: 0u,
347 passed: 0u,
348 failed: 0u,
349 ignored: 0u,
350 measured: 0u,
351 metrics: MetricMap::new(),
352 failures: ~[]
353 }
354 }
355
356 pub fn write_ok(&self) {
357 self.write_pretty("ok", term::color::GREEN);
358 }
359
360 pub fn write_failed(&self) {
361 self.write_pretty("FAILED", term::color::RED);
362 }
363
364 pub fn write_ignored(&self) {
365 self.write_pretty("ignored", term::color::YELLOW);
366 }
367
368 pub fn write_metric(&self) {
369 self.write_pretty("metric", term::color::CYAN);
370 }
371
372 pub fn write_bench(&self) {
373 self.write_pretty("bench", term::color::CYAN);
374 }
375
376 pub fn write_added(&self) {
377 self.write_pretty("added", term::color::GREEN);
378 }
379
380 pub fn write_improved(&self) {
381 self.write_pretty("improved", term::color::GREEN);
382 }
383
384 pub fn write_removed(&self) {
385 self.write_pretty("removed", term::color::YELLOW);
386 }
387
388 pub fn write_regressed(&self) {
389 self.write_pretty("regressed", term::color::RED);
390 }
391
392 pub fn write_pretty(&self,
393 word: &str,
394 color: term::color::Color) {
395 match self.term {
396 None => self.out.write_str(word),
397 Some(ref t) => {
398 if self.use_color {
399 t.fg(color);
400 }
401 self.out.write_str(word);
402 if self.use_color {
403 t.reset();
404 }
405 }
406 }
407 }
408
409 pub fn write_run_start(&mut self, len: uint) {
410 self.total = len;
411 let noun = if len != 1 { &"tests" } else { &"test" };
412 self.out.write_line(fmt!("\nrunning %u %s", len, noun));
413 }
414
415 pub fn write_test_start(&self, test: &TestDesc) {
416 self.out.write_str(fmt!("test %s ... ", test.name.to_str()));
417 }
418
419 pub fn write_result(&self, result: &TestResult) {
420 match *result {
421 TrOk => self.write_ok(),
422 TrFailed => self.write_failed(),
423 TrIgnored => self.write_ignored(),
424 TrMetrics(ref mm) => {
425 self.write_metric();
426 self.out.write_str(": " + fmt_metrics(mm));
427 }
428 TrBench(ref bs) => {
429 self.write_bench();
430 self.out.write_str(": " + fmt_bench_samples(bs))
431 }
432 }
433 self.out.write_str(&"\n");
434 }
435
436 pub fn write_log(&self, test: &TestDesc, result: &TestResult) {
437 match self.log_out {
438 None => (),
439 Some(out) => {
440 out.write_line(fmt!("%s %s",
441 match *result {
442 TrOk => ~"ok",
443 TrFailed => ~"failed",
444 TrIgnored => ~"ignored",
445 TrMetrics(ref mm) => fmt_metrics(mm),
446 TrBench(ref bs) => fmt_bench_samples(bs)
447 }, test.name.to_str()));
448 }
449 }
450 }
451
452 pub fn write_failures(&self) {
453 self.out.write_line("\nfailures:");
454 let mut failures = ~[];
455 for f in self.failures.iter() {
456 failures.push(f.name.to_str());
457 }
458 sort::tim_sort(failures);
459 for name in failures.iter() {
460 self.out.write_line(fmt!(" %s", name.to_str()));
461 }
462 }
463
464 pub fn write_metric_diff(&self, diff: &MetricDiff) {
465 let mut noise = 0;
466 let mut improved = 0;
467 let mut regressed = 0;
468 let mut added = 0;
469 let mut removed = 0;
470
471 for (k, v) in diff.iter() {
472 match *v {
473 LikelyNoise => noise += 1,
474 MetricAdded => {
475 added += 1;
476 self.write_added();
477 self.out.write_line(fmt!(": %s", *k));
478 }
479 MetricRemoved => {
480 removed += 1;
481 self.write_removed();
482 self.out.write_line(fmt!(": %s", *k));
483 }
484 Improvement(pct) => {
485 improved += 1;
486 self.out.write_str(*k);
487 self.out.write_str(": ");
488 self.write_improved();
489 self.out.write_line(fmt!(" by %.2f%%", pct as float))
490 }
491 Regression(pct) => {
492 regressed += 1;
493 self.out.write_str(*k);
494 self.out.write_str(": ");
495 self.write_regressed();
496 self.out.write_line(fmt!(" by %.2f%%", pct as float))
497 }
498 }
499 }
500 self.out.write_line(fmt!("result of ratchet: %u matrics added, %u removed, \
501 %u improved, %u regressed, %u noise",
502 added, removed, improved, regressed, noise));
503 if regressed == 0 {
504 self.out.write_line("updated ratchet file")
505 } else {
506 self.out.write_line("left ratchet file untouched")
507 }
508 }
509
510 pub fn write_run_finish(&self,
511 ratchet_metrics: &Option<Path>,
512 ratchet_pct: Option<f64>) -> bool {
513 assert!(self.passed + self.failed + self.ignored + self.measured == self.total);
514
515 let ratchet_success = match *ratchet_metrics {
516 None => true,
517 Some(ref pth) => {
518 self.out.write_str(fmt!("\nusing metrics ratchet: %s\n", pth.to_str()));
519 match ratchet_pct {
520 None => (),
521 Some(pct) =>
522 self.out.write_str(fmt!("with noise-tolerance forced to: %f%%\n",
523 pct as float))
524 }
525 let (diff, ok) = self.metrics.ratchet(pth, ratchet_pct);
526 self.write_metric_diff(&diff);
527 ok
528 }
529 };
530
531 let test_success = self.failed == 0u;
532 if !test_success {
533 self.write_failures();
534 }
535
536 let success = ratchet_success && test_success;
537
538 self.out.write_str("\ntest result: ");
539 if success {
540 // There's no parallelism at this point so it's safe to use color
541 self.write_ok();
542 } else {
543 self.write_failed();
544 }
545 self.out.write_str(fmt!(". %u passed; %u failed; %u ignored; %u measured\n\n",
546 self.passed, self.failed, self.ignored, self.measured));
547 return success;
548 }
549 }
550
551 pub fn fmt_metrics(mm: &MetricMap) -> ~str {
552 let v : ~[~str] = mm.iter()
553 .map(|(k,v)| fmt!("%s: %f (+/- %f)",
554 *k,
555 v.value as float,
556 v.noise as float))
557 .collect();
558 v.connect(", ")
559 }
560
561 pub fn fmt_bench_samples(bs: &BenchSamples) -> ~str {
562 if bs.mb_s != 0 {
563 fmt!("%u ns/iter (+/- %u) = %u MB/s",
564 bs.ns_iter_summ.median as uint,
565 (bs.ns_iter_summ.max - bs.ns_iter_summ.min) as uint,
566 bs.mb_s)
567 } else {
568 fmt!("%u ns/iter (+/- %u)",
569 bs.ns_iter_summ.median as uint,
570 (bs.ns_iter_summ.max - bs.ns_iter_summ.min) as uint)
571 }
572 }
573
574 // A simple console test runner
575 pub fn run_tests_console(opts: &TestOpts,
576 tests: ~[TestDescAndFn]) -> bool {
577 fn callback(event: &TestEvent, st: &mut ConsoleTestState) {
578 debug!("callback(event=%?)", event);
579 match (*event).clone() {
580 TeFiltered(ref filtered_tests) => st.write_run_start(filtered_tests.len()),
581 TeWait(ref test) => st.write_test_start(test),
582 TeResult(test, result) => {
583 st.write_log(&test, &result);
584 st.write_result(&result);
585 match result {
586 TrOk => st.passed += 1,
587 TrIgnored => st.ignored += 1,
588 TrMetrics(mm) => {
589 let tname = test.name.to_str();
590 for (k,v) in mm.iter() {
591 st.metrics.insert_metric(tname + "." + *k,
592 v.value, v.noise);
593 }
594 st.measured += 1
595 }
596 TrBench(bs) => {
597 st.metrics.insert_metric(test.name.to_str(),
598 bs.ns_iter_summ.median,
599 bs.ns_iter_summ.max - bs.ns_iter_summ.min);
600 st.measured += 1
601 }
602 TrFailed => {
603 st.failed += 1;
604 st.failures.push(test);
605 }
606 }
607 }
608 }
609 }
610 let st = @mut ConsoleTestState::new(opts);
611 run_tests(opts, tests, |x| callback(&x, st));
612 match opts.save_metrics {
613 None => (),
614 Some(ref pth) => {
615 st.metrics.save(pth);
616 st.out.write_str(fmt!("\nmetrics saved to: %s", pth.to_str()));
617 }
618 }
619 return st.write_run_finish(&opts.ratchet_metrics, opts.ratchet_noise_percent);
620 }
621
622 #[test]
623 fn should_sort_failures_before_printing_them() {
624 fn dummy() {}
625
626 let s = do io::with_str_writer |wr| {
627 let test_a = TestDesc {
628 name: StaticTestName("a"),
629 ignore: false,
630 should_fail: false
631 };
632
633 let test_b = TestDesc {
634 name: StaticTestName("b"),
635 ignore: false,
636 should_fail: false
637 };
638
639 let st = @ConsoleTestState {
640 out: wr,
641 log_out: None,
642 term: None,
643 use_color: false,
644 total: 0u,
645 passed: 0u,
646 failed: 0u,
647 ignored: 0u,
648 measured: 0u,
649 metrics: MetricMap::new(),
650 failures: ~[test_b, test_a]
651 };
652
653 st.write_failures();
654 };
655
656 let apos = s.find_str("a").unwrap();
657 let bpos = s.find_str("b").unwrap();
658 assert!(apos < bpos);
659 }
660
661 fn use_color() -> bool { return get_concurrency() == 1; }
662
663 #[deriving(Clone)]
664 enum TestEvent {
665 TeFiltered(~[TestDesc]),
666 TeWait(TestDesc),
667 TeResult(TestDesc, TestResult),
668 }
669
670 type MonitorMsg = (TestDesc, TestResult);
671
672 fn run_tests(opts: &TestOpts,
673 tests: ~[TestDescAndFn],
674 callback: &fn(e: TestEvent)) {
675
676 let filtered_tests = filter_tests(opts, tests);
677 let filtered_descs = filtered_tests.map(|t| t.desc.clone());
678
679 callback(TeFiltered(filtered_descs));
680
681 let (filtered_tests, filtered_benchs_and_metrics) =
682 do filtered_tests.partition |e| {
683 match e.testfn {
684 StaticTestFn(_) | DynTestFn(_) => true,
685 _ => false
686 }
687 };
688
689 // It's tempting to just spawn all the tests at once, but since we have
690 // many tests that run in other processes we would be making a big mess.
691 let concurrency = get_concurrency();
692 debug!("using %u test tasks", concurrency);
693
694 let mut remaining = filtered_tests;
695 remaining.reverse();
696 let mut pending = 0;
697
698 let (p, ch) = stream();
699 let ch = SharedChan::new(ch);
700
701 while pending > 0 || !remaining.is_empty() {
702 while pending < concurrency && !remaining.is_empty() {
703 let test = remaining.pop();
704 if concurrency == 1 {
705 // We are doing one test at a time so we can print the name
706 // of the test before we run it. Useful for debugging tests
707 // that hang forever.
708 callback(TeWait(test.desc.clone()));
709 }
710 run_test(!opts.run_tests, test, ch.clone());
711 pending += 1;
712 }
713
714 let (desc, result) = p.recv();
715 if concurrency != 1 {
716 callback(TeWait(desc.clone()));
717 }
718 callback(TeResult(desc, result));
719 pending -= 1;
720 }
721
722 // All benchmarks run at the end, in serial.
723 // (this includes metric fns)
724 for b in filtered_benchs_and_metrics.move_iter() {
725 callback(TeWait(b.desc.clone()));
726 run_test(!opts.run_benchmarks, b, ch.clone());
727 let (test, result) = p.recv();
728 callback(TeResult(test, result));
729 }
730 }
731
732 fn get_concurrency() -> uint {
733 use std::rt;
734 match os::getenv("RUST_TEST_TASKS") {
735 Some(s) => {
736 let opt_n: Option<uint> = FromStr::from_str(s);
737 match opt_n {
738 Some(n) if n > 0 => n,
739 _ => fail!("RUST_TEST_TASKS is `%s`, should be a positive integer.", s)
740 }
741 }
742 None => {
743 rt::util::default_sched_threads()
744 }
745 }
746 }
747
748 pub fn filter_tests(
749 opts: &TestOpts,
750 tests: ~[TestDescAndFn]) -> ~[TestDescAndFn]
751 {
752 let mut filtered = tests;
753
754 // Remove tests that don't match the test filter
755 filtered = if opts.filter.is_none() {
756 filtered
757 } else {
758 let filter_str = match opts.filter {
759 Some(ref f) => (*f).clone(),
760 None => ~""
761 };
762
763 fn filter_fn(test: TestDescAndFn, filter_str: &str) ->
764 Option<TestDescAndFn> {
765 if test.desc.name.to_str().contains(filter_str) {
766 return Some(test);
767 } else {
768 return None;
769 }
770 }
771
772 filtered.move_iter().filter_map(|x| filter_fn(x, filter_str)).collect()
773 };
774
775 // Maybe pull out the ignored tests and unignore them
776 filtered = if !opts.run_ignored {
777 filtered
778 } else {
779 fn filter(test: TestDescAndFn) -> Option<TestDescAndFn> {
780 if test.desc.ignore {
781 let TestDescAndFn {desc, testfn} = test;
782 Some(TestDescAndFn {
783 desc: TestDesc {ignore: false, ..desc},
784 testfn: testfn
785 })
786 } else {
787 None
788 }
789 };
790 filtered.move_iter().filter_map(|x| filter(x)).collect()
791 };
792
793 // Sort the tests alphabetically
794 fn lteq(t1: &TestDescAndFn, t2: &TestDescAndFn) -> bool {
795 t1.desc.name.to_str() < t2.desc.name.to_str()
796 }
797 sort::quick_sort(filtered, lteq);
798
799 // Shard the remaining tests, if sharding requested.
800 match opts.test_shard {
801 None => filtered,
802 Some((a,b)) =>
803 filtered.move_iter().enumerate()
804 .filter(|&(i,_)| i % b == a)
805 .map(|(_,t)| t)
806 .to_owned_vec()
807 }
808 }
809
810 pub fn run_test(force_ignore: bool,
811 test: TestDescAndFn,
812 monitor_ch: SharedChan<MonitorMsg>) {
813
814 let TestDescAndFn {desc, testfn} = test;
815
816 if force_ignore || desc.ignore {
817 monitor_ch.send((desc, TrIgnored));
818 return;
819 }
820
821 fn run_test_inner(desc: TestDesc,
822 monitor_ch: SharedChan<MonitorMsg>,
823 testfn: ~fn()) {
824 let testfn_cell = ::std::cell::Cell::new(testfn);
825 do task::spawn {
826 let mut result_future = None; // task::future_result(builder);
827
828 let mut task = task::task();
829 task.unlinked();
830 task.future_result(|r| { result_future = Some(r) });
831 task.spawn(testfn_cell.take());
832
833 let task_result = result_future.unwrap().recv();
834 let test_result = calc_result(&desc,
835 task_result == task::Success);
836 monitor_ch.send((desc.clone(), test_result));
837 }
838 }
839
840 match testfn {
841 DynBenchFn(benchfn) => {
842 let bs = ::test::bench::benchmark(benchfn);
843 monitor_ch.send((desc, TrBench(bs)));
844 return;
845 }
846 StaticBenchFn(benchfn) => {
847 let bs = ::test::bench::benchmark(benchfn);
848 monitor_ch.send((desc, TrBench(bs)));
849 return;
850 }
851 DynMetricFn(f) => {
852 let mut mm = MetricMap::new();
853 f(&mut mm);
854 monitor_ch.send((desc, TrMetrics(mm)));
855 return;
856 }
857 StaticMetricFn(f) => {
858 let mut mm = MetricMap::new();
859 f(&mut mm);
860 monitor_ch.send((desc, TrMetrics(mm)));
861 return;
862 }
863 DynTestFn(f) => run_test_inner(desc, monitor_ch, f),
864 StaticTestFn(f) => run_test_inner(desc, monitor_ch, || f())
865 }
866 }
867
868 fn calc_result(desc: &TestDesc, task_succeeded: bool) -> TestResult {
869 if task_succeeded {
870 if desc.should_fail { TrFailed }
871 else { TrOk }
872 } else {
873 if desc.should_fail { TrOk }
874 else { TrFailed }
875 }
876 }
877
878
879 impl ToJson for Metric {
880 fn to_json(&self) -> json::Json {
881 let mut map = ~TreeMap::new();
882 map.insert(~"value", json::Number(self.value as float));
883 map.insert(~"noise", json::Number(self.noise as float));
884 json::Object(map)
885 }
886 }
887
888 impl MetricMap {
889
890 pub fn new() -> MetricMap {
891 MetricMap(TreeMap::new())
892 }
893
894 /// Load a MetricMap from a file.
895 pub fn load(p: &Path) -> MetricMap {
896 assert!(os::path_exists(p));
897 let f = io::file_reader(p).unwrap();
898 let mut decoder = json::Decoder(json::from_reader(f).unwrap());
899 MetricMap(Decodable::decode(&mut decoder))
900 }
901
902 /// Write the MetricMap to a file.
903 pub fn save(&self, p: &Path) {
904 let f = io::file_writer(p, [io::Create, io::Truncate]).unwrap();
905 self.to_json().to_pretty_writer(f);
906 }
907
908 /// Compare against another MetricMap. Optionally compare all
909 /// measurements in the maps using the provided `noise_pct` as a
910 /// percentage of each value to consider noise. If `None`, each
911 /// measurement's noise threshold is independently chosen as the
912 /// maximum of that measurement's recorded noise quantity in either
913 /// map.
914 pub fn compare_to_old(&self, old: &MetricMap,
915 noise_pct: Option<f64>) -> MetricDiff {
916 let mut diff : MetricDiff = TreeMap::new();
917 for (k, vold) in old.iter() {
918 let r = match self.find(k) {
919 None => MetricRemoved,
920 Some(v) => {
921 let delta = (v.value - vold.value);
922 let noise = match noise_pct {
923 None => f64::max(vold.noise.abs(), v.noise.abs()),
924 Some(pct) => vold.value * pct / 100.0
925 };
926 if delta.abs() <= noise {
927 LikelyNoise
928 } else {
929 let pct = delta.abs() / (vold.value).max(&f64::epsilon) * 100.0;
930 if vold.noise < 0.0 {
931 // When 'noise' is negative, it means we want
932 // to see deltas that go up over time, and can
933 // only tolerate slight negative movement.
934 if delta < 0.0 {
935 Regression(pct)
936 } else {
937 Improvement(pct)
938 }
939 } else {
940 // When 'noise' is positive, it means we want
941 // to see deltas that go down over time, and
942 // can only tolerate slight positive movements.
943 if delta < 0.0 {
944 Improvement(pct)
945 } else {
946 Regression(pct)
947 }
948 }
949 }
950 }
951 };
952 diff.insert((*k).clone(), r);
953 }
954 for (k, _) in self.iter() {
955 if !diff.contains_key(k) {
956 diff.insert((*k).clone(), MetricAdded);
957 }
958 }
959 diff
960 }
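// Worked example (numbers taken from ratchet_test below): if the old map has
// "runtime" = 1000.0 +/- 2.0 and the new map has "runtime" = 1100.0 +/- 2.0,
// the delta (100.0) exceeds the noise threshold (2.0). The recorded noise is
// positive, so the upward move is reported as Regression(10.0), i.e. 10% of
// the old value.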
961
962 /// Insert a named `value` (+/- `noise`) metric into the map. The value
963 /// must be non-negative. The `noise` indicates the uncertainty of the
964 /// metric, which doubles as the "noise range" of acceptable
965 /// pairwise-regressions on this named value, when comparing one run's
966 /// metrics to the next using `compare_to_old`.
967 ///
968 /// If `noise` is positive, then it means this metric is of a value
969 /// you want to see grow smaller, so a change larger than `noise` in the
970 /// positive direction represents a regression.
971 ///
972 /// If `noise` is negative, then it means this metric is of a value
973 /// you want to see grow larger, so a change larger than `noise` in the
974 /// negative direction represents a regression.
975 pub fn insert_metric(&mut self, name: &str, value: f64, noise: f64) {
976 let m = Metric {
977 value: value,
978 noise: noise
979 };
980 self.insert(name.to_owned(), m);
981 }
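// For instance (cf. test_metricmap_compare below): a quantity we want to see
// shrink, such as a running time, is recorded with a positive noise,
//     mm.insert_metric("in-both-want-downwards-and-improved", 2000.0, 10.0);
// while a quantity we want to see grow is recorded with a negative noise,
//     mm.insert_metric("in-both-want-upwards-and-improved", 1000.0, -10.0);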
982
983 /// Attempt to "ratchet" an external metric file. This involves loading
984 /// metrics from a metric file (if it exists), comparing against
985 /// the metrics in `self` using `compare_to_old`, and rewriting the
986 /// file to contain the metrics in `self` if none of the
987 /// `MetricChange`s are `Regression`. Returns the diff as well
988 /// as a boolean indicating whether the ratchet succeeded.
989 pub fn ratchet(&self, p: &Path, pct: Option<f64>) -> (MetricDiff, bool) {
990 let old = if os::path_exists(p) {
991 MetricMap::load(p)
992 } else {
993 MetricMap::new()
994 };
995
996 let diff : MetricDiff = self.compare_to_old(&old, pct);
997 let ok = do diff.iter().all() |(_, v)| {
998 match *v {
999 Regression(_) => false,
1000 _ => true
1001 }
1002 };
1003
1004 if ok {
1005 debug!("rewriting file '%s' with updated metrics", p.to_str());
1006 self.save(p);
1007 }
1008 return (diff, ok)
1009 }
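// End-to-end usage is exercised by ratchet_test below: a run whose "runtime"
// regresses leaves the ratchet file untouched, while a run within a forced
// noise tolerance rewrites the file with the new metrics.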
1010 }
1011
1012
1013 // Benchmarking
1014
1015 impl BenchHarness {
1016 /// Callback for benchmark functions to run in their body.
1017 pub fn iter(&mut self, inner:&fn()) {
1018 self.ns_start = precise_time_ns();
1019 let k = self.iterations;
1020 for _ in range(0u64, k) {
1021 inner();
1022 }
1023 self.ns_end = precise_time_ns();
1024 }
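// A benchmark function wraps the work to be measured in iter(); a minimal
// sketch (hypothetical function):
//
//     fn bench_push(bh: &mut BenchHarness) {
//         bh.iter(|| { let mut v = ~[]; v.push(1); });
//     }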
1025
1026 pub fn ns_elapsed(&mut self) -> u64 {
1027 if self.ns_start == 0 || self.ns_end == 0 {
1028 0
1029 } else {
1030 self.ns_end - self.ns_start
1031 }
1032 }
1033
1034 pub fn ns_per_iter(&mut self) -> u64 {
1035 if self.iterations == 0 {
1036 0
1037 } else {
1038 self.ns_elapsed() / self.iterations.max(&1)
1039 }
1040 }
1041
1042 pub fn bench_n(&mut self, n: u64, f: &fn(&mut BenchHarness)) {
1043 self.iterations = n;
1044 debug!("running benchmark for %u iterations",
1045 n as uint);
1046 f(self);
1047 }
1048
1049 // This is a more statistics-driven benchmark algorithm
1050 pub fn auto_bench(&mut self, f: &fn(&mut BenchHarness)) -> stats::Summary {
1051
1052 // Initial bench run to get ballpark figure.
1053 let mut n = 1_u64;
1054 self.bench_n(n, |x| f(x));
1055
1056 // Try to estimate iter count for 1ms falling back to 1m
1057 // iterations if first run took < 1ns.
1058 if self.ns_per_iter() == 0 {
1059 n = 1_000_000;
1060 } else {
1061 n = 1_000_000 / self.ns_per_iter().max(&1);
1062 }
1063
1064 let mut total_run = 0;
1065 let samples : &mut [f64] = [0.0_f64, ..50];
1066 loop {
1067 let loop_start = precise_time_ns();
1068
1069 for p in samples.mut_iter() {
1070 self.bench_n(n as u64, |x| f(x));
1071 *p = self.ns_per_iter() as f64;
1072 };
1073
1074 stats::winsorize(samples, 5.0);
1075 let summ = stats::Summary::new(samples);
1076
1077 for p in samples.mut_iter() {
1078 self.bench_n(5 * n as u64, |x| f(x));
1079 *p = self.ns_per_iter() as f64;
1080 };
1081
1082 stats::winsorize(samples, 5.0);
1083 let summ5 = stats::Summary::new(samples);
1084
1085 debug!("%u samples, median %f, MAD=%f, MADP=%f",
1086 samples.len(),
1087 summ.median as float,
1088 summ.median_abs_dev as float,
1089 summ.median_abs_dev_pct as float);
1090
1091 let now = precise_time_ns();
1092 let loop_run = now - loop_start;
1093
1094 // If we've run for 100ms and seem to have converged to a
1095 // stable median.
1096 if loop_run > 100_000_000 &&
1097 summ.median_abs_dev_pct < 1.0 &&
1098 summ.median - summ5.median < summ5.median_abs_dev {
1099 return summ5;
1100 }
1101
1102 total_run += loop_run;
1103 // Longest we ever run for is 3s.
1104 if total_run > 3_000_000_000 {
1105 return summ5;
1106 }
1107
1108 n *= 2;
1109 }
1110 }
1111
1112
1113
1114
1115 }
1116
1117 pub mod bench {
1118 use test::{BenchHarness, BenchSamples};
1119
1120 pub fn benchmark(f: &fn(&mut BenchHarness)) -> BenchSamples {
1121
1122 let mut bs = BenchHarness {
1123 iterations: 0,
1124 ns_start: 0,
1125 ns_end: 0,
1126 bytes: 0
1127 };
1128
1129 let ns_iter_summ = bs.auto_bench(f);
1130
1131 let ns_iter = (ns_iter_summ.median as u64).max(&1);
1132 let iter_s = 1_000_000_000 / ns_iter;
1133 let mb_s = (bs.bytes * iter_s) / 1_000_000;
1134
1135 BenchSamples {
1136 ns_iter_summ: ns_iter_summ,
1137 mb_s: mb_s as uint
1138 }
1139 }
1140 }
1141
1142 #[cfg(test)]
1143 mod tests {
1144 use test::{TrFailed, TrIgnored, TrOk, filter_tests, parse_opts,
1145 TestDesc, TestDescAndFn,
1146 Metric, MetricMap, MetricAdded, MetricRemoved,
1147 Improvement, Regression, LikelyNoise,
1148 StaticTestName, DynTestName, DynTestFn};
1149 use test::{TestOpts, run_test};
1150
1151 use std::comm::{stream, SharedChan};
1152 use tempfile;
1153 use std::os;
1154
1155 #[test]
1156 pub fn do_not_run_ignored_tests() {
1157 fn f() { fail!(); }
1158 let desc = TestDescAndFn {
1159 desc: TestDesc {
1160 name: StaticTestName("whatever"),
1161 ignore: true,
1162 should_fail: false
1163 },
1164 testfn: DynTestFn(|| f()),
1165 };
1166 let (p, ch) = stream();
1167 let ch = SharedChan::new(ch);
1168 run_test(false, desc, ch);
1169 let (_, res) = p.recv();
1170 assert!(res != TrOk);
1171 }
1172
1173 #[test]
1174 pub fn ignored_tests_result_in_ignored() {
1175 fn f() { }
1176 let desc = TestDescAndFn {
1177 desc: TestDesc {
1178 name: StaticTestName("whatever"),
1179 ignore: true,
1180 should_fail: false
1181 },
1182 testfn: DynTestFn(|| f()),
1183 };
1184 let (p, ch) = stream();
1185 let ch = SharedChan::new(ch);
1186 run_test(false, desc, ch);
1187 let (_, res) = p.recv();
1188 assert_eq!(res, TrIgnored);
1189 }
1190
1191 #[test]
1192 fn test_should_fail() {
1193 fn f() { fail!(); }
1194 let desc = TestDescAndFn {
1195 desc: TestDesc {
1196 name: StaticTestName("whatever"),
1197 ignore: false,
1198 should_fail: true
1199 },
1200 testfn: DynTestFn(|| f()),
1201 };
1202 let (p, ch) = stream();
1203 let ch = SharedChan::new(ch);
1204 run_test(false, desc, ch);
1205 let (_, res) = p.recv();
1206 assert_eq!(res, TrOk);
1207 }
1208
1209 #[test]
1210 fn test_should_fail_but_succeeds() {
1211 fn f() { }
1212 let desc = TestDescAndFn {
1213 desc: TestDesc {
1214 name: StaticTestName("whatever"),
1215 ignore: false,
1216 should_fail: true
1217 },
1218 testfn: DynTestFn(|| f()),
1219 };
1220 let (p, ch) = stream();
1221 let ch = SharedChan::new(ch);
1222 run_test(false, desc, ch);
1223 let (_, res) = p.recv();
1224 assert_eq!(res, TrFailed);
1225 }
1226
1227 #[test]
1228 fn first_free_arg_should_be_a_filter() {
1229 let args = ~[~"progname", ~"filter"];
1230 let opts = match parse_opts(args) {
1231 Ok(o) => o,
1232 _ => fail!("Malformed arg in first_free_arg_should_be_a_filter")
1233 };
1234 assert!("filter" == opts.filter.clone().unwrap());
1235 }
1236
1237 #[test]
1238 fn parse_ignored_flag() {
1239 let args = ~[~"progname", ~"filter", ~"--ignored"];
1240 let opts = match parse_opts(args) {
1241 Ok(o) => o,
1242 _ => fail!("Malformed arg in parse_ignored_flag")
1243 };
1244 assert!((opts.run_ignored));
1245 }
1246
1247 #[test]
1248 pub fn filter_for_ignored_option() {
1249 fn dummy() {}
1250
1251 // When we run ignored tests the test filter should filter out all the
1252 // unignored tests and flip the ignore flag on the rest to false
1253
1254 let opts = TestOpts {
1255 filter: None,
1256 run_ignored: true,
1257 logfile: None,
1258 run_tests: true,
1259 run_benchmarks: false,
1260 ratchet_noise_percent: None,
1261 ratchet_metrics: None,
1262 save_metrics: None,
1263 test_shard: None
1264 };
1265
1266 let tests = ~[
1267 TestDescAndFn {
1268 desc: TestDesc {
1269 name: StaticTestName("1"),
1270 ignore: true,
1271 should_fail: false,
1272 },
1273 testfn: DynTestFn(|| {}),
1274 },
1275 TestDescAndFn {
1276 desc: TestDesc {
1277 name: StaticTestName("2"),
1278 ignore: false,
1279 should_fail: false
1280 },
1281 testfn: DynTestFn(|| {}),
1282 },
1283 ];
1284 let filtered = filter_tests(&opts, tests);
1285
1286 assert_eq!(filtered.len(), 1);
1287 assert_eq!(filtered[0].desc.name.to_str(), ~"1");
1288 assert!(filtered[0].desc.ignore == false);
1289 }
1290
1291 #[test]
1292 pub fn sort_tests() {
1293 let opts = TestOpts {
1294 filter: None,
1295 run_ignored: false,
1296 logfile: None,
1297 run_tests: true,
1298 run_benchmarks: false,
1299 ratchet_noise_percent: None,
1300 ratchet_metrics: None,
1301 save_metrics: None,
1302 test_shard: None
1303 };
1304
1305 let names =
1306 ~[~"sha1::test", ~"int::test_to_str", ~"int::test_pow",
1307 ~"test::do_not_run_ignored_tests",
1308 ~"test::ignored_tests_result_in_ignored",
1309 ~"test::first_free_arg_should_be_a_filter",
1310 ~"test::parse_ignored_flag", ~"test::filter_for_ignored_option",
1311 ~"test::sort_tests"];
1312 let tests =
1313 {
1314 fn testfn() { }
1315 let mut tests = ~[];
1316 for name in names.iter() {
1317 let test = TestDescAndFn {
1318 desc: TestDesc {
1319 name: DynTestName((*name).clone()),
1320 ignore: false,
1321 should_fail: false
1322 },
1323 testfn: DynTestFn(testfn),
1324 };
1325 tests.push(test);
1326 }
1327 tests
1328 };
1329 let filtered = filter_tests(&opts, tests);
1330
1331 let expected =
1332 ~[~"int::test_pow", ~"int::test_to_str", ~"sha1::test",
1333 ~"test::do_not_run_ignored_tests",
1334 ~"test::filter_for_ignored_option",
1335 ~"test::first_free_arg_should_be_a_filter",
1336 ~"test::ignored_tests_result_in_ignored",
1337 ~"test::parse_ignored_flag",
1338 ~"test::sort_tests"];
1339
1340 for (a, b) in expected.iter().zip(filtered.iter()) {
1341 assert!(*a == b.desc.name.to_str());
1342 }
1343 }
1344
1345 #[test]
1346 pub fn test_metricmap_compare() {
1347 let mut m1 = MetricMap::new();
1348 let mut m2 = MetricMap::new();
1349 m1.insert_metric("in-both-noise", 1000.0, 200.0);
1350 m2.insert_metric("in-both-noise", 1100.0, 200.0);
1351
1352 m1.insert_metric("in-first-noise", 1000.0, 2.0);
1353 m2.insert_metric("in-second-noise", 1000.0, 2.0);
1354
1355 m1.insert_metric("in-both-want-downwards-but-regressed", 1000.0, 10.0);
1356 m2.insert_metric("in-both-want-downwards-but-regressed", 2000.0, 10.0);
1357
1358 m1.insert_metric("in-both-want-downwards-and-improved", 2000.0, 10.0);
1359 m2.insert_metric("in-both-want-downwards-and-improved", 1000.0, 10.0);
1360
1361 m1.insert_metric("in-both-want-upwards-but-regressed", 2000.0, -10.0);
1362 m2.insert_metric("in-both-want-upwards-but-regressed", 1000.0, -10.0);
1363
1364 m1.insert_metric("in-both-want-upwards-and-improved", 1000.0, -10.0);
1365 m2.insert_metric("in-both-want-upwards-and-improved", 2000.0, -10.0);
1366
1367 let diff1 = m2.compare_to_old(&m1, None);
1368
1369 assert_eq!(*(diff1.find(&~"in-both-noise").unwrap()), LikelyNoise);
1370 assert_eq!(*(diff1.find(&~"in-first-noise").unwrap()), MetricRemoved);
1371 assert_eq!(*(diff1.find(&~"in-second-noise").unwrap()), MetricAdded);
1372 assert_eq!(*(diff1.find(&~"in-both-want-downwards-but-regressed").unwrap()),
1373 Regression(100.0));
1374 assert_eq!(*(diff1.find(&~"in-both-want-downwards-and-improved").unwrap()),
1375 Improvement(50.0));
1376 assert_eq!(*(diff1.find(&~"in-both-want-upwards-but-regressed").unwrap()),
1377 Regression(50.0));
1378 assert_eq!(*(diff1.find(&~"in-both-want-upwards-and-improved").unwrap()),
1379 Improvement(100.0));
1380 assert_eq!(diff1.len(), 7);
1381
1382 let diff2 = m2.compare_to_old(&m1, Some(200.0));
1383
1384 assert_eq!(*(diff2.find(&~"in-both-noise").unwrap()), LikelyNoise);
1385 assert_eq!(*(diff2.find(&~"in-first-noise").unwrap()), MetricRemoved);
1386 assert_eq!(*(diff2.find(&~"in-second-noise").unwrap()), MetricAdded);
1387 assert_eq!(*(diff2.find(&~"in-both-want-downwards-but-regressed").unwrap()), LikelyNoise);
1388 assert_eq!(*(diff2.find(&~"in-both-want-downwards-and-improved").unwrap()), LikelyNoise);
1389 assert_eq!(*(diff2.find(&~"in-both-want-upwards-but-regressed").unwrap()), LikelyNoise);
1390 assert_eq!(*(diff2.find(&~"in-both-want-upwards-and-improved").unwrap()), LikelyNoise);
1391 assert_eq!(diff2.len(), 7);
1392 }
1393
1394 pub fn ratchet_test() {
1395
1396 let dpth = tempfile::mkdtemp(&os::tmpdir(),
1397 "test-ratchet").expect("missing test for ratchet");
1398 let pth = dpth.push("ratchet.json");
1399
1400 let mut m1 = MetricMap::new();
1401 m1.insert_metric("runtime", 1000.0, 2.0);
1402 m1.insert_metric("throughput", 50.0, 2.0);
1403
1404 let mut m2 = MetricMap::new();
1405 m2.insert_metric("runtime", 1100.0, 2.0);
1406 m2.insert_metric("throughput", 50.0, 2.0);
1407
1408 m1.save(&pth);
1409
1410 // Ask for a ratchet that should fail to advance.
1411 let (diff1, ok1) = m2.ratchet(&pth, None);
1412 assert_eq!(ok1, false);
1413 assert_eq!(diff1.len(), 2);
1414 assert_eq!(*(diff1.find(&~"runtime").unwrap()), Regression(10.0));
1415 assert_eq!(*(diff1.find(&~"throughput").unwrap()), LikelyNoise);
1416
1417 // Check that it was not rewritten.
1418 let m3 = MetricMap::load(&pth);
1419 assert_eq!(m3.len(), 2);
1420 assert_eq!(*(m3.find(&~"runtime").unwrap()), Metric { value: 1000.0, noise: 2.0 });
1421 assert_eq!(*(m3.find(&~"throughput").unwrap()), Metric { value: 50.0, noise: 2.0 });
1422
1423 // Ask for a ratchet with an explicit noise-percentage override,
1424 // that should advance.
1425 let (diff2, ok2) = m2.ratchet(&pth, Some(10.0));
1426 assert_eq!(ok2, true);
1427 assert_eq!(diff2.len(), 2);
1428 assert_eq!(*(diff2.find(&~"runtime").unwrap()), LikelyNoise);
1429 assert_eq!(*(diff2.find(&~"throughput").unwrap()), LikelyNoise);
1430
1431 // Check that it was rewritten.
1432 let m4 = MetricMap::load(&pth);
1433 assert_eq!(m4.len(), 2);
1434 assert_eq!(*(m4.find(&~"runtime").unwrap()), Metric { value: 1100.0, noise: 2.0 });
1435 assert_eq!(*(m4.find(&~"throughput").unwrap()), Metric { value: 50.0, noise: 2.0 });
1436
1437 os::remove_dir_recursive(&dpth);
1438 }
1439 }
libextra/test.rs:90:1-90:1 -struct- definition:
pub struct TestDescAndFn {
references:-576: tests: ~[TestDescAndFn]) -> bool {
763: fn filter_fn(test: TestDescAndFn, filter_str: &str) ->
750: tests: ~[TestDescAndFn]) -> ~[TestDescAndFn]
125: pub fn test_main(args: &[~str], tests: ~[TestDescAndFn]) {
814: let TestDescAndFn {desc, testfn} = test;
673: tests: ~[TestDescAndFn],
811: test: TestDescAndFn,
148: TestDescAndFn { testfn: StaticBenchFn(f), desc: t.desc.clone() },
779: fn filter(test: TestDescAndFn) -> Option<TestDescAndFn> {
794: fn lteq(t1: &TestDescAndFn, t2: &TestDescAndFn) -> bool {
764: Option<TestDescAndFn> {
145: TestDescAndFn { testfn: StaticTestFn(f), desc: t.desc.clone() },
141: pub fn test_main_static(args: &[~str], tests: &[TestDescAndFn]) {
781: let TestDescAndFn {desc, testfn} = test;
782: Some(TestDescAndFn {
libextra/test.rs:64:45-64:45 -enum- definition:
// to support isolation of tests into tasks.
pub enum TestFn {
references:-93: testfn: TestFn,
libextra/test.rs:308:1-308:1 -struct- definition:
struct ConsoleTestState {
references:-323: impl ConsoleTestState {
324: pub fn new(opts: &TestOpts) -> ConsoleTestState {
341: ConsoleTestState {
577: fn callback(event: &TestEvent, st: &mut ConsoleTestState) {
libextra/test.rs:112:16-112:16 -enum- definition:
#[deriving(Eq)]
pub enum MetricChange {
references:-112: #[deriving(Eq)]
121: pub type MetricDiff = TreeMap<~str,MetricChange>;
libextra/test.rs:96:45-96:45 -struct- definition:
#[deriving(Clone, Encodable, Decodable, Eq)]
pub struct Metric {
references:-96: #[deriving(Clone, Encodable, Decodable, Eq)]
96: #[deriving(Clone, Encodable, Decodable, Eq)]
96: #[deriving(Clone, Encodable, Decodable, Eq)]
96: #[deriving(Clone, Encodable, Decodable, Eq)]
103: pub struct MetricMap(TreeMap<~str,Metric>);
96: #[deriving(Clone, Encodable, Decodable, Eq)]
879: impl ToJson for Metric {
96: #[deriving(Clone, Encodable, Decodable, Eq)]
976: let m = Metric {
96: #[deriving(Clone, Encodable, Decodable, Eq)]
96: #[deriving(Clone, Encodable, Decodable, Eq)]
96: #[deriving(Clone, Encodable, Decodable, Eq)]
96: #[deriving(Clone, Encodable, Decodable, Eq)]
96: #[deriving(Clone, Encodable, Decodable, Eq)]
96: #[deriving(Clone, Encodable, Decodable, Eq)]
96: #[deriving(Clone, Encodable, Decodable, Eq)]
96: #[deriving(Clone, Encodable, Decodable, Eq)]
96: #[deriving(Clone, Encodable, Decodable, Eq)]
<quote expansion>:
2: Metric{value:
libextra/test.rs:171:1-171:1 -fn- definition:
fn optgroups() -> ~[getopts::groups::OptGroup] {
references:-196: println(groups::usage(message, optgroups()));
227: match groups::getopts(args_, optgroups()) {
libextra/test.rs:277:1-277:1 -fn- definition:
pub fn opt_shard(maybestr: Option<~str>) -> Option<(uint,uint)> {
references:-261: let test_shard = opt_shard(test_shard);
libextra/test.rs:74:32-74:32 -struct- definition:
// Structure passed to BenchFns
pub struct BenchHarness {
references:-71: DynBenchFn(~fn(&mut BenchHarness))
1015: impl BenchHarness {
1050: pub fn auto_bench(&mut self, f: &fn(&mut BenchHarness)) -> stats::Summary {
1120: pub fn benchmark(f: &fn(&mut BenchHarness)) -> BenchSamples {
67: StaticBenchFn(extern fn(&mut BenchHarness)),
1122: let mut bs = BenchHarness {
1042: pub fn bench_n(&mut self, n: u64, f: &fn(&mut BenchHarness)) {
libextra/test.rs:779:8-779:8 -fn- definition:
fn filter(test: TestDescAndFn) -> Option<TestDescAndFn> {
if test.desc.ignore {
references:-790: filtered.move_iter().filter_map(|x| filter(x)).collect()
libextra/test.rs:560:1-560:1 -fn- definition:
pub fn fmt_bench_samples(bs: &BenchSamples) -> ~str {
references:-446: TrBench(ref bs) => fmt_bench_samples(bs)
430: self.out.write_str(": " + fmt_bench_samples(bs))
libextra/test.rs:671:1-671:1 -fn- definition:
fn run_tests(opts: &TestOpts,
references:-611: run_tests(opts, tests, |x| callback(&x, st));
libextra/test.rs:763:8-763:8 -fn- definition:
fn filter_fn(test: TestDescAndFn, filter_str: &str) ->
Option<TestDescAndFn> {
references:-772: filtered.move_iter().filter_map(|x| filter_fn(x, filter_str)).collect()
libextra/test.rs:47:19-47:19 -enum- definition:
#[deriving(Clone)]
pub enum TestName {
references:-47: #[deriving(Clone)]
86: name: TestName,
52: impl ToStr for TestName {
libextra/test.rs:663:19-663:19 -enum- definition:
#[deriving(Clone)]
enum TestEvent {
references:-663: #[deriving(Clone)]
577: fn callback(event: &TestEvent, st: &mut ConsoleTestState) {
674: callback: &fn(e: TestEvent)) {
libextra/test.rs:223:51-223:51 -fn- definition:
// Parses command line arguments into test options
pub fn parse_opts(args: &[~str]) -> OptRes {
references:-127: match parse_opts(args) {
libextra/test.rs:867:1-867:1 -fn- definition:
fn calc_result(desc: &TestDesc, task_succeeded: bool) -> TestResult {
references:-834: let test_result = calc_result(&desc,
libextra/test.rs:120:1-120:1 -ty- definition:
pub type MetricDiff = TreeMap<~str,MetricChange>;
references:-916: let mut diff : MetricDiff = TreeMap::new();
915: noise_pct: Option<f64>) -> MetricDiff {
464: pub fn write_metric_diff(&self, diff: &MetricDiff) {
996: let diff : MetricDiff = self.compare_to_old(&old, pct);
989: pub fn ratchet(&self, p: &Path, pct: Option<f64>) -> (MetricDiff, bool) {
libextra/test.rs:669:1-669:1 -ty- definition:
type MonitorMsg = (TestDesc, TestResult);
references:-812: monitor_ch: SharedChan<MonitorMsg>) {
822: monitor_ch: SharedChan<MonitorMsg>,
libextra/test.rs:809:1-809:1 -fn- definition:
pub fn run_test(force_ignore: bool,
references:-726: run_test(!opts.run_benchmarks, b, ch.clone());
710: run_test(!opts.run_tests, test, ch.clone());
libextra/test.rs:169:1-169:1 -ty- definition:
type OptRes = Result<TestOpts, ~str>;
references:-224: pub fn parse_opts(args: &[~str]) -> OptRes {
libextra/test.rs:731:1-731:1 -fn- definition:
fn get_concurrency() -> uint {
references:-691: let concurrency = get_concurrency();
661: fn use_color() -> bool { return get_concurrency() == 1; }
libextra/test.rs:191:1-191:1 -fn- definition:
fn usage(binary: &str, helpstr: &str) -> ! {
references:-233: if matches.opt_present("help") { usage(args[0], "help"); }
232: if matches.opt_present("h") { usage(args[0], "h"); }
libextra/test.rs:157:1-157:1 -struct- definition:
pub struct TestOpts {
references:-749: opts: &TestOpts,
263: let test_opts = TestOpts {
324: pub fn new(opts: &TestOpts) -> ConsoleTestState {
575: pub fn run_tests_console(opts: &TestOpts,
672: fn run_tests(opts: &TestOpts,
170: type OptRes = Result<TestOpts, ~str>;
libextra/test.rs:577:4-577:4 -fn- definition:
fn callback(event: &TestEvent, st: &mut ConsoleTestState) {
debug!("callback(event=%?)", event);
references:-611: run_tests(opts, tests, |x| callback(&x, st));
libextra/test.rs:84:19-84:19 -struct- definition:
#[deriving(Clone)]
pub struct TestDesc {
references:-667: TeResult(TestDesc, TestResult),
666: TeWait(TestDesc),
320: failures: ~[TestDesc]
665: TeFiltered(~[TestDesc]),
868: fn calc_result(desc: &TestDesc, task_succeeded: bool) -> TestResult {
783: desc: TestDesc {ignore: false, ..desc},
84: #[deriving(Clone)]
92: desc: TestDesc,
436: pub fn write_log(&self, test: &TestDesc, result: &TestResult) {
821: fn run_test_inner(desc: TestDesc,
415: pub fn write_test_start(&self, test: &TestDesc) {
670: type MonitorMsg = (TestDesc, TestResult);
libextra/test.rs:794:4-794:4 -fn- definition:
fn lteq(t1: &TestDescAndFn, t2: &TestDescAndFn) -> bool {
t1.desc.name.to_str() < t2.desc.name.to_str()
references:-797: sort::quick_sort(filtered, lteq);
libextra/test.rs:300:23-300:23 -enum- definition:
#[deriving(Clone, Eq)]
pub enum TestResult {
references:-300: #[deriving(Clone, Eq)]
667: TeResult(TestDesc, TestResult),
868: fn calc_result(desc: &TestDesc, task_succeeded: bool) -> TestResult {
670: type MonitorMsg = (TestDesc, TestResult);
419: pub fn write_result(&self, result: &TestResult) {
436: pub fn write_log(&self, test: &TestDesc, result: &TestResult) {
libextra/test.rs:550:1-550:1 -fn- definition:
pub fn fmt_metrics(mm: &MetricMap) -> ~str {
references:-426: self.out.write_str(": " + fmt_metrics(mm));
445: TrMetrics(ref mm) => fmt_metrics(mm),
libextra/test.rs:747:1-747:1 -fn- definition:
pub fn filter_tests(
references:-676: let filtered_tests = filter_tests(opts, tests);
libextra/test.rs:294:23-294:23 -struct- definition:
#[deriving(Clone, Eq)]
pub struct BenchSamples {
references:-294: #[deriving(Clone, Eq)]
306: TrBench(BenchSamples),
1135: BenchSamples {
561: pub fn fmt_bench_samples(bs: &BenchSamples) -> ~str {
1120: pub fn benchmark(f: &fn(&mut BenchHarness)) -> BenchSamples {
libextra/test.rs:124:41-124:41 -fn- definition:
// arguments and a vector of test_descs.
pub fn test_main(args: &[~str], tests: ~[TestDescAndFn]) {
references:-155: test_main(args, owned_tests)
libextra/test.rs:574:32-574:32 -fn- definition:
// A simple console test runner
pub fn run_tests_console(opts: &TestOpts,
references:-131: if !run_tests_console(&opts, tests) { fail!("Some tests failed"); }
libextra/test.rs:1120:4-1120:4 -fn- definition:
pub fn benchmark(f: &fn(&mut BenchHarness)) -> BenchSamples {
references:-847: let bs = ::test::bench::benchmark(benchfn);
842: let bs = ::test::bench::benchmark(benchfn);
libextra/test.rs:660:1-660:1 -fn- definition:
fn use_color() -> bool { return get_concurrency() == 1; }
references:-344: use_color: use_color(),
libextra/test.rs:102:16-102:16 -struct- definition:
#[deriving(Eq)]
pub struct MetricMap(TreeMap<~str,Metric>);
references:-105: impl Clone for MetricMap {
70: DynMetricFn(~fn(&mut MetricMap)),
305: TrMetrics(MetricMap),
102: #[deriving(Eq)]
890: pub fn new() -> MetricMap {
68: StaticMetricFn(~fn(&mut MetricMap)),
106: fn clone(&self) -> MetricMap {
914: pub fn compare_to_old(&self, old: &MetricMap,
319: metrics: MetricMap,
551: pub fn fmt_metrics(mm: &MetricMap) -> ~str {
895: pub fn load(p: &Path) -> MetricMap {
888: impl MetricMap {
libextra/test.rs:821:4-821:4 -fn- definition:
fn run_test_inner(desc: TestDesc,
monitor_ch: SharedChan<MonitorMsg>,
references:-864: StaticTestFn(f) => run_test_inner(desc, monitor_ch, || f())
863: DynTestFn(f) => run_test_inner(desc, monitor_ch, f),