(index<- ) ./libstd/task/spawn.rs
1 // Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
4 //
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
10
11 /*!**************************************************************************
12 * Spawning & linked failure
13 *
14 * Several data structures are involved in task management to allow properly
15 * propagating failure across linked/supervised tasks.
16 *
17 * (1) The "taskgroup_arc" is an unsafe::exclusive which contains a hashset of
18 * all tasks that are part of the group. Some tasks are 'members', which
19 * means if they fail, they will kill everybody else in the taskgroup.
20 * Other tasks are 'descendants', which means they will not kill tasks
21 * from this group, but can be killed by failing members.
22 *
23 * A new one of these is created each spawn_linked or spawn_supervised.
24 *
25 * (2) The "taskgroup" is a per-task control structure that tracks a task's
26 * spawn configuration. It contains a reference to its taskgroup_arc, a
27 * reference to its node in the ancestor list (below), and an optionally
28 * configured notification port. These are stored in TLS.
29 *
30 * (3) The "ancestor_list" is a cons-style list of unsafe::exclusives which
31 * tracks 'generations' of taskgroups -- a group's ancestors are groups
32 * which (directly or transitively) spawn_supervised-ed them. Each task
33 * is recorded in the 'descendants' of each of its ancestor groups.
34 *
35 * Spawning a supervised task is O(n) in the number of generations still
36 * alive, and exiting (by success or failure) that task is also O(n).
37 *
38 * This diagram depicts the references between these data structures:
39 *
40 * linked_________________________________
41 * ___/ _________ \___
42 * / \ | group X | / \
43 * ( A ) - - - - - - - > | {A,B} {}|< - - -( B )
44 * \___/ |_________| \___/
45 * unlinked
46 * | __ (nil)
47 * | //| The following code causes this:
48 * |__ // /\ _________
49 * / \ // || | group Y | fn taskA() {
50 * ( C )- - - ||- - - > |{C} {D,E}| spawn(taskB);
51 * \___/ / \=====> |_________| spawn_unlinked(taskC);
52 * supervise /gen \ ...
53 * | __ \ 00 / }
54 * | //| \__/ fn taskB() { ... }
55 * |__ // /\ _________ fn taskC() {
56 * / \/ || | group Z | spawn_supervised(taskD);
57 * ( D )- - - ||- - - > | {D} {E} | ...
58 * \___/ / \=====> |_________| }
59 * supervise /gen \ fn taskD() {
60 * | __ \ 01 / spawn_supervised(taskE);
61 * | //| \__/ ...
62 * |__ // _________ }
63 * / \/ | group W | fn taskE() { ... }
64 * ( E )- - - - - - - > | {E} {} |
65 * \___/ |_________|
66 *
67 * "tcb" "taskgroup_arc"
68 * "ancestor_list"
69 *
70 ****************************************************************************/
71
72 #[doc(hidden)];
73
74 use prelude::*;
75
76 use cast::transmute;
77 use cast;
78 use cell::Cell;
79 use container::MutableMap;
80 use comm::{Chan, GenericChan, oneshot};
81 use hashmap::{HashSet, HashSetMoveIterator};
82 use local_data;
83 use task::{Failure, SingleThreaded};
84 use task::{Success, TaskOpts, TaskResult};
85 use task::unkillable;
86 use uint;
87 use util;
88 use unstable::sync::Exclusive;
89 use rt::in_green_task_context;
90 use rt::local::Local;
91 use rt::task::{Task, Sched};
92 use rt::shouldnt_be_public::{Scheduler, KillHandle, WorkQueue, Thread};
93 use rt::uv::uvio::UvEventLoop;
94
95 #[cfg(test)] use task::default_task_opts;
96 #[cfg(test)] use comm;
97 #[cfg(test)] use task;
98
99 struct TaskSet(HashSet<KillHandle>);
100
101 impl TaskSet {
102 #[inline]
103 fn new() -> TaskSet {
104 TaskSet(HashSet::new())
105 }
106 #[inline]
107 fn insert(&mut self, task: KillHandle) {
108 let didnt_overwrite = (**self).insert(task);
109 assert!(didnt_overwrite);
110 }
111 #[inline]
112 fn remove(&mut self, task: &KillHandle) {
113 let was_present = (**self).remove(task);
114 assert!(was_present);
115 }
116 #[inline]
117 fn move_iter(self) -> HashSetMoveIterator<KillHandle> {
118 (*self).move_iter()
119 }
120 }
121
// One of these per group of linked-failure tasks.
struct TaskGroupData {
    // All tasks which might kill this group. When this is empty, the group
    // can be "GC"ed (i.e., its link in the ancestor list can be removed).
    members: TaskSet,
    // All tasks unidirectionally supervised by (directly or transitively)
    // tasks in this group.
    descendants: TaskSet,
}

// Shared, lock-protected handle to a group's data. The inner Option is
// 'None' once the group has failed (see Taskgroup::tasks below), which
// forbids new enlistments.
type TaskGroupArc = Exclusive<Option<TaskGroupData>>;

// Borrowed view of a TaskGroupArc's contents, as seen while its lock is held.
type TaskGroupInner<'self> = &'self mut Option<TaskGroupData>;
134
135 // A taskgroup is 'dead' when nothing can cause it to fail; only members can.
136 fn taskgroup_is_dead(tg: &TaskGroupData) -> bool {
137 tg.members.is_empty()
138 }
139
// A list-like structure by which taskgroups keep track of all ancestor groups
// which may kill them. Needed for tasks to be able to remove themselves from
// ancestor groups upon exit. The list has a node for each "generation", and
// ends either at the root taskgroup (which has no ancestors) or at a
// taskgroup which was spawned-unlinked. Tasks from intermediate generations
// have references to the middle of the list; when intermediate generations
// die, their node in the list will be collected at a descendant's spawn-time.
struct AncestorNode {
    // Since the ancestor list is recursive, we end up with references to
    // exclusives within other exclusives. This is dangerous business (if
    // circular references arise, deadlock and memory leaks are imminent).
    // Hence we assert that this counter monotonically decreases as we
    // approach the tail of the list.
    generation: uint,
    // Handle to the tasks in the group of the current generation.
    parent_group: TaskGroupArc,
    // Recursive rest of the list.
    ancestors: AncestorList,
}

// Head of an ancestor list. 'None' marks the end of the list: either the
// root taskgroup or a spawn-unlinked boundary.
struct AncestorList(Option<Exclusive<AncestorNode>>);
161
// Accessors for taskgroup arcs and ancestor arcs that wrap the unsafety.
#[inline]
fn access_group<U>(x: &TaskGroupArc, blk: &fn(TaskGroupInner) -> U) -> U {
    // SAFETY-style note: Exclusive::with runs 'blk' under the arc's lock;
    // callers elsewhere in this file rely on that mutual exclusion.
    unsafe {
        x.with(blk)
    }
}
169
// Run 'blk' on the given ancestor node while holding that node's lock.
#[inline]
fn access_ancestors<U>(x: &Exclusive<AncestorNode>,
                       blk: &fn(x: &mut AncestorNode) -> U) -> U {
    // Same wrapper as access_group: Exclusive::with provides the locking.
    unsafe {
        x.with(blk)
    }
}
177
// Generation sanity-checking is only performed in test builds; in normal
// builds both helpers compile down to no-ops (generation stays 0), since
// the monotonicity property is an internal invariant, not runtime behavior.
#[inline] #[cfg(test)]
fn check_generation(younger: uint, older: uint) { assert!(younger > older); }
#[inline] #[cfg(not(test))]
fn check_generation(_younger: uint, _older: uint) { }

// Compute the generation number for a new ancestor node: one greater than
// the head of the given list, or 0 for an empty list.
#[inline] #[cfg(test)]
fn incr_generation(ancestors: &AncestorList) -> uint {
    ancestors.as_ref().map_default(0, |arc| access_ancestors(arc, |a| a.generation+1))
}
#[inline] #[cfg(not(test))]
fn incr_generation(_ancestors: &AncestorList) -> uint { 0 }
189
// Iterates over an ancestor list.
// (1) Runs forward_blk on each ancestral taskgroup in the list
// (2) If forward_blk "break"s, runs optional bail_blk on all ancestral
//     taskgroups that forward_blk already ran on successfully (Note: bail_blk
//     is NOT called on the block that forward_blk broke on!).
// (3) As a bonus, coalesces away all 'dead' taskgroup nodes in the list.
// Returns true iff iteration completed without an early break.
fn each_ancestor(list: &mut AncestorList,
                 bail_blk: &fn(TaskGroupInner),
                 forward_blk: &fn(TaskGroupInner) -> bool)
              -> bool {
    // "Kickoff" call - there was no last generation.
    return !coalesce(list, bail_blk, forward_blk, uint::max_value);

    // Recursively iterates, and coalesces afterwards if needed. Returns
    // whether or not unwinding is needed (i.e., !successful iteration).
    fn coalesce(list: &mut AncestorList,
                bail_blk: &fn(TaskGroupInner),
                forward_blk: &fn(TaskGroupInner) -> bool,
                last_generation: uint) -> bool {
        let (coalesce_this, early_break) =
            iterate(list, bail_blk, forward_blk, last_generation);
        // What should our next ancestor end up being?
        if coalesce_this.is_some() {
            // Needed coalesce. Our next ancestor becomes our old
            // ancestor's next ancestor. ("next = old_next->next;")
            *list = coalesce_this.unwrap();
        }
        return early_break;
    }

    // Returns an optional list-to-coalesce and whether unwinding is needed.
    // Option<ancestor_list>:
    //     Whether or not the ancestor taskgroup being iterated over is
    //     dead or not; i.e., it has no more tasks left in it, whether or not
    //     it has descendants. If dead, the caller shall coalesce it away.
    // bool:
    //     True if the supplied block did 'break', here or in any recursive
    //     calls. If so, must call the unwinder on all previous nodes.
    fn iterate(ancestors: &mut AncestorList,
               bail_blk: &fn(TaskGroupInner),
               forward_blk: &fn(TaskGroupInner) -> bool,
               last_generation: uint)
            -> (Option<AncestorList>, bool) {
        // At each step of iteration, three booleans are at play which govern
        // how the iteration should behave.
        // 'nobe_is_dead' - Should the list should be coalesced at this point?
        //                  Largely unrelated to the other two.
        // 'need_unwind' - Should we run the bail_blk at this point? (i.e.,
        //                 do_continue was false not here, but down the line)
        // 'do_continue' - Did the forward_blk succeed at this point? (i.e.,
        //                 should we recurse? or should our callers unwind?)

        // Moved into a Cell so ownership can be threaded through the nested
        // closures below and handed back to coalesce() for the recursion.
        let forward_blk = Cell::new(forward_blk);

        // The map defaults to None, because if ancestors is None, we're at
        // the end of the list, which doesn't make sense to coalesce.
        do ancestors.as_ref().map_default((None,false)) |ancestor_arc| {
            // NB: Takes a lock! (this ancestor node)
            do access_ancestors(ancestor_arc) |nobe| {
                // Argh, but we couldn't give it to coalesce() otherwise.
                let forward_blk = forward_blk.take();
                // Check monotonicity
                check_generation(last_generation, nobe.generation);
                /*##########################################################*
                 * Step 1: Look at this ancestor group (call iterator block).
                 *##########################################################*/
                let mut nobe_is_dead = false;
                let do_continue =
                    // NB: Takes a lock! (this ancestor node's parent group)
                    do access_group(&nobe.parent_group) |tg_opt| {
                        // Decide whether this group is dead. Note that the
                        // group being *dead* is disjoint from it *failing*.
                        nobe_is_dead = match *tg_opt {
                            Some(ref tg) => taskgroup_is_dead(tg),
                            None => nobe_is_dead
                        };
                        // Call iterator block. (If the group is dead, it's
                        // safe to skip it. This will leave our KillHandle
                        // hanging around in the group even after it's freed,
                        // but that's ok because, by virtue of the group being
                        // dead, nobody will ever kill-all (for) over it.)
                        if nobe_is_dead { true } else { forward_blk(tg_opt) }
                    };
                /*##########################################################*
                 * Step 2: Recurse on the rest of the list; maybe coalescing.
                 *##########################################################*/
                // 'need_unwind' is only set if blk returned true above, *and*
                // the recursive call early-broke.
                let mut need_unwind = false;
                if do_continue {
                    // NB: Takes many locks! (ancestor nodes & parent groups)
                    need_unwind = coalesce(&mut nobe.ancestors, |tg| bail_blk(tg),
                                           forward_blk, nobe.generation);
                }
                /*##########################################################*
                 * Step 3: Maybe unwind; compute return info for our caller.
                 *##########################################################*/
                if need_unwind && !nobe_is_dead {
                    do access_group(&nobe.parent_group) |tg_opt| {
                        bail_blk(tg_opt)
                    }
                }
                // Decide whether our caller should unwind.
                need_unwind = need_unwind || !do_continue;
                // Tell caller whether or not to coalesce and/or unwind
                if nobe_is_dead {
                    // Swap the list out here; the caller replaces us with it.
                    let rest = util::replace(&mut nobe.ancestors,
                                             AncestorList(None));
                    (Some(rest), need_unwind)
                } else {
                    (None, need_unwind)
                }
            }
        }
    }
}
307
// One of these per task.
pub struct Taskgroup {
    // List of tasks with whose fates this one's is intertwined.
    tasks: TaskGroupArc, // 'none' means the group has failed.
    // Lists of tasks who will kill us if they fail, but whom we won't kill.
    ancestors: AncestorList,
    // If configured, its destructor reports this task's Success/Failure on
    // a channel (see AutoNotify's Drop impl).
    notifier: Option<AutoNotify>,
}
316
impl Drop for Taskgroup {
    // Runs on task exit. Either propagates our failure to the whole group,
    // or (on success) quietly deregisters from our group and all ancestors.
    fn drop(&mut self) {
        // If we are failing, the whole taskgroup needs to die.
        do RuntimeGlue::with_task_handle_and_failing |me, failing| {
            if failing {
                // Make sure the notifier (if any) reports Failure.
                for x in self.notifier.mut_iter() {
                    x.failed = true;
                }
                // Take everybody down with us. After this point, every
                // other task in the group will see 'tg' as none, which
                // indicates the whole taskgroup is failing (and forbids
                // new spawns from succeeding).
                let tg = do access_group(&self.tasks) |tg| { tg.take() };
                // It's safe to send kill signals outside the lock, because
                // we have a refcount on all kill-handles in the group.
                kill_taskgroup(tg, me);
            } else {
                // Remove ourselves from the group(s).
                do access_group(&self.tasks) |tg| {
                    leave_taskgroup(tg, me, true);
                }
            }
            // It doesn't matter whether this happens before or after dealing
            // with our own taskgroup, so long as both happen before we die.
            // We remove ourself from every ancestor we can, so no cleanup; no
            // break.
            do each_ancestor(&mut self.ancestors, |_| {}) |ancestor_group| {
                leave_taskgroup(ancestor_group, me, false);
                true
            };
        }
    }
}
351
352 pub fn Taskgroup(tasks: TaskGroupArc,
353 ancestors: AncestorList,
354 mut notifier: Option<AutoNotify>) -> Taskgroup {
355 for x in notifier.mut_iter() {
356 x.failed = false;
357 }
358
359 Taskgroup {
360 tasks: tasks,
361 ancestors: ancestors,
362 notifier: notifier
363 }
364 }
365
// Holds the notification channel for a task spawned with notify_chan; its
// Drop impl sends Failure or Success depending on the 'failed' flag.
struct AutoNotify {
    notify_chan: Chan<TaskResult>,
    failed: bool,
}
370
371 impl Drop for AutoNotify {
372 fn drop(&mut self) {
373 let result = if self.failed { Failure } else { Success };
374 self.notify_chan.send(result);
375 }
376 }
377
378 fn AutoNotify(chan: Chan<TaskResult>) -> AutoNotify {
379 AutoNotify {
380 notify_chan: chan,
381 failed: true // Un-set above when taskgroup successfully made.
382 }
383 }
384
385 fn enlist_in_taskgroup(state: TaskGroupInner, me: KillHandle,
386 is_member: bool) -> bool {
387 let me = Cell::new(me); // :(
388 // If 'None', the group was failing. Can't enlist.
389 do state.as_mut().map_default(false) |group| {
390 (if is_member {
391 &mut group.members
392 } else {
393 &mut group.descendants
394 }).insert(me.take());
395 true
396 }
397 }
398
399 // NB: Runs in destructor/post-exit context. Can't 'fail'.
400 fn leave_taskgroup(state: TaskGroupInner, me: &KillHandle, is_member: bool) {
401 let me = Cell::new(me); // :(
402 // If 'None', already failing and we've already gotten a kill signal.
403 do state.as_mut().map |group| {
404 (if is_member {
405 &mut group.members
406 } else {
407 &mut group.descendants
408 }).remove(me.take());
409 };
410 }
411
412 // NB: Runs in destructor/post-exit context. Can't 'fail'.
413 fn kill_taskgroup(state: Option<TaskGroupData>, me: &KillHandle) {
414 // Might already be None, if somebody is failing simultaneously.
415 // That's ok; only one task needs to do the dirty work. (Might also
416 // see 'None' if somebody already failed and we got a kill signal.)
417 do state.map |TaskGroupData { members: members, descendants: descendants }| {
418 for sibling in members.move_iter() {
419 // Skip self - killing ourself won't do much good.
420 if &sibling != me {
421 RuntimeGlue::kill_task(sibling);
422 }
423 }
424 for child in descendants.move_iter() {
425 assert!(&child != me);
426 RuntimeGlue::kill_task(child);
427 }
428 };
429 // (note: multiple tasks may reach this point)
430 }
431
// FIXME (#2912): Work around core-vs-coretest function duplication. Can't use
// a proper closure because the #[test]s won't understand. Have to fake it.
fn taskgroup_key() -> local_data::Key<@@mut Taskgroup> {
    // NOTE(review): deliberately transmutes a small negative integer into a
    // TLS key (see FIXME above) — it only needs to be a unique token, never
    // dereferenced as a pointer. Do not imitate this pattern elsewhere.
    unsafe { cast::transmute(-2) }
}
437
// Transitionary.
struct RuntimeGlue;
impl RuntimeGlue {
    // Fire a kill-handle; if kill() yields a task to reschedule, enqueue it
    // on the local scheduler so it can observe the kill.
    fn kill_task(mut handle: KillHandle) {
        do handle.kill().map |killed_task| {
            let killed_task = Cell::new(killed_task);
            do Local::borrow |sched: &mut Scheduler| {
                sched.enqueue_task(killed_task.take());
            }
        };
    }

    // Run 'blk' with the current task's kill-handle and a flag saying
    // whether the task is currently unwinding (failing).
    fn with_task_handle_and_failing(blk: &fn(&KillHandle, bool)) {
        assert!(in_green_task_context());
        unsafe {
            // Can't use safe borrow, because the taskgroup destructor needs to
            // access the scheduler again to send kill signals to other tasks.
            let me: *mut Task = Local::unsafe_borrow();
            blk((*me).death.kill_handle.get_ref(), (*me).unwinder.unwinding)
        }
    }

    // Run 'blk' with the current task's taskgroup, lazily creating a fresh
    // single-member, ancestor-less group the first time one is needed.
    fn with_my_taskgroup<U>(blk: &fn(&Taskgroup) -> U) -> U {
        assert!(in_green_task_context());
        unsafe {
            // Can't use safe borrow, because creating new hashmaps for the
            // tasksets requires an rng, which needs to borrow the sched.
            let me: *mut Task = Local::unsafe_borrow();
            blk(match (*me).taskgroup {
                None => {
                    // First task in its (unlinked/unsupervised) taskgroup.
                    // Lazily initialize.
                    let mut members = TaskSet::new();
                    let my_handle = (*me).death.kill_handle.get_ref().clone();
                    members.insert(my_handle);
                    let tasks = Exclusive::new(Some(TaskGroupData {
                        members: members,
                        descendants: TaskSet::new(),
                    }));
                    let group = Taskgroup(tasks, AncestorList(None), None);
                    (*me).taskgroup = Some(group);
                    (*me).taskgroup.get_ref()
                }
                Some(ref group) => group,
            })
        }
    }
}
486
// Returns 'None' in the case where the child's TG should be lazily initialized.
// Otherwise returns the group arc the child should enlist in plus the
// ancestor list it should carry, per the linked/supervised flags.
fn gen_child_taskgroup(linked: bool, supervised: bool)
    -> Option<(TaskGroupArc, AncestorList)> {
    if linked || supervised {
        // with_my_taskgroup will lazily initialize the parent's taskgroup if
        // it doesn't yet exist. We don't want to call it in the unlinked case.
        do RuntimeGlue::with_my_taskgroup |spawner_group| {
            let ancestors = AncestorList(spawner_group.ancestors.as_ref().map(|x| x.clone()));
            if linked {
                // Child is in the same group as spawner.
                // Child's ancestors are spawner's ancestors.
                Some((spawner_group.tasks.clone(), ancestors))
            } else {
                // Child is in a separate group from spawner.
                let g = Exclusive::new(Some(TaskGroupData {
                    members: TaskSet::new(),
                    descendants: TaskSet::new(),
                }));
                let a = if supervised {
                    // Overflow check: generations grow by one per
                    // spawn_supervised, and max_value is the sentinel
                    // "no last generation" used by each_ancestor.
                    let new_generation = incr_generation(&ancestors);
                    assert!(new_generation < uint::max_value);
                    // Child's ancestors start with the spawner.
                    // Build a new node in the ancestor list.
                    AncestorList(Some(Exclusive::new(AncestorNode {
                        generation: new_generation,
                        parent_group: spawner_group.tasks.clone(),
                        ancestors: ancestors,
                    })))
                } else {
                    // Child has no ancestors.
                    AncestorList(None)
                };
                Some((g, a))
            }
        }
    } else {
        None
    }
}
526
527 // Set up membership in taskgroup and descendantship in all ancestor
528 // groups. If any enlistment fails, Some task was already failing, so
529 // don't let the child task run, and undo every successful enlistment.
530 fn enlist_many(child: &KillHandle, child_arc: &TaskGroupArc,
531 ancestors: &mut AncestorList) -> bool {
532 // Join this taskgroup.
533 let mut result = do access_group(child_arc) |child_tg| {
534 enlist_in_taskgroup(child_tg, child.clone(), true) // member
535 };
536 if result {
537 // Unwinding function in case any ancestral enlisting fails
538 let bail: &fn(TaskGroupInner) = |tg| { leave_taskgroup(tg, child, false) };
539 // Attempt to join every ancestor group.
540 result = do each_ancestor(ancestors, bail) |ancestor_tg| {
541 // Enlist as a descendant, not as an actual member.
542 // Descendants don't kill ancestor groups on failure.
543 enlist_in_taskgroup(ancestor_tg, child.clone(), false)
544 };
545 // If any ancestor group fails, need to exit this group too.
546 if !result {
547 do access_group(child_arc) |child_tg| {
548 leave_taskgroup(child_tg, child, true); // member
549 }
550 }
551 }
552 result
553 }
554
// Spawn a new task per 'opts', running 'f' as its body. Handles taskgroup
// enlistment, the optional notify channel, and — for SingleThreaded opts —
// bootstrapping a dedicated 1:1 scheduler thread for the task.
pub fn spawn_raw(mut opts: TaskOpts, f: ~fn()) {
    assert!(in_green_task_context());

    // 'None' here means the child's taskgroup is lazily initialized later.
    let child_data = Cell::new(gen_child_taskgroup(opts.linked, opts.supervised));
    let indestructible = opts.indestructible;

    let child_wrapper: ~fn() = || {
        // Child task runs this code.

        // If child data is 'None', the enlist is vacuously successful.
        let enlist_success = do child_data.take().map_default(true) |child_data| {
            let child_data = Cell::new(child_data); // :(
            do Local::borrow |me: &mut Task| {
                let (child_tg, ancestors) = child_data.take();
                let mut ancestors = ancestors;
                let handle = me.death.kill_handle.get_ref();
                // Atomically try to get into all of our taskgroups.
                if enlist_many(handle, &child_tg, &mut ancestors) {
                    // Got in. We can run the provided child body, and can also run
                    // the taskgroup's exit-time-destructor afterward.
                    me.taskgroup = Some(Taskgroup(child_tg, ancestors, None));
                    true
                } else {
                    false
                }
            }
        };
        // Should be run after the local-borrowed task is returned.
        if enlist_success {
            if indestructible {
                // Shield the body from kill signals while it runs.
                do unkillable { f() }
            } else {
                f()
            }
        }
    };

    let mut task = if opts.sched.mode != SingleThreaded {
        if opts.watched {
            Task::build_child(opts.stack_size, child_wrapper)
        } else {
            Task::build_root(opts.stack_size, child_wrapper)
        }
    } else {
        unsafe {
            // Creating a 1:1 task:thread ...
            let sched: *mut Scheduler = Local::unsafe_borrow();
            let sched_handle = (*sched).make_handle();

            // Since this is a 1:1 scheduler we create a queue not in
            // the stealee set. The run_anything flag is set false
            // which will disable stealing.
            let work_queue = WorkQueue::new();

            // Create a new scheduler to hold the new task
            let new_loop = ~UvEventLoop::new();
            let mut new_sched = ~Scheduler::new_special(new_loop,
                                                        work_queue,
                                                        (*sched).work_queues.clone(),
                                                        (*sched).sleeper_list.clone(),
                                                        false,
                                                        Some(sched_handle));
            let mut new_sched_handle = new_sched.make_handle();

            // Allow the scheduler to exit when the pinned task exits
            new_sched_handle.send_shutdown();

            // Pin the new task to the new scheduler
            let new_task = if opts.watched {
                Task::build_homed_child(opts.stack_size, child_wrapper, Sched(new_sched_handle))
            } else {
                Task::build_homed_root(opts.stack_size, child_wrapper, Sched(new_sched_handle))
            };

            // Create a task that will later be used to join with the new scheduler
            // thread when it is ready to terminate
            let (thread_port, thread_chan) = oneshot();
            let thread_port_cell = Cell::new(thread_port);
            let join_task = do Task::build_child(None) {
                debug2!("running join task");
                let thread_port = thread_port_cell.take();
                let thread: Thread = thread_port.recv();
                thread.join();
            };

            // Put the scheduler into another thread
            let new_sched_cell = Cell::new(new_sched);
            let orig_sched_handle_cell = Cell::new((*sched).make_handle());
            let join_task_cell = Cell::new(join_task);

            let thread = do Thread::start {
                let mut new_sched = new_sched_cell.take();
                let mut orig_sched_handle = orig_sched_handle_cell.take();
                let join_task = join_task_cell.take();

                let bootstrap_task = ~do Task::new_root(&mut new_sched.stack_pool, None) || {
                    debug2!("boostrapping a 1:1 scheduler");
                };
                new_sched.bootstrap(bootstrap_task);

                debug2!("enqueing join_task");
                // Now tell the original scheduler to join with this thread
                // by scheduling a thread-joining task on the original scheduler
                orig_sched_handle.send_task_from_friend(join_task);

                // NB: We can't simply send a message from here to another task
                // because this code isn't running in a task and message passing doesn't
                // work outside of tasks. Hence we're sending a scheduler message
                // to execute a new task directly to a scheduler.
            };

            // Give the thread handle to the join task
            thread_chan.send(thread);

            // When this task is enqueued on the current scheduler it will then get
            // forwarded to the scheduler to which it is pinned
            new_task
        }
    };

    if opts.notify_chan.is_some() {
        let notify_chan = opts.notify_chan.take_unwrap();
        let notify_chan = Cell::new(notify_chan);
        // Translate the task's exit status into a TaskResult on the channel.
        let on_exit: ~fn(bool) = |success| {
            notify_chan.take().send(
                if success { Success } else { Failure }
            )
        };
        task.death.on_exit = Some(on_exit);
    }

    task.name = opts.name.take();
    debug2!("spawn calling run_task");
    Scheduler::run_task(task);

}
691
692 #[test]
693 fn test_spawn_raw_simple() {
694 let (po, ch) = stream();
695 do spawn_raw(default_task_opts()) {
696 ch.send(());
697 }
698 po.recv();
699 }
700
#[test]
fn test_spawn_raw_unsupervise() {
    // An unlinked, unwatched child may fail without killing the spawning
    // (test) task.
    let opts = task::TaskOpts {
        linked: false,
        watched: false,
        notify_chan: None,
        .. default_task_opts()
    };
    do spawn_raw(opts) {
        fail2!();
    }
}
713
714 #[test]
715 fn test_spawn_raw_notify_success() {
716 let (notify_po, notify_ch) = comm::stream();
717
718 let opts = task::TaskOpts {
719 notify_chan: Some(notify_ch),
720 .. default_task_opts()
721 };
722 do spawn_raw(opts) {
723 }
724 assert_eq!(notify_po.recv(), Success);
725 }
726
727 #[test]
728 fn test_spawn_raw_notify_failure() {
729 // New bindings for these
730 let (notify_po, notify_ch) = comm::stream();
731
732 let opts = task::TaskOpts {
733 linked: false,
734 watched: false,
735 notify_chan: Some(notify_ch),
736 .. default_task_opts()
737 };
738 do spawn_raw(opts) {
739 fail2!();
740 }
741 assert_eq!(notify_po.recv(), Failure);
742 }
libstd/task/spawn.rs:308:26-308:26 -struct- definition:
// One of these per task.
pub struct Taskgroup {
references:-434: fn taskgroup_key() -> local_data::Key<@@mut Taskgroup> {
460: fn with_my_taskgroup<U>(blk: &fn(&Taskgroup) -> U) -> U {
354: mut notifier: Option<AutoNotify>) -> Taskgroup {
359: Taskgroup {
317: impl Drop for Taskgroup {
libstd/rt/task.rs:
50: taskgroup: Option<Taskgroup>,
libstd/rt/kill.rs:
565: pub fn collect_failure(&mut self, mut success: bool, group: Option<Taskgroup>) {
libstd/task/spawn.rs:554:1-554:1 -fn- definition:
pub fn spawn_raw(mut opts: TaskOpts, f: ~fn()) {
references:-libstd/task/mod.rs:
375: spawn::spawn_raw(opts, f);
libstd/task/spawn.rs:135:78-135:78 -fn- definition:
// A taskgroup is 'dead' when nothing can cause it to fail; only members can.
fn taskgroup_is_dead(tg: &TaskGroupData) -> bool {
references:-263: Some(ref tg) => taskgroup_is_dead(tg),
libstd/task/spawn.rs:487:81-487:81 -fn- definition:
// Returns 'None' in the case where the child's TG should be lazily initialized.
fn gen_child_taskgroup(linked: bool, supervised: bool)
references:-558: let child_data = Cell::new(gen_child_taskgroup(opts.linked, opts.supervised));
libstd/task/spawn.rs:180:28-180:28 -fn- definition:
#[inline] #[cfg(not(test))]
fn check_generation(_younger: uint, _older: uint) { }
references:-252: check_generation(last_generation, nobe.generation);
libstd/task/spawn.rs:384:1-384:1 -fn- definition:
fn enlist_in_taskgroup(state: TaskGroupInner, me: KillHandle,
references:-534: enlist_in_taskgroup(child_tg, child.clone(), true) // member
543: enlist_in_taskgroup(ancestor_tg, child.clone(), false)
libstd/task/spawn.rs:412:59-412:59 -fn- definition:
// NB: Runs in destructor/post-exit context. Can't 'fail'.
fn kill_taskgroup(state: Option<TaskGroupData>, me: &KillHandle) {
references:-333: kill_taskgroup(tg, me);
libstd/task/spawn.rs:438:18-438:18 -struct- definition:
// Transitionary.
struct RuntimeGlue;
references:-440: impl RuntimeGlue {
libstd/task/spawn.rs:399:59-399:59 -fn- definition:
// NB: Runs in destructor/post-exit context. Can't 'fail'.
fn leave_taskgroup(state: TaskGroupInner, me: &KillHandle, is_member: bool) {
references:-337: leave_taskgroup(tg, me, true);
345: leave_taskgroup(ancestor_group, me, false);
548: leave_taskgroup(child_tg, child, true); // member
538: let bail: &fn(TaskGroupInner) = |tg| { leave_taskgroup(tg, child, false) };
libstd/task/spawn.rs:187:28-187:28 -fn- definition:
#[inline] #[cfg(not(test))]
fn incr_generation(_ancestors: &AncestorList) -> uint { 0 }
references:-506: let new_generation = incr_generation(&ancestors);
libstd/task/spawn.rs:98:1-98:1 -struct- definition:
struct TaskSet(HashSet<KillHandle>);
references:-126: members: TaskSet,
103: fn new() -> TaskSet {
129: descendants: TaskSet,
101: impl TaskSet {
libstd/task/spawn.rs:365:1-365:1 -struct- definition:
struct AutoNotify {
references:-314: notifier: Option<AutoNotify>,
354: mut notifier: Option<AutoNotify>) -> Taskgroup {
379: AutoNotify {
378: fn AutoNotify(chan: Chan<TaskResult>) -> AutoNotify {
371: impl Drop for AutoNotify {
libstd/task/spawn.rs:130:2-130:2 -ty- definition:
}
type TaskGroupArc = Exclusive<Option<TaskGroupData>>;
references:-530: fn enlist_many(child: &KillHandle, child_arc: &TaskGroupArc,
489: -> Option<(TaskGroupArc, AncestorList)> {
164: fn access_group<U>(x: &TaskGroupArc, blk: &fn(TaskGroupInner) -> U) -> U {
155: parent_group: TaskGroupArc,
352: pub fn Taskgroup(tasks: TaskGroupArc,
311: tasks: TaskGroupArc, // 'none' means the group has failed.
libstd/task/spawn.rs:351:1-351:1 -fn- definition:
pub fn Taskgroup(tasks: TaskGroupArc,
references:-477: let group = Taskgroup(tasks, AncestorList(None), None);
575: me.taskgroup = Some(Taskgroup(child_tg, ancestors, None));
libstd/task/spawn.rs:163:10-163:10 -fn- definition:
#[inline]
fn access_group<U>(x: &TaskGroupArc, blk: &fn(TaskGroupInner) -> U) -> U {
references:-259: do access_group(&nobe.parent_group) |tg_opt| {
547: do access_group(child_arc) |child_tg| {
533: let mut result = do access_group(child_arc) |child_tg| {
336: do access_group(&self.tasks) |tg| {
288: do access_group(&nobe.parent_group) |tg_opt| {
330: let tg = do access_group(&self.tasks) |tg| { tg.take() };
libstd/task/spawn.rs:228:4-228:4 -fn- definition:
fn iterate(ancestors: &mut AncestorList,
bail_blk: &fn(TaskGroupInner),
references:-210: iterate(list, bail_blk, forward_blk, last_generation);
libstd/task/spawn.rs:170:10-170:10 -fn- definition:
#[inline]
fn access_ancestors<U>(x: &Exclusive<AncestorNode>,
references:-248: do access_ancestors(ancestor_arc) |nobe| {
libstd/task/spawn.rs:146:79-146:79 -struct- definition:
// die, their node in the list will be collected at a descendant's spawn-time.
struct AncestorNode {
references:-160: struct AncestorList(Option<Exclusive<AncestorNode>>);
171: fn access_ancestors<U>(x: &Exclusive<AncestorNode>,
510: AncestorList(Some(Exclusive::new(AncestorNode {
172: blk: &fn(x: &mut AncestorNode) -> U) -> U {
libstd/task/spawn.rs:132:1-132:1 -ty- definition:
type TaskGroupInner<'self> = &'self mut Option<TaskGroupData>;
references:-400: fn leave_taskgroup(state: TaskGroupInner, me: &KillHandle, is_member: bool) {
207: forward_blk: &fn(TaskGroupInner) -> bool,
229: bail_blk: &fn(TaskGroupInner),
164: fn access_group<U>(x: &TaskGroupArc, blk: &fn(TaskGroupInner) -> U) -> U {
538: let bail: &fn(TaskGroupInner) = |tg| { leave_taskgroup(tg, child, false) };
230: forward_blk: &fn(TaskGroupInner) -> bool,
198: forward_blk: &fn(TaskGroupInner) -> bool)
206: bail_blk: &fn(TaskGroupInner),
197: bail_blk: &fn(TaskGroupInner),
385: fn enlist_in_taskgroup(state: TaskGroupInner, me: KillHandle,
libstd/task/spawn.rs:122:51-122:51 -struct- definition:
// One of these per group of linked-failure tasks.
struct TaskGroupData {
references:-501: let g = Exclusive::new(Some(TaskGroupData {
131: type TaskGroupArc = Exclusive<Option<TaskGroupData>>;
133: type TaskGroupInner<'self> = &'self mut Option<TaskGroupData>;
473: let tasks = Exclusive::new(Some(TaskGroupData {
417: do state.map |TaskGroupData { members: members, descendants: descendants }| {
413: fn kill_taskgroup(state: Option<TaskGroupData>, me: &KillHandle) {
136: fn taskgroup_is_dead(tg: &TaskGroupData) -> bool {
libstd/task/spawn.rs:205:4-205:4 -fn- definition:
fn coalesce(list: &mut AncestorList,
bail_blk: &fn(TaskGroupInner),
references:-201: return !coalesce(list, bail_blk, forward_blk, uint::max_value);
281: need_unwind = coalesce(&mut nobe.ancestors, |tg| bail_blk(tg),
libstd/task/spawn.rs:159:1-159:1 -struct- definition:
struct AncestorList(Option<Exclusive<AncestorNode>>);
references:-353: ancestors: AncestorList,
228: fn iterate(ancestors: &mut AncestorList,
489: -> Option<(TaskGroupArc, AncestorList)> {
531: ancestors: &mut AncestorList) -> bool {
313: ancestors: AncestorList,
157: ancestors: AncestorList,
205: fn coalesce(list: &mut AncestorList,
188: fn incr_generation(_ancestors: &AncestorList) -> uint { 0 }
196: fn each_ancestor(list: &mut AncestorList,
232: -> (Option<AncestorList>, bool) {
libstd/task/spawn.rs:195:74-195:74 -fn- definition:
// (3) As a bonus, coalesces away all 'dead' taskgroup nodes in the list.
fn each_ancestor(list: &mut AncestorList,
references:-540: result = do each_ancestor(ancestors, bail) |ancestor_tg| {
344: do each_ancestor(&mut self.ancestors, |_| {}) |ancestor_group| {
libstd/task/spawn.rs:529:71-529:71 -fn- definition:
// don't let the child task run, and undo every successful enlistment.
fn enlist_many(child: &KillHandle, child_arc: &TaskGroupArc,
references:-572: if enlist_many(handle, &child_tg, &mut ancestors) {