/* Copyright (C) 2007-2015 Free Software Foundation, Inc.
   Contributed by Richard Henderson <rth@redhat.com>.

   This file is part of the GNU Offloading and Multi Processing Library
   (libgomp).

   Libgomp is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
   FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */
/* This file handles the maintenance of tasks in response to task
   creation and termination.  */
#include "libgomp.h"
#include <stdlib.h>
#include <string.h>
#include "gomp-constants.h"

typedef struct gomp_task_depend_entry *hash_entry_type;
static inline void *
htab_alloc (size_t size)
{
  return gomp_malloc (size);
}

static inline void
htab_free (void *ptr)
{
  free (ptr);
}

#include "hashtab.h"

static inline hashval_t
htab_hash (hash_entry_type element)
{
  return hash_pointer (element->addr);
}

static inline bool
htab_eq (hash_entry_type x, hash_entry_type y)
{
  return x->addr == y->addr;
}
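
/* Note (added for exposition, not in the original): hashtab.h is an
   inline "template" header; it expects the including file to have
   defined hash_entry_type, htab_alloc and htab_free before inclusion,
   and htab_hash/htab_eq after.  As defined above, the dependency table
   is keyed purely by the address of a depend clause operand, so two
   entries collide exactly when they name the same storage location.  */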
/* Create a new task data structure.  */

void
gomp_init_task (struct gomp_task *task, struct gomp_task *parent_task,
		struct gomp_task_icv *prev_icv)
{
  task->parent = parent_task;
  task->icv = *prev_icv;
  task->kind = GOMP_TASK_IMPLICIT;
  task->taskwait = NULL;
  task->in_tied_task = false;
  task->final_task = false;
  task->copy_ctors_done = false;
  task->parent_depends_on = false;
  task->children = NULL;
  task->taskgroup = NULL;
  task->dependers = NULL;
  task->depend_hash = NULL;
  task->depend_count = 0;
}
/* Clean up a task, after completing it.  */

void
gomp_end_task (void)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_task *task = thr->task;

  gomp_finish_task (task);
  thr->task = task->parent;
}
/* Orphan the task in CHILDREN and all its siblings.  */

static inline void
gomp_clear_parent (struct gomp_task *children)
{
  struct gomp_task *task = children;

  if (task)
    do
      {
	task->parent = NULL;
	task = task->next_child;
      }
    while (task != children);
}
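
/* Note (added for exposition, not in the original): the children,
   taskgroup and team queue lists in this file are circular
   doubly-linked lists, which is why the walk above is a do/while that
   stops when it wraps back around to the first CHILDREN node, rather
   than a loop that stops on a NULL next pointer.  */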
/* Helper function for GOMP_task and gomp_create_target_task.  Depend clause
   handling for undeferred task creation.  */

static void
gomp_task_handle_depend (struct gomp_task *task, struct gomp_task *parent,
			 void **depend)
{
  size_t ndepend = (uintptr_t) depend[0];
  size_t nout = (uintptr_t) depend[1];
  size_t i;
  hash_entry_type ent;

  task->depend_count = ndepend;
  task->num_dependees = 0;
  if (parent->depend_hash == NULL)
    parent->depend_hash = htab_create (2 * ndepend > 12 ? 2 * ndepend : 12);
  for (i = 0; i < ndepend; i++)
    {
      task->depend[i].addr = depend[2 + i];
      task->depend[i].next = NULL;
      task->depend[i].prev = NULL;
      task->depend[i].task = task;
      task->depend[i].is_in = i >= nout;
      task->depend[i].redundant = false;
      task->depend[i].redundant_out = false;

      hash_entry_type *slot = htab_find_slot (&parent->depend_hash,
					      &task->depend[i], INSERT);
      hash_entry_type out = NULL, last = NULL;
      if (*slot)
	{
	  /* If multiple depend clauses of the same task reference the
	     same address, all but the first one are redundant.  As
	     inout/out come first, if any of them is inout/out, it will
	     win, which is the right semantics.  */
	  if ((*slot)->task == task)
	    {
	      task->depend[i].redundant = true;
	      continue;
	    }
	  for (ent = *slot; ent; ent = ent->next)
	    {
	      if (ent->redundant_out)
		break;

	      last = ent;

	      /* depend(in:...) doesn't depend on earlier depend(in:...).  */
	      if (i >= nout && ent->is_in)
		continue;

	      if (!ent->is_in)
		out = ent;

	      struct gomp_task *tsk = ent->task;
	      if (tsk->dependers == NULL)
		{
		  tsk->dependers
		    = gomp_malloc (sizeof (struct gomp_dependers_vec)
				   + 6 * sizeof (struct gomp_task *));
		  tsk->dependers->n_elem = 1;
		  tsk->dependers->allocated = 6;
		  tsk->dependers->elem[0] = task;
		  task->num_dependees++;
		  continue;
		}
	      /* We already have some other dependency on tsk from an
		 earlier depend clause.  */
	      else if (tsk->dependers->n_elem
		       && (tsk->dependers->elem[tsk->dependers->n_elem - 1]
			   == task))
		continue;
	      else if (tsk->dependers->n_elem == tsk->dependers->allocated)
		{
		  tsk->dependers->allocated
		    = tsk->dependers->allocated * 2 + 2;
		  tsk->dependers
		    = gomp_realloc (tsk->dependers,
				    sizeof (struct gomp_dependers_vec)
				    + (tsk->dependers->allocated
				       * sizeof (struct gomp_task *)));
		}
	      tsk->dependers->elem[tsk->dependers->n_elem++] = task;
	      task->num_dependees++;
	    }
	  task->depend[i].next = *slot;
	  (*slot)->prev = &task->depend[i];
	}
      *slot = &task->depend[i];

      /* There is no need to store more than one depend({,in}out:) task per
	 address in the hash table chain for the purpose of creation of
	 deferred tasks, because each out depends on all earlier outs, thus it
	 is enough to record just the last depend({,in}out:).  For depend(in:),
	 we need to keep all of the previous ones not terminated yet, because
	 a later depend({,in}out:) might need to depend on all of them.  So, if
	 the new task's clause is depend({,in}out:), we know there is at most
	 one other depend({,in}out:) clause in the list (out).  For
	 non-deferred tasks we want to see all outs, so they are moved to the
	 end of the chain; after the first redundant_out entry all following
	 entries should be redundant_out.  */
      if (!task->depend[i].is_in && out)
	{
	  if (out != last)
	    {
	      out->next->prev = out->prev;
	      out->prev->next = out->next;
	      out->next = last->next;
	      out->prev = last;
	      last->next = out;
	      if (out->next)
		out->next->prev = out;
	    }
	  out->redundant_out = true;
	}
    }
}
/* Called when encountering an explicit task directive.  If IF_CLAUSE is
   false, then we must not delay in executing the task.  If UNTIED is true,
   then the task may be executed by any member of the team.

   DEPEND is an array containing:
	depend[0]: number of depend elements.
	depend[1]: number of depend elements of type "out".
	depend[2..N+1]: address of [1..N]th depend element.  */
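
/* For illustration (added here, not part of the original sources): for
   a construct such as

	#pragma omp task depend(out: x) depend(in: y, z)

   the compiler would presumably pass a DEPEND array laid out as

	depend[0] = (void *) 3;   // three depend elements in total
	depend[1] = (void *) 1;   // one of them is out/inout
	depend[2] = &x;           // out/inout addresses come first
	depend[3] = &y;
	depend[4] = &z;

   matching the layout documented above and the assumption in
   gomp_task_handle_depend that out/inout entries precede in entries.  */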
void
GOMP_task (void (*fn) (void *), void *data, void (*cpyfn) (void *, void *),
	   long arg_size, long arg_align, bool if_clause, unsigned flags,
	   void **depend, int priority)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_team *team = thr->ts.team;

#ifdef HAVE_BROKEN_POSIX_SEMAPHORES
  /* If pthread_mutex_* is used for omp_*lock*, then each task must be
     tied to one thread all the time.  This means UNTIED tasks must be
     tied and if CPYFN is non-NULL IF(0) must be forced, as CPYFN
     might be running on a different thread than FN.  */
  if (cpyfn)
    if_clause = false;
  flags &= ~GOMP_TASK_FLAG_UNTIED;
#endif

  /* If parallel or taskgroup has been cancelled, don't start new tasks.  */
  if (team
      && (gomp_team_barrier_cancelled (&team->barrier)
	  || (thr->task->taskgroup && thr->task->taskgroup->cancelled)))
    return;

  if ((flags & GOMP_TASK_FLAG_PRIORITY) == 0)
    priority = 0;
  /* FIXME, use priority.  */
  (void) priority;

  if (!if_clause || team == NULL
      || (thr->task && thr->task->final_task)
      || team->task_count > 64 * team->nthreads)
    {
      struct gomp_task task;

      /* If there are depend clauses and earlier deferred sibling tasks
	 with depend clauses, check if there isn't a dependency.  If there
	 is, we need to wait for them.  There is no need to handle
	 depend clauses for non-deferred tasks other than this, because
	 the parent task is suspended until the child task finishes and thus
	 it can't start further child tasks.  */
      if ((flags & GOMP_TASK_FLAG_DEPEND)
	  && thr->task && thr->task->depend_hash)
	gomp_task_maybe_wait_for_dependencies (depend);

      gomp_init_task (&task, thr->task, gomp_icv (false));
      task.kind = GOMP_TASK_UNDEFERRED;
      task.final_task = (thr->task && thr->task->final_task)
			|| (flags & GOMP_TASK_FLAG_FINAL);
      if (thr->task)
	{
	  task.in_tied_task = thr->task->in_tied_task;
	  task.taskgroup = thr->task->taskgroup;
	}
      thr->task = &task;
      if (__builtin_expect (cpyfn != NULL, 0))
	{
	  char buf[arg_size + arg_align - 1];
	  char *arg = (char *) (((uintptr_t) buf + arg_align - 1)
				& ~(uintptr_t) (arg_align - 1));
	  cpyfn (arg, data);
	  fn (arg);
	}
      else
	fn (data);
      /* Access to "children" is normally done inside a task_lock
	 mutex region, but the only way this particular task.children
	 can be set is if this thread's task work function (fn)
	 creates children.  So since the setter is *this* thread, we
	 need no barriers here when testing for non-NULL.  We can have
	 task.children set by the current thread then changed by a
	 child thread, but seeing a stale non-NULL value is not a
	 problem.  Once past the task_lock acquisition, this thread
	 will see the real value of task.children.  */
      if (task.children != NULL)
	{
	  gomp_mutex_lock (&team->task_lock);
	  gomp_clear_parent (task.children);
	  gomp_mutex_unlock (&team->task_lock);
	}
      gomp_end_task ();
    }
  else
    {
      struct gomp_task *task;
      struct gomp_task *parent = thr->task;
      struct gomp_taskgroup *taskgroup = parent->taskgroup;
      char *arg;
      bool do_wake;
      size_t depend_size = 0;

      if (flags & GOMP_TASK_FLAG_DEPEND)
	depend_size = ((uintptr_t) depend[0]
		       * sizeof (struct gomp_task_depend_entry));
      task = gomp_malloc (sizeof (*task) + depend_size
			  + arg_size + arg_align - 1);
      arg = (char *) (((uintptr_t) (task + 1) + depend_size + arg_align - 1)
		      & ~(uintptr_t) (arg_align - 1));
      gomp_init_task (task, parent, gomp_icv (false));
      task->kind = GOMP_TASK_UNDEFERRED;
      task->in_tied_task = parent->in_tied_task;
      task->taskgroup = taskgroup;
      thr->task = task;
      if (cpyfn)
	{
	  cpyfn (arg, data);
	  task->copy_ctors_done = true;
	}
      else
	memcpy (arg, data, arg_size);
      thr->task = parent;
      task->kind = GOMP_TASK_WAITING;
      task->fn = fn;
      task->fn_data = arg;
      task->final_task = (flags & GOMP_TASK_FLAG_FINAL) >> 1;
      gomp_mutex_lock (&team->task_lock);
      /* If parallel or taskgroup has been cancelled, don't start new
	 tasks.  */
      if (__builtin_expect ((gomp_team_barrier_cancelled (&team->barrier)
			     || (taskgroup && taskgroup->cancelled))
			    && !task->copy_ctors_done, 0))
	{
	  gomp_mutex_unlock (&team->task_lock);
	  gomp_finish_task (task);
	  free (task);
	  return;
	}
      if (taskgroup)
	taskgroup->num_children++;
      if (depend_size)
	{
	  gomp_task_handle_depend (task, parent, depend);
	  if (task->num_dependees)
	    {
	      gomp_mutex_unlock (&team->task_lock);
	      return;
	    }
	}
      if (parent->children)
	{
	  task->next_child = parent->children;
	  task->prev_child = parent->children->prev_child;
	  task->next_child->prev_child = task;
	  task->prev_child->next_child = task;
	}
      else
	{
	  task->next_child = task;
	  task->prev_child = task;
	}
      parent->children = task;
      if (taskgroup)
	{
	  /* If applicable, place task into its taskgroup.  */
	  if (taskgroup->children)
	    {
	      task->next_taskgroup = taskgroup->children;
	      task->prev_taskgroup = taskgroup->children->prev_taskgroup;
	      task->next_taskgroup->prev_taskgroup = task;
	      task->prev_taskgroup->next_taskgroup = task;
	    }
	  else
	    {
	      task->next_taskgroup = task;
	      task->prev_taskgroup = task;
	    }
	  taskgroup->children = task;
	}
      if (team->task_queue)
	{
	  task->next_queue = team->task_queue;
	  task->prev_queue = team->task_queue->prev_queue;
	  task->next_queue->prev_queue = task;
	  task->prev_queue->next_queue = task;
	}
      else
	{
	  task->next_queue = task;
	  task->prev_queue = task;
	  team->task_queue = task;
	}
      ++team->task_count;
      ++team->task_queued_count;
      gomp_team_barrier_set_task_pending (&team->barrier);
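      /* Wake heuristic (comment added for exposition, as I read the
	 condition below): wake one thread only if the team is not
	 already saturated with running tasks; a parent that is not in
	 a tied task is counted as an extra runner, presumably because
	 it may pick this task up itself.  */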
      do_wake = team->task_running_count + !parent->in_tied_task
		< team->nthreads;
      gomp_mutex_unlock (&team->task_lock);
      if (do_wake)
	gomp_team_barrier_wake (&team->barrier, 1);
    }
}
ialias (GOMP_taskgroup_start)
ialias (GOMP_taskgroup_end)

#define TYPE long
#define UTYPE unsigned long
#define TYPE_is_long 1
#include "taskloop.c"
#undef TYPE
#undef UTYPE
#undef TYPE_is_long

#define TYPE unsigned long long
#define UTYPE TYPE
#define GOMP_taskloop GOMP_taskloop_ull
#include "taskloop.c"
#undef TYPE
#undef UTYPE
#undef GOMP_taskloop
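
/* Note (added for exposition, not in the original): the two inclusions
   above stamp out GOMP_taskloop (signed long) and GOMP_taskloop_ull
   (unsigned long long) from the same taskloop.c body, using the
   TYPE/UTYPE macros as a poor man's template parameter.  */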
/* Called for nowait target tasks.  */

void
gomp_create_target_task (struct gomp_device_descr *devicep,
			 void (*fn) (void *), size_t mapnum, void **hostaddrs,
			 size_t *sizes, unsigned short *kinds,
			 unsigned int flags, void **depend)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_team *team = thr->ts.team;

  /* If parallel or taskgroup has been cancelled, don't start new tasks.  */
  if (team
      && (gomp_team_barrier_cancelled (&team->barrier)
	  || (thr->task->taskgroup && thr->task->taskgroup->cancelled)))
    return;

  struct gomp_target_task *ttask;
  struct gomp_task *task;
  struct gomp_task *parent = thr->task;
  struct gomp_taskgroup *taskgroup = parent->taskgroup;
  bool do_wake;
  size_t depend_size = 0;

  if (depend != NULL)
    depend_size = ((uintptr_t) depend[0]
		   * sizeof (struct gomp_task_depend_entry));
  task = gomp_malloc (sizeof (*task) + depend_size
		      + sizeof (*ttask)
		      + mapnum * (sizeof (void *) + sizeof (size_t)
				  + sizeof (unsigned short)));
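  /* Note (added for exposition, not in the original): a single
     allocation apparently holds everything the deferred target task
     needs, laid out as

	struct gomp_task | depend[ndepend] | struct gomp_target_task
	| hostaddrs[mapnum] | sizes[mapnum] | kinds[mapnum]

     The pointer arithmetic below carves ttask and its trailing arrays
     out of this block.  */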
  gomp_init_task (task, parent, gomp_icv (false));
  task->kind = GOMP_TASK_WAITING;
  task->in_tied_task = parent->in_tied_task;
  task->taskgroup = taskgroup;
  ttask = (struct gomp_target_task *) &task->depend[(uintptr_t) depend[0]];
  ttask->devicep = devicep;
  ttask->fn = fn;
  ttask->mapnum = mapnum;
  memcpy (ttask->hostaddrs, hostaddrs, mapnum * sizeof (void *));
  ttask->sizes = (size_t *) &ttask->hostaddrs[mapnum];
  memcpy (ttask->sizes, sizes, mapnum * sizeof (size_t));
  ttask->kinds = (unsigned short *) &ttask->sizes[mapnum];
  memcpy (ttask->kinds, kinds, mapnum * sizeof (unsigned short));
  ttask->flags = flags;
  task->fn = gomp_target_task_fn;
  task->fn_data = ttask;
  task->final_task = 0;
  gomp_mutex_lock (&team->task_lock);
  /* If parallel or taskgroup has been cancelled, don't start new tasks.  */
  if (__builtin_expect (gomp_team_barrier_cancelled (&team->barrier)
			|| (taskgroup && taskgroup->cancelled), 0))
    {
      gomp_mutex_unlock (&team->task_lock);
      gomp_finish_task (task);
      free (task);
      return;
    }
  if (taskgroup)
    taskgroup->num_children++;
  if (depend_size)
    {
      gomp_task_handle_depend (task, parent, depend);
      if (task->num_dependees)
	{
	  gomp_mutex_unlock (&team->task_lock);
	  return;
	}
    }
  if (parent->children)
    {
      task->next_child = parent->children;
      task->prev_child = parent->children->prev_child;
      task->next_child->prev_child = task;
      task->prev_child->next_child = task;
    }
  else
    {
      task->next_child = task;
      task->prev_child = task;
    }
  parent->children = task;
  if (taskgroup)
    {
      /* If applicable, place task into its taskgroup.  */
      if (taskgroup->children)
	{
	  task->next_taskgroup = taskgroup->children;
	  task->prev_taskgroup = taskgroup->children->prev_taskgroup;
	  task->next_taskgroup->prev_taskgroup = task;
	  task->prev_taskgroup->next_taskgroup = task;
	}
      else
	{
	  task->next_taskgroup = task;
	  task->prev_taskgroup = task;
	}
      taskgroup->children = task;
    }
  if (team->task_queue)
    {
      task->next_queue = team->task_queue;
      task->prev_queue = team->task_queue->prev_queue;
      task->next_queue->prev_queue = task;
      task->prev_queue->next_queue = task;
    }
  else
    {
      task->next_queue = task;
      task->prev_queue = task;
      team->task_queue = task;
    }
  ++team->task_count;
  ++team->task_queued_count;
  gomp_team_barrier_set_task_pending (&team->barrier);
  do_wake = team->task_running_count + !parent->in_tied_task
	    < team->nthreads;
  gomp_mutex_unlock (&team->task_lock);
  if (do_wake)
    gomp_team_barrier_wake (&team->barrier, 1);
}
#if _LIBGOMP_CHECKING
/* Sanity check TASK to make sure it is in its parent's children
   queue, and that the tasks therein are in the right order.

   The expected order is:
	parent_depends_on WAITING tasks
	!parent_depends_on WAITING tasks
	TIED tasks

   PARENT is the alleged parent of TASK.  */

static void
verify_children_queue (struct gomp_task *task, struct gomp_task *parent)
{
  if (task->parent != parent)
    gomp_fatal ("verify_children_queue: incompatible parents");
  /* It's OK, Annie was an orphan and she turned out all right.  */
  if (!parent)
    return;

  bool seen_tied = false;
  bool seen_plain_waiting = false;
  bool found = false;
  struct gomp_task *t = parent->children;
  while (1)
    {
      if (t == task)
	found = true;
      if (seen_tied && t->kind == GOMP_TASK_WAITING)
	gomp_fatal ("verify_children_queue: WAITING task after TIED");
      if (t->kind == GOMP_TASK_TIED)
	seen_tied = true;
      else if (t->kind == GOMP_TASK_WAITING)
	{
	  if (t->parent_depends_on)
	    {
	      if (seen_plain_waiting)
		gomp_fatal ("verify_children_queue: parent_depends_on after "
			    "!parent_depends_on");
	    }
	  else
	    seen_plain_waiting = true;
	}
      t = t->next_child;
      if (t == parent->children)
	break;
    }
  if (!found)
    gomp_fatal ("verify_children_queue: child not found in parent queue");
}
/* Sanity check TASK to make sure it is in its taskgroup queue (if
   applicable), and that the tasks therein are in the right order.

   The expected order is that GOMP_TASK_WAITING tasks must come before
   GOMP_TASK_TIED tasks.

   TASK is the task.  */

static void
verify_taskgroup_queue (struct gomp_task *task)
{
  struct gomp_taskgroup *taskgroup = task->taskgroup;
  if (!taskgroup)
    return;

  bool seen_tied = false;
  bool found = false;
  struct gomp_task *t = taskgroup->children;
  while (1)
    {
      if (t == task)
	found = true;
      if (t->kind == GOMP_TASK_WAITING && seen_tied)
	gomp_fatal ("verify_taskgroup_queue: WAITING task after TIED");
      if (t->kind == GOMP_TASK_TIED)
	seen_tied = true;
      t = t->next_taskgroup;
      if (t == taskgroup->children)
	break;
    }
  if (!found)
    gomp_fatal ("verify_taskgroup_queue: child not found in parent queue");
}
/* Verify that TASK is in the team's task queue.  */

static void
verify_task_queue (struct gomp_task *task, struct gomp_team *team)
{
  /* Only dereference the queue after checking TEAM, so a NULL team
     cannot crash the verifier before it reports anything.  */
  if (team)
    {
      struct gomp_task *t = team->task_queue;
      while (1)
	{
	  if (t == task)
	    return;
	  t = t->next_queue;
	  if (t == team->task_queue)
	    break;
	}
    }
  gomp_fatal ("verify_task_queue: child not in team");
}
#endif
static inline bool
gomp_task_run_pre (struct gomp_task *child_task, struct gomp_task *parent,
		   struct gomp_team *team)
{
#if _LIBGOMP_CHECKING
  verify_children_queue (child_task, parent);
  verify_taskgroup_queue (child_task);
  verify_task_queue (child_task, team);
#endif

  if (parent)
    {
      /* Adjust children such that it will point to a next child,
	 while the current one is scheduled to be executed.  This way,
	 GOMP_taskwait (and others) can schedule a next task while
	 waiting.

	 Do not remove it entirely from the circular list, as it is
	 still a child, though not one we should consider first (say
	 by GOMP_taskwait).  */
      if (parent->children == child_task)
	parent->children = child_task->next_child;
      /* TIED tasks cannot come before WAITING tasks.  If we're about
	 to make this task TIED, rewire things appropriately.
	 However, a TIED task at the end is perfectly fine.  */
      else if (child_task->next_child->kind == GOMP_TASK_WAITING
	       && child_task->next_child != parent->children)
	{
	  /* Remove from the list.  */
	  child_task->prev_child->next_child = child_task->next_child;
	  child_task->next_child->prev_child = child_task->prev_child;
	  /* Rewire at the end of its siblings.  */
	  child_task->next_child = parent->children;
	  child_task->prev_child = parent->children->prev_child;
	  parent->children->prev_child->next_child = child_task;
	  parent->children->prev_child = child_task;
	}

      /* If the current task (child_task) is at the top of the
	 parent's last_parent_depends_on, it's about to be removed
	 from it.  Adjust last_parent_depends_on appropriately.  */
      if (__builtin_expect (child_task->parent_depends_on, 0)
	  && parent->taskwait->last_parent_depends_on == child_task)
	{
	  /* The last_parent_depends_on list was built with all
	     parent_depends_on entries linked to the prev_child.  Grab
	     the next last_parent_depends_on head from this prev_child if
	     available...  */
	  if (child_task->prev_child->kind == GOMP_TASK_WAITING
	      && child_task->prev_child->parent_depends_on)
	    parent->taskwait->last_parent_depends_on = child_task->prev_child;
	  else
	    {
	      /* ...otherwise, there are no more parent_depends_on
		 entries waiting to run.  In which case, clear the
		 list.  */
	      parent->taskwait->last_parent_depends_on = NULL;
	    }
	}
    }

  /* Adjust taskgroup to point to the next taskgroup.  See note above
     regarding adjustment of children as to why the child_task is not
     removed entirely from the circular list.  */
  struct gomp_taskgroup *taskgroup = child_task->taskgroup;
  if (taskgroup)
    {
      if (taskgroup->children == child_task)
	taskgroup->children = child_task->next_taskgroup;
      /* TIED tasks cannot come before WAITING tasks.  If we're about
	 to make this task TIED, rewire things appropriately.
	 However, a TIED task at the end is perfectly fine.  */
      else if (child_task->next_taskgroup->kind == GOMP_TASK_WAITING
	       && child_task->next_taskgroup != taskgroup->children)
	{
	  /* Remove from the list.  */
	  child_task->prev_taskgroup->next_taskgroup
	    = child_task->next_taskgroup;
	  child_task->next_taskgroup->prev_taskgroup
	    = child_task->prev_taskgroup;
	  /* Rewire at the end of its taskgroup.  */
	  child_task->next_taskgroup = taskgroup->children;
	  child_task->prev_taskgroup = taskgroup->children->prev_taskgroup;
	  taskgroup->children->prev_taskgroup->next_taskgroup = child_task;
	  taskgroup->children->prev_taskgroup = child_task;
	}
    }

  /* Remove child_task from the task_queue.  */
  child_task->prev_queue->next_queue = child_task->next_queue;
  child_task->next_queue->prev_queue = child_task->prev_queue;
  if (team->task_queue == child_task)
    {
      if (child_task->next_queue != child_task)
	team->task_queue = child_task->next_queue;
      else
	team->task_queue = NULL;
    }
  child_task->kind = GOMP_TASK_TIED;

  if (--team->task_queued_count == 0)
    gomp_team_barrier_clear_task_pending (&team->barrier);
  if ((gomp_team_barrier_cancelled (&team->barrier)
       || (taskgroup && taskgroup->cancelled))
      && !child_task->copy_ctors_done)
    return true;
  return false;
}
static void
gomp_task_run_post_handle_depend_hash (struct gomp_task *child_task)
{
  struct gomp_task *parent = child_task->parent;
  size_t i;

  for (i = 0; i < child_task->depend_count; i++)
    if (!child_task->depend[i].redundant)
      {
	if (child_task->depend[i].next)
	  child_task->depend[i].next->prev = child_task->depend[i].prev;
	if (child_task->depend[i].prev)
	  child_task->depend[i].prev->next = child_task->depend[i].next;
	else
	  {
	    hash_entry_type *slot
	      = htab_find_slot (&parent->depend_hash, &child_task->depend[i],
				NO_INSERT);
	    if (*slot != &child_task->depend[i])
	      abort ();
	    if (child_task->depend[i].next)
	      *slot = child_task->depend[i].next;
	    else
	      htab_clear_slot (parent->depend_hash, slot);
	  }
      }
}
/* After CHILD_TASK has been run, adjust the various task queues to
   give higher priority to the tasks that depend on CHILD_TASK.

   TEAM is the team to which CHILD_TASK belongs.  */

static size_t
gomp_task_run_post_handle_dependers (struct gomp_task *child_task,
				     struct gomp_team *team)
{
  struct gomp_task *parent = child_task->parent;
  size_t i, count = child_task->dependers->n_elem, ret = 0;
  for (i = 0; i < count; i++)
    {
      struct gomp_task *task = child_task->dependers->elem[i];
      if (--task->num_dependees != 0)
	continue;

      struct gomp_taskgroup *taskgroup = task->taskgroup;
      if (parent)
	{
	  if (parent->children)
	    {
	      /* If parent is in gomp_task_maybe_wait_for_dependencies
		 and it doesn't need to wait for this task, put it after
		 all ready to run tasks it needs to wait for.  */
	      if (parent->taskwait && parent->taskwait->last_parent_depends_on
		  && !task->parent_depends_on)
		{
		  /* Put depender in last_parent_depends_on.  */
		  struct gomp_task *last_parent_depends_on
		    = parent->taskwait->last_parent_depends_on;
		  task->next_child = last_parent_depends_on->next_child;
		  task->prev_child = last_parent_depends_on;
		}
	      else
		{
		  /* Make depender a sibling of child_task, and place
		     it at the top of said sibling list.  */
		  task->next_child = parent->children;
		  task->prev_child = parent->children->prev_child;
		  parent->children = task;
		}
	      task->next_child->prev_child = task;
	      task->prev_child->next_child = task;
	    }
	  else
	    {
	      /* Make depender a sibling of child_task.  */
	      task->next_child = task;
	      task->prev_child = task;
	      parent->children = task;
	    }
	  if (parent->taskwait)
	    {
	      if (parent->taskwait->in_taskwait)
		{
		  parent->taskwait->in_taskwait = false;
		  gomp_sem_post (&parent->taskwait->taskwait_sem);
		}
	      else if (parent->taskwait->in_depend_wait)
		{
		  parent->taskwait->in_depend_wait = false;
		  gomp_sem_post (&parent->taskwait->taskwait_sem);
		}
	      if (parent->taskwait->last_parent_depends_on == NULL
		  && task->parent_depends_on)
		parent->taskwait->last_parent_depends_on = task;
	    }
	}
      /* If depender is in a taskgroup, put it at the TOP of its
	 taskgroup.  */
      if (taskgroup)
	{
	  if (taskgroup->children)
	    {
	      task->next_taskgroup = taskgroup->children;
	      task->prev_taskgroup = taskgroup->children->prev_taskgroup;
	      task->next_taskgroup->prev_taskgroup = task;
	      task->prev_taskgroup->next_taskgroup = task;
	    }
	  else
	    {
	      task->next_taskgroup = task;
	      task->prev_taskgroup = task;
	    }
	  taskgroup->children = task;
	  if (taskgroup->in_taskgroup_wait)
	    {
	      taskgroup->in_taskgroup_wait = false;
	      gomp_sem_post (&taskgroup->taskgroup_sem);
	    }
	}
      /* Put depender of child_task at the END of the team's
	 task_queue.  */
      if (team->task_queue)
	{
	  task->next_queue = team->task_queue;
	  task->prev_queue = team->task_queue->prev_queue;
	  task->next_queue->prev_queue = task;
	  task->prev_queue->next_queue = task;
	}
      else
	{
	  task->next_queue = task;
	  task->prev_queue = task;
	  team->task_queue = task;
	}
      ++team->task_count;
      ++team->task_queued_count;
      ++ret;
    }
  free (child_task->dependers);
  child_task->dependers = NULL;
  if (ret > 1)
    gomp_team_barrier_set_task_pending (&team->barrier);
  return ret;
}
static inline size_t
gomp_task_run_post_handle_depend (struct gomp_task *child_task,
				  struct gomp_team *team)
{
  if (child_task->depend_count == 0)
    return 0;

  /* If parent is gone already, the hash table is freed and nothing
     will use the hash table anymore, no need to remove anything from it.  */
  if (child_task->parent != NULL)
    gomp_task_run_post_handle_depend_hash (child_task);

  if (child_task->dependers == NULL)
    return 0;

  return gomp_task_run_post_handle_dependers (child_task, team);
}
/* Remove CHILD_TASK from its parent.  */

static inline void
gomp_task_run_post_remove_parent (struct gomp_task *child_task)
{
  struct gomp_task *parent = child_task->parent;
  if (parent == NULL)
    return;

  /* If this was the last task the parent was depending on,
     synchronize with gomp_task_maybe_wait_for_dependencies so it can
     clean up and return.  */
  if (__builtin_expect (child_task->parent_depends_on, 0)
      && --parent->taskwait->n_depend == 0
      && parent->taskwait->in_depend_wait)
    {
      parent->taskwait->in_depend_wait = false;
      gomp_sem_post (&parent->taskwait->taskwait_sem);
    }

  /* Remove CHILD_TASK from its sibling list.  */
  child_task->prev_child->next_child = child_task->next_child;
  child_task->next_child->prev_child = child_task->prev_child;
  if (parent->children != child_task)
    return;
  if (child_task->next_child != child_task)
    parent->children = child_task->next_child;
  else
    {
      /* We access task->children in GOMP_taskwait
	 outside of the task lock mutex region, so
	 need a release barrier here to ensure memory
	 written by child_task->fn above is flushed
	 before the NULL is written.  */
      __atomic_store_n (&parent->children, NULL, MEMMODEL_RELEASE);
      if (parent->taskwait && parent->taskwait->in_taskwait)
	{
	  parent->taskwait->in_taskwait = false;
	  gomp_sem_post (&parent->taskwait->taskwait_sem);
	}
    }
}
/* Remove CHILD_TASK from its taskgroup.  */

static inline void
gomp_task_run_post_remove_taskgroup (struct gomp_task *child_task)
{
  struct gomp_taskgroup *taskgroup = child_task->taskgroup;
  if (taskgroup == NULL)
    return;
  child_task->prev_taskgroup->next_taskgroup = child_task->next_taskgroup;
  child_task->next_taskgroup->prev_taskgroup = child_task->prev_taskgroup;
  if (taskgroup->num_children > 1)
    --taskgroup->num_children;
  else
    {
      /* We access taskgroup->num_children in GOMP_taskgroup_end
	 outside of the task lock mutex region, so
	 need a release barrier here to ensure memory
	 written by child_task->fn above is flushed
	 before the NULL is written.  */
      __atomic_store_n (&taskgroup->num_children, 0, MEMMODEL_RELEASE);
    }
  if (taskgroup->children != child_task)
    return;
  if (child_task->next_taskgroup != child_task)
    taskgroup->children = child_task->next_taskgroup;
  else
    {
      taskgroup->children = NULL;
      if (taskgroup->in_taskgroup_wait)
	{
	  taskgroup->in_taskgroup_wait = false;
	  gomp_sem_post (&taskgroup->taskgroup_sem);
	}
    }
}
void
gomp_barrier_handle_tasks (gomp_barrier_state_t state)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_team *team = thr->ts.team;
  struct gomp_task *task = thr->task;
  struct gomp_task *child_task = NULL;
  struct gomp_task *to_free = NULL;
  int do_wake = 0;

  gomp_mutex_lock (&team->task_lock);
  if (gomp_barrier_last_thread (state))
    {
      if (team->task_count == 0)
	{
	  gomp_team_barrier_done (&team->barrier, state);
	  gomp_mutex_unlock (&team->task_lock);
	  gomp_team_barrier_wake (&team->barrier, 0);
	  return;
	}
      gomp_team_barrier_set_waiting_for_tasks (&team->barrier);
    }

  while (1)
    {
      bool cancelled = false;
      if (team->task_queue != NULL)
	{
	  child_task = team->task_queue;
	  cancelled = gomp_task_run_pre (child_task, child_task->parent,
					 team);
	  if (__builtin_expect (cancelled, 0))
	    {
	      if (to_free)
		{
		  gomp_finish_task (to_free);
		  free (to_free);
		  to_free = NULL;
		}
	      goto finish_cancelled;
	    }
	  team->task_running_count++;
	  child_task->in_tied_task = true;
	}
      gomp_mutex_unlock (&team->task_lock);
      if (do_wake)
	{
	  gomp_team_barrier_wake (&team->barrier, do_wake);
	  do_wake = 0;
	}
      if (to_free)
	{
	  gomp_finish_task (to_free);
	  free (to_free);
	  to_free = NULL;
	}
      if (child_task)
	{
	  thr->task = child_task;
	  child_task->fn (child_task->fn_data);
	  thr->task = task;
	}
      else
	return;
      gomp_mutex_lock (&team->task_lock);
      if (child_task)
	{
	 finish_cancelled:;
	  size_t new_tasks
	    = gomp_task_run_post_handle_depend (child_task, team);
	  gomp_task_run_post_remove_parent (child_task);
	  gomp_clear_parent (child_task->children);
	  gomp_task_run_post_remove_taskgroup (child_task);
	  to_free = child_task;
	  child_task = NULL;
	  if (!cancelled)
	    team->task_running_count--;
	  if (new_tasks > 1)
	    {
	      do_wake = team->nthreads - team->task_running_count;
	      if (do_wake > new_tasks)
		do_wake = new_tasks;
	    }
	  if (--team->task_count == 0
	      && gomp_team_barrier_waiting_for_tasks (&team->barrier))
	    {
	      gomp_team_barrier_done (&team->barrier, state);
	      gomp_mutex_unlock (&team->task_lock);
	      gomp_team_barrier_wake (&team->barrier, 0);
	      gomp_mutex_lock (&team->task_lock);
	    }
	}
    }
}
/* Called when encountering a taskwait directive.

   Wait for all children of the current task.  */

void
GOMP_taskwait (void)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_team *team = thr->ts.team;
  struct gomp_task *task = thr->task;
  struct gomp_task *child_task = NULL;
  struct gomp_task *to_free = NULL;
  struct gomp_taskwait taskwait;
  int do_wake = 0;

  /* The acquire barrier on load of task->children here synchronizes
     with the write of a NULL in gomp_task_run_post_remove_parent.  It is
     not necessary that we synchronize with other non-NULL writes at
     this point, but we must ensure that all writes to memory by a
     child thread task work function are seen before we exit from
     GOMP_taskwait.  */
  if (task == NULL
      || __atomic_load_n (&task->children, MEMMODEL_ACQUIRE) == NULL)
    return;

  memset (&taskwait, 0, sizeof (taskwait));
  gomp_mutex_lock (&team->task_lock);
  while (1)
    {
      bool cancelled = false;
      if (task->children == NULL)
	{
	  bool destroy_taskwait = task->taskwait != NULL;
	  task->taskwait = NULL;
	  gomp_mutex_unlock (&team->task_lock);
	  if (to_free)
	    {
	      gomp_finish_task (to_free);
	      free (to_free);
	    }
	  if (destroy_taskwait)
	    gomp_sem_destroy (&taskwait.taskwait_sem);
	  return;
	}
      if (task->children->kind == GOMP_TASK_WAITING)
	{
	  child_task = task->children;
	  cancelled
	    = gomp_task_run_pre (child_task, task, team);
	  if (__builtin_expect (cancelled, 0))
	    {
	      if (to_free)
		{
		  gomp_finish_task (to_free);
		  free (to_free);
		  to_free = NULL;
		}
	      goto finish_cancelled;
	    }
	}
      else
	{
	  /* All tasks we are waiting for are already running
	     in other threads.  Wait for them.  */
	  if (task->taskwait == NULL)
	    {
	      taskwait.in_depend_wait = false;
	      gomp_sem_init (&taskwait.taskwait_sem, 0);
	      task->taskwait = &taskwait;
	    }
	  taskwait.in_taskwait = true;
	}
      gomp_mutex_unlock (&team->task_lock);
      if (do_wake)
	{
	  gomp_team_barrier_wake (&team->barrier, do_wake);
	  do_wake = 0;
	}
      if (to_free)
	{
	  gomp_finish_task (to_free);
	  free (to_free);
	  to_free = NULL;
	}
      if (child_task)
	{
	  thr->task = child_task;
	  child_task->fn (child_task->fn_data);
	  thr->task = task;
	}
      else
	gomp_sem_wait (&taskwait.taskwait_sem);
      gomp_mutex_lock (&team->task_lock);
      if (child_task)
	{
	 finish_cancelled:;
	  size_t new_tasks
	    = gomp_task_run_post_handle_depend (child_task, team);

	  /* Remove child_task from children list, and set up the next
	     sibling to be run.  */
	  child_task->prev_child->next_child = child_task->next_child;
	  child_task->next_child->prev_child = child_task->prev_child;
	  if (task->children == child_task)
	    {
	      if (child_task->next_child != child_task)
		task->children = child_task->next_child;
	      else
		task->children = NULL;
	    }
	  /* Orphan all the children of CHILD_TASK.  */
	  gomp_clear_parent (child_task->children);

	  /* Remove CHILD_TASK from its taskgroup.  */
	  gomp_task_run_post_remove_taskgroup (child_task);

	  to_free = child_task;
	  child_task = NULL;
	  team->task_count--;
	  if (new_tasks > 1)
	    {
	      do_wake = team->nthreads - team->task_running_count
			- !task->in_tied_task;
	      if (do_wake > new_tasks)
		do_wake = new_tasks;
	    }
	}
    }
}
/* This is like GOMP_taskwait, but we only wait for tasks that the
   upcoming task depends on.

   DEPEND is as in GOMP_task.  */

void
gomp_task_maybe_wait_for_dependencies (void **depend)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_task *task = thr->task;
  struct gomp_team *team = thr->ts.team;
  struct gomp_task_depend_entry elem, *ent = NULL;
  struct gomp_taskwait taskwait;
  struct gomp_task *last_parent_depends_on = NULL;
  size_t ndepend = (uintptr_t) depend[0];
  size_t nout = (uintptr_t) depend[1];
  size_t i;
  size_t num_awaited = 0;
  struct gomp_task *child_task = NULL;
  struct gomp_task *to_free = NULL;
  int do_wake = 0;

  gomp_mutex_lock (&team->task_lock);
  for (i = 0; i < ndepend; i++)
    {
      elem.addr = depend[i + 2];
      ent = htab_find (task->depend_hash, &elem);
      for (; ent; ent = ent->next)
	if (i >= nout && ent->is_in)
	  continue;
	else
	  {
	    struct gomp_task *tsk = ent->task;
	    if (!tsk->parent_depends_on)
	      {
		tsk->parent_depends_on = true;
		++num_awaited;
		/* If a task we need to wait for is not already
		   running and is ready to be scheduled, move it to
		   front, so that we run it as soon as possible.

		   We rearrange the children queue such that all
		   parent_depends_on tasks are first, and
		   last_parent_depends_on points to the last such task
		   we rearranged.  For example, given the following
		   children where PD[123] are the parent_depends_on
		   tasks:

			task->children
			|
			V
			C1 -> C2 -> C3 -> PD1 -> PD2 -> PD3 -> C4

		   We rearrange such that:

			task->children
			|		     +--- last_parent_depends_on
			|		     |
			V		     V
			PD1 -> PD2 -> PD3 -> C1 -> C2 -> C3 -> C4  */

		if (tsk->num_dependees == 0 && tsk->kind == GOMP_TASK_WAITING)
		  {
		    if (last_parent_depends_on)
		      {
			tsk->prev_child->next_child = tsk->next_child;
			tsk->next_child->prev_child = tsk->prev_child;
			tsk->prev_child = last_parent_depends_on;
			tsk->next_child = last_parent_depends_on->next_child;
			tsk->prev_child->next_child = tsk;
			tsk->next_child->prev_child = tsk;
		      }
		    else if (tsk != task->children)
		      {
			tsk->prev_child->next_child = tsk->next_child;
			tsk->next_child->prev_child = tsk->prev_child;
			tsk->prev_child = task->children->prev_child;
			tsk->next_child = task->children;
			task->children = tsk;
			tsk->prev_child->next_child = tsk;
			tsk->next_child->prev_child = tsk;
		      }
		    last_parent_depends_on = tsk;
		  }
	      }
	  }
    }
  if (num_awaited == 0)
    {
      gomp_mutex_unlock (&team->task_lock);
      return;
    }

  memset (&taskwait, 0, sizeof (taskwait));
  taskwait.n_depend = num_awaited;
  taskwait.last_parent_depends_on = last_parent_depends_on;
  gomp_sem_init (&taskwait.taskwait_sem, 0);
  task->taskwait = &taskwait;

  while (1)
    {
      bool cancelled = false;
      if (taskwait.n_depend == 0)
	{
	  task->taskwait = NULL;
	  gomp_mutex_unlock (&team->task_lock);
	  if (to_free)
	    {
	      gomp_finish_task (to_free);
	      free (to_free);
	    }
	  gomp_sem_destroy (&taskwait.taskwait_sem);
	  return;
	}
      if (task->children->kind == GOMP_TASK_WAITING)
	{
	  child_task = task->children;
	  cancelled
	    = gomp_task_run_pre (child_task, task, team);
	  if (__builtin_expect (cancelled, 0))
	    {
	      if (to_free)
		{
		  gomp_finish_task (to_free);
		  free (to_free);
		  to_free = NULL;
		}
	      goto finish_cancelled;
	    }
	}
      else
	/* All tasks we are waiting for are already running
	   in other threads.  Wait for them.  */
	taskwait.in_depend_wait = true;
      gomp_mutex_unlock (&team->task_lock);
      if (do_wake)
	{
	  gomp_team_barrier_wake (&team->barrier, do_wake);
	  do_wake = 0;
	}
      if (to_free)
	{
	  gomp_finish_task (to_free);
	  free (to_free);
	  to_free = NULL;
	}
      if (child_task)
	{
	  thr->task = child_task;
	  child_task->fn (child_task->fn_data);
	  thr->task = task;
	}
      else
	gomp_sem_wait (&taskwait.taskwait_sem);
      gomp_mutex_lock (&team->task_lock);
      if (child_task)
	{
	 finish_cancelled:;
	  size_t new_tasks
	    = gomp_task_run_post_handle_depend (child_task, team);
	  if (child_task->parent_depends_on)
	    --taskwait.n_depend;

	  /* Remove child_task from sibling list.  */
	  child_task->prev_child->next_child = child_task->next_child;
	  child_task->next_child->prev_child = child_task->prev_child;
	  if (task->children == child_task)
	    {
	      if (child_task->next_child != child_task)
		task->children = child_task->next_child;
	      else
		task->children = NULL;
	    }

	  gomp_clear_parent (child_task->children);
	  gomp_task_run_post_remove_taskgroup (child_task);
	  to_free = child_task;
	  child_task = NULL;
	  team->task_count--;
	  if (new_tasks > 1)
	    {
	      do_wake = team->nthreads - team->task_running_count
			- !task->in_tied_task;
	      if (do_wake > new_tasks)
		do_wake = new_tasks;
	    }
	}
    }
}
/* Called when encountering a taskyield directive.  */

void
GOMP_taskyield (void)
{
  /* Nothing at the moment.  */
}
void
GOMP_taskgroup_start (void)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_team *team = thr->ts.team;
  struct gomp_task *task = thr->task;
  struct gomp_taskgroup *taskgroup;

  /* If team is NULL, all tasks are executed as
     GOMP_TASK_UNDEFERRED tasks and thus all children tasks of
     taskgroup and their descendant tasks will be finished
     by the time GOMP_taskgroup_end is called.  */
  if (team == NULL)
    return;
  taskgroup = gomp_malloc (sizeof (struct gomp_taskgroup));
  taskgroup->prev = task->taskgroup;
  taskgroup->children = NULL;
  taskgroup->in_taskgroup_wait = false;
  taskgroup->cancelled = false;
  taskgroup->num_children = 0;
  gomp_sem_init (&taskgroup->taskgroup_sem, 0);
  task->taskgroup = taskgroup;
}
void
GOMP_taskgroup_end (void)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_team *team = thr->ts.team;
  struct gomp_task *task = thr->task;
  struct gomp_taskgroup *taskgroup;
  struct gomp_task *child_task = NULL;
  struct gomp_task *to_free = NULL;
  int do_wake = 0;

  if (team == NULL)
    return;
  taskgroup = task->taskgroup;

  /* The acquire barrier on load of taskgroup->num_children here
     synchronizes with the write of 0 in gomp_task_run_post_remove_taskgroup.
     It is not necessary that we synchronize with other non-0 writes at
     this point, but we must ensure that all writes to memory by a
     child thread task work function are seen before we exit from
     GOMP_taskgroup_end.  */
  if (__atomic_load_n (&taskgroup->num_children, MEMMODEL_ACQUIRE) == 0)
    goto finish;

  gomp_mutex_lock (&team->task_lock);
  while (1)
    {
      bool cancelled = false;
      if (taskgroup->children == NULL)
	{
	  if (taskgroup->num_children)
	    {
	      if (task->children == NULL)
		goto do_wait;
	      child_task = task->children;
	    }
	  else
	    {
	      gomp_mutex_unlock (&team->task_lock);
	      if (to_free)
		{
		  gomp_finish_task (to_free);
		  free (to_free);
		}
	      goto finish;
	    }
	}
      else
	child_task = taskgroup->children;
      if (child_task->kind == GOMP_TASK_WAITING)
	{
	  cancelled
	    = gomp_task_run_pre (child_task, child_task->parent, team);
	  if (__builtin_expect (cancelled, 0))
	    {
	      if (to_free)
		{
		  gomp_finish_task (to_free);
		  free (to_free);
		  to_free = NULL;
		}
	      goto finish_cancelled;
	    }
	}
      else
	{
	  child_task = NULL;
	 do_wait:
	  /* All tasks we are waiting for are already running
	     in other threads.  Wait for them.  */
	  taskgroup->in_taskgroup_wait = true;
	}
      gomp_mutex_unlock (&team->task_lock);
      if (do_wake)
	{
	  gomp_team_barrier_wake (&team->barrier, do_wake);
	  do_wake = 0;
	}
      if (to_free)
	{
	  gomp_finish_task (to_free);
	  free (to_free);
	  to_free = NULL;
	}
      if (child_task)
	{
	  thr->task = child_task;
	  child_task->fn (child_task->fn_data);
	  thr->task = task;
	}
      else
	gomp_sem_wait (&taskgroup->taskgroup_sem);
      gomp_mutex_lock (&team->task_lock);
      if (child_task)
	{
	 finish_cancelled:;
	  size_t new_tasks
	    = gomp_task_run_post_handle_depend (child_task, team);
	  gomp_task_run_post_remove_parent (child_task);
	  gomp_clear_parent (child_task->children);
	  gomp_task_run_post_remove_taskgroup (child_task);
	  to_free = child_task;
	  child_task = NULL;
	  team->task_count--;
	  if (new_tasks > 1)
	    {
	      do_wake = team->nthreads - team->task_running_count
			- !task->in_tied_task;
	      if (do_wake > new_tasks)
		do_wake = new_tasks;
	    }
	}
    }

 finish:
  task->taskgroup = taskgroup->prev;
  gomp_sem_destroy (&taskgroup->taskgroup_sem);
  free (taskgroup);
}
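
/* Return nonzero when called from within a final task region, per the
   standard omp_in_final () API.  (Comment added for exposition.)  */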
int
omp_in_final (void)
{
  struct gomp_thread *thr = gomp_thread ();
  return thr->task && thr->task->final_task;
}

ialias (omp_in_final)