/* libgomp/task.c */
/* Copyright (C) 2007-2015 Free Software Foundation, Inc.
   Contributed by Richard Henderson <rth@redhat.com>.

   This file is part of the GNU Offloading and Multi Processing Library
   (libgomp).

   Libgomp is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
   FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */
/* This file handles the maintenance of tasks in response to task
   creation and termination.  */
#include "libgomp.h"
#include <stdlib.h>
#include <string.h>
#include "gomp-constants.h"

typedef struct gomp_task_depend_entry *hash_entry_type;
static inline void *
htab_alloc (size_t size)
{
  return gomp_malloc (size);
}

static inline void
htab_free (void *ptr)
{
  free (ptr);
}

#include "hashtab.h"

static inline hashval_t
htab_hash (hash_entry_type element)
{
  return hash_pointer (element->addr);
}

static inline bool
htab_eq (hash_entry_type x, hash_entry_type y)
{
  return x->addr == y->addr;
}
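
/* Illustrative note (not from the original source): "hashtab.h" is a
   template-style header.  Including it after the hash_entry_type typedef
   and the htab_alloc/htab_free/htab_hash/htab_eq definitions above
   instantiates a hash table keyed on the depend address.  A minimal
   usage sketch, with hypothetical names:

     htab_t h = htab_create (12);	// initial capacity hint
     struct gomp_task_depend_entry e, *found;
     e.addr = some_address;		// address from a depend clause
     found = htab_find (h, &e);		// NULL if ADDR has no entry

   The htab_find_slot calls later in this file additionally return a
   slot pointer, so entries sharing an address can be chained through
   their next/prev fields.  */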
/* Create a new task data structure.  */

void
gomp_init_task (struct gomp_task *task, struct gomp_task *parent_task,
		struct gomp_task_icv *prev_icv)
{
  task->parent = parent_task;
  task->icv = *prev_icv;
  task->kind = GOMP_TASK_IMPLICIT;
  task->taskwait = NULL;
  task->in_tied_task = false;
  task->final_task = false;
  task->copy_ctors_done = false;
  task->parent_depends_on = false;
  task->children = NULL;
  task->taskgroup = NULL;
  task->dependers = NULL;
  task->depend_hash = NULL;
  task->depend_count = 0;
}
/* Clean up a task, after completing it.  */

void
gomp_end_task (void)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_task *task = thr->task;

  gomp_finish_task (task);
  thr->task = task->parent;
}
/* Orphan the task in CHILDREN and all its siblings.  */

static inline void
gomp_clear_parent (struct gomp_task *children)
{
  struct gomp_task *task = children;

  if (task)
    do
      {
	task->parent = NULL;
	task = task->next_child;
      }
    while (task != children);
}
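
/* Illustrative note (not from the original source): the children lists
   in this file are circular doubly-linked lists.  A parent's "children"
   pointer designates the first child, and the last child is reachable
   as children->prev_child.  With three children A, B and C:

       children --> A <-> B <-> C
                    ^...........^    (A->prev_child == C,
                                      C->next_child == A)

   which is why the do/while above can orphan every sibling simply by
   following next_child until it returns to the starting task.  */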
/* Called when encountering an explicit task directive.  If IF_CLAUSE is
   false, then we must not delay in executing the task.  If UNTIED is true,
   then the task may be executed by any member of the team.

   DEPEND is an array containing:
     depend[0]: number of depend elements.
     depend[1]: number of depend elements of type "out" or "inout".
     depend[2..N+1]: address of [1..N]th depend element.  */
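
/* For illustration (hedged; the actual array is built by the compiler):
   a construct such as

       #pragma omp task depend(out: x) depend(in: y, z)

   would be lowered to a DEPEND array of N = 3 elements, with the
   out/inout addresses placed before the in addresses:

       depend[0] = (void *) 3;	// total number of depend elements
       depend[1] = (void *) 1;	// how many of them are out/inout
       depend[2] = &x;		// out/inout addresses come first...
       depend[3] = &y;		// ...followed by the in addresses
       depend[4] = &z;  */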
void
GOMP_task (void (*fn) (void *), void *data, void (*cpyfn) (void *, void *),
	   long arg_size, long arg_align, bool if_clause, unsigned flags,
	   void **depend, int priority)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_team *team = thr->ts.team;

#ifdef HAVE_BROKEN_POSIX_SEMAPHORES
  /* If pthread_mutex_* is used for omp_*lock*, then each task must be
     tied to one thread all the time.  This means UNTIED tasks must be
     tied and if CPYFN is non-NULL IF(0) must be forced, as CPYFN
     might be running on different thread than FN.  */
  if (cpyfn)
    if_clause = false;
  flags &= ~GOMP_TASK_FLAG_UNTIED;
#endif

  /* If parallel or taskgroup has been cancelled, don't start new tasks.  */
  if (team
      && (gomp_team_barrier_cancelled (&team->barrier)
	  || (thr->task->taskgroup && thr->task->taskgroup->cancelled)))
    return;

  if ((flags & GOMP_TASK_FLAG_PRIORITY) == 0)
    priority = 0;
  /* FIXME, use priority.  */
  (void) priority;

  if (!if_clause || team == NULL
      || (thr->task && thr->task->final_task)
      || team->task_count > 64 * team->nthreads)
    {
      struct gomp_task task;

      /* If there are depend clauses and earlier deferred sibling tasks
	 with depend clauses, check if there isn't a dependency.  If there
	 is, we need to wait for them.  There is no need to handle
	 depend clauses for non-deferred tasks other than this, because
	 the parent task is suspended until the child task finishes and thus
	 it can't start further child tasks.  */
      if ((flags & GOMP_TASK_FLAG_DEPEND)
	  && thr->task && thr->task->depend_hash)
	gomp_task_maybe_wait_for_dependencies (depend);

      gomp_init_task (&task, thr->task, gomp_icv (false));
      task.kind = GOMP_TASK_UNDEFERRED;
      task.final_task = (thr->task && thr->task->final_task)
			|| (flags & GOMP_TASK_FLAG_FINAL);
      if (thr->task)
	{
	  task.in_tied_task = thr->task->in_tied_task;
	  task.taskgroup = thr->task->taskgroup;
	}
      thr->task = &task;
      if (__builtin_expect (cpyfn != NULL, 0))
	{
	  char buf[arg_size + arg_align - 1];
	  char *arg = (char *) (((uintptr_t) buf + arg_align - 1)
				& ~(uintptr_t) (arg_align - 1));
	  cpyfn (arg, data);
	  fn (arg);
	}
      else
	fn (data);
      /* Access to "children" is normally done inside a task_lock
	 mutex region, but the only way this particular task.children
	 can be set is if this thread's task work function (fn)
	 creates children.  So since the setter is *this* thread, we
	 need no barriers here when testing for non-NULL.  We can have
	 task.children set by the current thread then changed by a
	 child thread, but seeing a stale non-NULL value is not a
	 problem.  Once past the task_lock acquisition, this thread
	 will see the real value of task.children.  */
      if (task.children != NULL)
	{
	  gomp_mutex_lock (&team->task_lock);
	  gomp_clear_parent (task.children);
	  gomp_mutex_unlock (&team->task_lock);
	}
      gomp_end_task ();
    }
  else
    {
      struct gomp_task *task;
      struct gomp_task *parent = thr->task;
      struct gomp_taskgroup *taskgroup = parent->taskgroup;
      char *arg;
      bool do_wake;
      size_t depend_size = 0;

      if (flags & GOMP_TASK_FLAG_DEPEND)
	depend_size = ((uintptr_t) depend[0]
		       * sizeof (struct gomp_task_depend_entry));
      task = gomp_malloc (sizeof (*task) + depend_size
			  + arg_size + arg_align - 1);
      arg = (char *) (((uintptr_t) (task + 1) + depend_size + arg_align - 1)
		      & ~(uintptr_t) (arg_align - 1));
      gomp_init_task (task, parent, gomp_icv (false));
      task->kind = GOMP_TASK_UNDEFERRED;
      task->in_tied_task = parent->in_tied_task;
      task->taskgroup = taskgroup;
      thr->task = task;
      if (cpyfn)
	{
	  cpyfn (arg, data);
	  task->copy_ctors_done = true;
	}
      else
	memcpy (arg, data, arg_size);
      thr->task = parent;
      task->kind = GOMP_TASK_WAITING;
      task->fn = fn;
      task->fn_data = arg;
      task->final_task = (flags & GOMP_TASK_FLAG_FINAL) >> 1;
      gomp_mutex_lock (&team->task_lock);
      /* If parallel or taskgroup has been cancelled, don't start new
	 tasks.  */
      if (__builtin_expect ((gomp_team_barrier_cancelled (&team->barrier)
			     || (taskgroup && taskgroup->cancelled))
			    && !task->copy_ctors_done, 0))
	{
	  gomp_mutex_unlock (&team->task_lock);
	  gomp_finish_task (task);
	  free (task);
	  return;
	}
      if (taskgroup)
	taskgroup->num_children++;
      if (depend_size)
	{
	  size_t ndepend = (uintptr_t) depend[0];
	  size_t nout = (uintptr_t) depend[1];
	  size_t i;
	  hash_entry_type ent;

	  task->depend_count = ndepend;
	  task->num_dependees = 0;
	  if (parent->depend_hash == NULL)
	    parent->depend_hash
	      = htab_create (2 * ndepend > 12 ? 2 * ndepend : 12);
	  for (i = 0; i < ndepend; i++)
	    {
	      task->depend[i].addr = depend[2 + i];
	      task->depend[i].next = NULL;
	      task->depend[i].prev = NULL;
	      task->depend[i].task = task;
	      task->depend[i].is_in = i >= nout;
	      task->depend[i].redundant = false;
	      task->depend[i].redundant_out = false;

	      hash_entry_type *slot
		= htab_find_slot (&parent->depend_hash, &task->depend[i],
				  INSERT);
	      hash_entry_type out = NULL, last = NULL;
	      if (*slot)
		{
		  /* If multiple depends on the same task are the
		     same, all but the first one are redundant.
		     As inout/out come first, if any of them is
		     inout/out, it will win, which is the right
		     semantics.  */
		  if ((*slot)->task == task)
		    {
		      task->depend[i].redundant = true;
		      continue;
		    }
		  for (ent = *slot; ent; ent = ent->next)
		    {
		      if (ent->redundant_out)
			break;

		      last = ent;

		      /* depend(in:...) doesn't depend on earlier
			 depend(in:...).  */
		      if (i >= nout && ent->is_in)
			continue;

		      if (!ent->is_in)
			out = ent;

		      struct gomp_task *tsk = ent->task;
		      if (tsk->dependers == NULL)
			{
			  tsk->dependers
			    = gomp_malloc (sizeof (struct gomp_dependers_vec)
					   + 6 * sizeof (struct gomp_task *));
			  tsk->dependers->n_elem = 1;
			  tsk->dependers->allocated = 6;
			  tsk->dependers->elem[0] = task;
			  task->num_dependees++;
			  continue;
			}
		      /* We already have some other dependency on tsk
			 from earlier depend clause.  */
		      else if (tsk->dependers->n_elem
			       && (tsk->dependers->elem[tsk->dependers->n_elem
							- 1]
				   == task))
			continue;
		      else if (tsk->dependers->n_elem
			       == tsk->dependers->allocated)
			{
			  tsk->dependers->allocated
			    = tsk->dependers->allocated * 2 + 2;
			  tsk->dependers
			    = gomp_realloc (tsk->dependers,
					    sizeof (struct gomp_dependers_vec)
					    + (tsk->dependers->allocated
					       * sizeof (struct gomp_task *)));
			}
		      tsk->dependers->elem[tsk->dependers->n_elem++] = task;
		      task->num_dependees++;
		    }
		  task->depend[i].next = *slot;
		  (*slot)->prev = &task->depend[i];
		}
	      *slot = &task->depend[i];

	      /* There is no need to store more than one depend({,in}out:)
		 task per address in the hash table chain for the purpose
		 of creation of deferred tasks, because each out
		 depends on all earlier outs, thus it is enough to record
		 just the last depend({,in}out:).  For depend(in:), we need
		 to keep all of the previous ones not terminated yet, because
		 a later depend({,in}out:) might need to depend on all of
		 them.  So, if the new task's clause is depend({,in}out:),
		 we know there is at most one other depend({,in}out:) clause
		 in the list (out).  For non-deferred tasks we want to see
		 all outs, so they are moved to the end of the chain,
		 after first redundant_out entry all following entries
		 should be redundant_out.  */
	      if (!task->depend[i].is_in && out)
		{
		  if (out != last)
		    {
		      out->next->prev = out->prev;
		      out->prev->next = out->next;
		      out->next = last->next;
		      out->prev = last;
		      last->next = out;
		      if (out->next)
			out->next->prev = out;
		    }
		  out->redundant_out = true;
		}
	    }
	  if (task->num_dependees)
	    {
	      gomp_mutex_unlock (&team->task_lock);
	      return;
	    }
	}
      if (parent->children)
	{
	  task->next_child = parent->children;
	  task->prev_child = parent->children->prev_child;
	  task->next_child->prev_child = task;
	  task->prev_child->next_child = task;
	}
      else
	{
	  task->next_child = task;
	  task->prev_child = task;
	}
      parent->children = task;
      if (taskgroup)
	{
	  /* If applicable, place task into its taskgroup.  */
	  if (taskgroup->children)
	    {
	      task->next_taskgroup = taskgroup->children;
	      task->prev_taskgroup = taskgroup->children->prev_taskgroup;
	      task->next_taskgroup->prev_taskgroup = task;
	      task->prev_taskgroup->next_taskgroup = task;
	    }
	  else
	    {
	      task->next_taskgroup = task;
	      task->prev_taskgroup = task;
	    }
	  taskgroup->children = task;
	}
      if (team->task_queue)
	{
	  task->next_queue = team->task_queue;
	  task->prev_queue = team->task_queue->prev_queue;
	  task->next_queue->prev_queue = task;
	  task->prev_queue->next_queue = task;
	}
      else
	{
	  task->next_queue = task;
	  task->prev_queue = task;
	  team->task_queue = task;
	}
      ++team->task_count;
      ++team->task_queued_count;
      gomp_team_barrier_set_task_pending (&team->barrier);
      do_wake = team->task_running_count + !parent->in_tied_task
		< team->nthreads;
      gomp_mutex_unlock (&team->task_lock);
      if (do_wake)
	gomp_team_barrier_wake (&team->barrier, 1);
    }
}
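
/* Illustrative note on the argument-block arithmetic above (the values
   here are made up): the task descriptor, its depend array and a copy
   of the task's argument block are carved out of one gomp_malloc
   allocation.  Rounding a pointer P up to ALIGN (a power of two) uses

       aligned = (P + ALIGN - 1) & ~(ALIGN - 1);

   e.g. P == 0x1005 with ALIGN == 8 yields 0x1008, so ARG ends up
   suitably aligned no matter where the depend array ends.  Allocating
   ALIGN - 1 extra bytes guarantees the rounded pointer stays inside
   the allocation.  */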
ialias (GOMP_taskgroup_start)
ialias (GOMP_taskgroup_end)

#define TYPE long
#define UTYPE unsigned long
#define TYPE_is_long 1
#include "taskloop.c"
#undef TYPE
#undef UTYPE
#undef TYPE_is_long

#define TYPE unsigned long long
#define UTYPE TYPE
#define GOMP_taskloop GOMP_taskloop_ull
#include "taskloop.c"
#undef TYPE
#undef UTYPE
#undef GOMP_taskloop
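
/* The two #include blocks above use the preprocessor as a poor man's
   template mechanism: taskloop.c is written against TYPE/UTYPE and is
   compiled twice, once producing GOMP_taskloop for signed long and once
   GOMP_taskloop_ull for unsigned long long.  A minimal sketch of the
   same pattern, with a hypothetical file "sum.inc" that is not part of
   libgomp:

       // sum.inc
       TYPE NAME (TYPE a, TYPE b) { return a + b; }

       // user code
       #define TYPE long
       #define NAME sum_long
       #include "sum.inc"	// defines: long sum_long (long, long)
       #undef TYPE
       #undef NAME  */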
static inline bool
gomp_task_run_pre (struct gomp_task *child_task, struct gomp_task *parent,
		   struct gomp_taskgroup *taskgroup, struct gomp_team *team)
{
  if (parent)
    {
      /* Adjust children such that it will point to a next child,
	 while the current one is scheduled to be executed.  This way,
	 GOMP_taskwait (and others) can schedule a next task while
	 waiting.

	 Do not remove it entirely from the circular list, as it is
	 still a child, though not one we should consider first (say
	 by GOMP_taskwait).  */
      if (parent->children == child_task)
	parent->children = child_task->next_child;

      /* If the current task (child_task) is at the top of the
	 parent's last_parent_depends_on, it's about to be removed
	 from it.  Adjust last_parent_depends_on appropriately.  */
      if (__builtin_expect (child_task->parent_depends_on, 0)
	  && parent->taskwait->last_parent_depends_on == child_task)
	{
	  /* The last_parent_depends_on list was built with all
	     parent_depends_on entries linked to the prev_child.  Grab
	     the next last_parent_depends_on head from this prev_child if
	     available...  */
	  if (child_task->prev_child->kind == GOMP_TASK_WAITING
	      && child_task->prev_child->parent_depends_on)
	    parent->taskwait->last_parent_depends_on = child_task->prev_child;
	  else
	    {
	      /* ...otherwise, there are no more parent_depends_on
		 entries waiting to run.  In which case, clear the
		 list.  */
	      parent->taskwait->last_parent_depends_on = NULL;
	    }
	}
    }

  /* Adjust taskgroup to point to the next taskgroup.  See note above
     regarding adjustment of children as to why the child_task is not
     removed entirely from the circular list.  */
  if (taskgroup && taskgroup->children == child_task)
    taskgroup->children = child_task->next_taskgroup;

  /* Remove child_task from the task_queue.  */
  child_task->prev_queue->next_queue = child_task->next_queue;
  child_task->next_queue->prev_queue = child_task->prev_queue;
  if (team->task_queue == child_task)
    {
      if (child_task->next_queue != child_task)
	team->task_queue = child_task->next_queue;
      else
	team->task_queue = NULL;
    }
  child_task->kind = GOMP_TASK_TIED;

  if (--team->task_queued_count == 0)
    gomp_team_barrier_clear_task_pending (&team->barrier);
  if ((gomp_team_barrier_cancelled (&team->barrier)
       || (taskgroup && taskgroup->cancelled))
      && !child_task->copy_ctors_done)
    return true;
  return false;
}
static void
gomp_task_run_post_handle_depend_hash (struct gomp_task *child_task)
{
  struct gomp_task *parent = child_task->parent;
  size_t i;

  for (i = 0; i < child_task->depend_count; i++)
    if (!child_task->depend[i].redundant)
      {
	if (child_task->depend[i].next)
	  child_task->depend[i].next->prev = child_task->depend[i].prev;
	if (child_task->depend[i].prev)
	  child_task->depend[i].prev->next = child_task->depend[i].next;
	else
	  {
	    hash_entry_type *slot
	      = htab_find_slot (&parent->depend_hash, &child_task->depend[i],
				NO_INSERT);
	    if (*slot != &child_task->depend[i])
	      abort ();
	    if (child_task->depend[i].next)
	      *slot = child_task->depend[i].next;
	    else
	      htab_clear_slot (parent->depend_hash, slot);
	  }
      }
}
/* After CHILD_TASK has been run, adjust the various task queues to
   give higher priority to the tasks that depend on CHILD_TASK.

   TEAM is the team to which CHILD_TASK belongs to.  */

static size_t
gomp_task_run_post_handle_dependers (struct gomp_task *child_task,
				     struct gomp_team *team)
{
  struct gomp_task *parent = child_task->parent;
  size_t i, count = child_task->dependers->n_elem, ret = 0;
  for (i = 0; i < count; i++)
    {
      struct gomp_task *task = child_task->dependers->elem[i];
      if (--task->num_dependees != 0)
	continue;

      struct gomp_taskgroup *taskgroup = task->taskgroup;
      if (parent)
	{
	  if (parent->children)
	    {
	      /* If parent is in gomp_task_maybe_wait_for_dependencies
		 and it doesn't need to wait for this task, put it after
		 all ready to run tasks it needs to wait for.  */
	      if (parent->taskwait && parent->taskwait->last_parent_depends_on
		  && !task->parent_depends_on)
		{
		  /* Put depender in last_parent_depends_on.  */
		  struct gomp_task *last_parent_depends_on
		    = parent->taskwait->last_parent_depends_on;
		  task->next_child = last_parent_depends_on->next_child;
		  task->prev_child = last_parent_depends_on;
		}
	      else
		{
		  /* Make depender a sibling of child_task, and place
		     it at the top of said sibling list.  */
		  task->next_child = parent->children;
		  task->prev_child = parent->children->prev_child;
		  parent->children = task;
		}
	      task->next_child->prev_child = task;
	      task->prev_child->next_child = task;
	    }
	  else
	    {
	      /* Make depender a sibling of child_task.  */
	      task->next_child = task;
	      task->prev_child = task;
	      parent->children = task;
	    }
	  if (parent->taskwait)
	    {
	      if (parent->taskwait->in_taskwait)
		{
		  parent->taskwait->in_taskwait = false;
		  gomp_sem_post (&parent->taskwait->taskwait_sem);
		}
	      else if (parent->taskwait->in_depend_wait)
		{
		  parent->taskwait->in_depend_wait = false;
		  gomp_sem_post (&parent->taskwait->taskwait_sem);
		}
	      if (parent->taskwait->last_parent_depends_on == NULL
		  && task->parent_depends_on)
		parent->taskwait->last_parent_depends_on = task;
	    }
	}
      /* If depender is in a taskgroup, put it at the TOP of its
	 taskgroup.  */
      if (taskgroup)
	{
	  if (taskgroup->children)
	    {
	      task->next_taskgroup = taskgroup->children;
	      task->prev_taskgroup = taskgroup->children->prev_taskgroup;
	      task->next_taskgroup->prev_taskgroup = task;
	      task->prev_taskgroup->next_taskgroup = task;
	    }
	  else
	    {
	      task->next_taskgroup = task;
	      task->prev_taskgroup = task;
	    }
	  taskgroup->children = task;
	  if (taskgroup->in_taskgroup_wait)
	    {
	      taskgroup->in_taskgroup_wait = false;
	      gomp_sem_post (&taskgroup->taskgroup_sem);
	    }
	}
      /* Put depender of child_task at the END of the team's
	 task_queue.  */
      if (team->task_queue)
	{
	  task->next_queue = team->task_queue;
	  task->prev_queue = team->task_queue->prev_queue;
	  task->next_queue->prev_queue = task;
	  task->prev_queue->next_queue = task;
	}
      else
	{
	  task->next_queue = task;
	  task->prev_queue = task;
	  team->task_queue = task;
	}
      ++team->task_count;
      ++team->task_queued_count;
      ++ret;
    }
  free (child_task->dependers);
  child_task->dependers = NULL;
  if (ret > 1)
    gomp_team_barrier_set_task_pending (&team->barrier);
  return ret;
}
static inline size_t
gomp_task_run_post_handle_depend (struct gomp_task *child_task,
				  struct gomp_team *team)
{
  if (child_task->depend_count == 0)
    return 0;

  /* If parent is gone already, the hash table is freed and nothing
     will use the hash table anymore, no need to remove anything from it.  */
  if (child_task->parent != NULL)
    gomp_task_run_post_handle_depend_hash (child_task);

  if (child_task->dependers == NULL)
    return 0;

  return gomp_task_run_post_handle_dependers (child_task, team);
}
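
/* Illustrative summary (not from the original source): when a task with
   depend clauses finishes, the teardown above is two-phase.  First,
   gomp_task_run_post_handle_depend_hash unlinks the task's entries from
   the parent's address-keyed hash table; second,
   gomp_task_run_post_handle_dependers decrements num_dependees on every
   recorded depender and queues those that reach zero.  The returned
   count of newly runnable tasks lets callers decide how many extra
   threads to wake.  */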
/* Remove CHILD_TASK from its parent.  */

static inline void
gomp_task_run_post_remove_parent (struct gomp_task *child_task)
{
  struct gomp_task *parent = child_task->parent;
  if (parent == NULL)
    return;

  /* If this was the last task the parent was depending on,
     synchronize with gomp_task_maybe_wait_for_dependencies so it can
     clean up and return.  */
  if (__builtin_expect (child_task->parent_depends_on, 0)
      && --parent->taskwait->n_depend == 0
      && parent->taskwait->in_depend_wait)
    {
      parent->taskwait->in_depend_wait = false;
      gomp_sem_post (&parent->taskwait->taskwait_sem);
    }

  /* Remove CHILD_TASK from its sibling list.  */
  child_task->prev_child->next_child = child_task->next_child;
  child_task->next_child->prev_child = child_task->prev_child;
  if (parent->children != child_task)
    return;
  if (child_task->next_child != child_task)
    parent->children = child_task->next_child;
  else
    {
      /* We access task->children in GOMP_taskwait
	 outside of the task lock mutex region, so
	 need a release barrier here to ensure memory
	 written by child_task->fn above is flushed
	 before the NULL is written.  */
      __atomic_store_n (&parent->children, NULL, MEMMODEL_RELEASE);
      if (parent->taskwait && parent->taskwait->in_taskwait)
	{
	  parent->taskwait->in_taskwait = false;
	  gomp_sem_post (&parent->taskwait->taskwait_sem);
	}
    }
}
/* Remove CHILD_TASK from its taskgroup.  */

static inline void
gomp_task_run_post_remove_taskgroup (struct gomp_task *child_task)
{
  struct gomp_taskgroup *taskgroup = child_task->taskgroup;
  if (taskgroup == NULL)
    return;
  child_task->prev_taskgroup->next_taskgroup = child_task->next_taskgroup;
  child_task->next_taskgroup->prev_taskgroup = child_task->prev_taskgroup;
  if (taskgroup->num_children > 1)
    --taskgroup->num_children;
  else
    {
      /* We access taskgroup->num_children in GOMP_taskgroup_end
	 outside of the task lock mutex region, so
	 need a release barrier here to ensure memory
	 written by child_task->fn above is flushed
	 before the NULL is written.  */
      __atomic_store_n (&taskgroup->num_children, 0, MEMMODEL_RELEASE);
    }
  if (taskgroup->children != child_task)
    return;
  if (child_task->next_taskgroup != child_task)
    taskgroup->children = child_task->next_taskgroup;
  else
    {
      taskgroup->children = NULL;
      if (taskgroup->in_taskgroup_wait)
	{
	  taskgroup->in_taskgroup_wait = false;
	  gomp_sem_post (&taskgroup->taskgroup_sem);
	}
    }
}
void
gomp_barrier_handle_tasks (gomp_barrier_state_t state)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_team *team = thr->ts.team;
  struct gomp_task *task = thr->task;
  struct gomp_task *child_task = NULL;
  struct gomp_task *to_free = NULL;
  int do_wake = 0;

  gomp_mutex_lock (&team->task_lock);
  if (gomp_barrier_last_thread (state))
    {
      if (team->task_count == 0)
	{
	  gomp_team_barrier_done (&team->barrier, state);
	  gomp_mutex_unlock (&team->task_lock);
	  gomp_team_barrier_wake (&team->barrier, 0);
	  return;
	}
      gomp_team_barrier_set_waiting_for_tasks (&team->barrier);
    }

  while (1)
    {
      bool cancelled = false;
      if (team->task_queue != NULL)
	{
	  child_task = team->task_queue;
	  cancelled = gomp_task_run_pre (child_task, child_task->parent,
					 child_task->taskgroup, team);
	  if (__builtin_expect (cancelled, 0))
	    {
	      if (to_free)
		{
		  gomp_finish_task (to_free);
		  free (to_free);
		  to_free = NULL;
		}
	      goto finish_cancelled;
	    }
	  team->task_running_count++;
	  child_task->in_tied_task = true;
	}
      gomp_mutex_unlock (&team->task_lock);
      if (do_wake)
	{
	  gomp_team_barrier_wake (&team->barrier, do_wake);
	  do_wake = 0;
	}
      if (to_free)
	{
	  gomp_finish_task (to_free);
	  free (to_free);
	  to_free = NULL;
	}
      if (child_task)
	{
	  thr->task = child_task;
	  child_task->fn (child_task->fn_data);
	  thr->task = task;
	}
      else
	return;
      gomp_mutex_lock (&team->task_lock);
      if (child_task)
	{
	 finish_cancelled:;
	  size_t new_tasks
	    = gomp_task_run_post_handle_depend (child_task, team);
	  gomp_task_run_post_remove_parent (child_task);
	  gomp_clear_parent (child_task->children);
	  gomp_task_run_post_remove_taskgroup (child_task);
	  to_free = child_task;
	  child_task = NULL;
	  if (!cancelled)
	    team->task_running_count--;
	  if (new_tasks > 1)
	    {
	      do_wake = team->nthreads - team->task_running_count;
	      if (do_wake > new_tasks)
		do_wake = new_tasks;
	    }
	  if (--team->task_count == 0
	      && gomp_team_barrier_waiting_for_tasks (&team->barrier))
	    {
	      gomp_team_barrier_done (&team->barrier, state);
	      gomp_mutex_unlock (&team->task_lock);
	      gomp_team_barrier_wake (&team->barrier, 0);
	      gomp_mutex_lock (&team->task_lock);
	    }
	}
    }
}
/* Called when encountering a taskwait directive.

   Wait for all children of the current task.  */

void
GOMP_taskwait (void)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_team *team = thr->ts.team;
  struct gomp_task *task = thr->task;
  struct gomp_task *child_task = NULL;
  struct gomp_task *to_free = NULL;
  struct gomp_taskwait taskwait;
  int do_wake = 0;

  /* The acquire barrier on load of task->children here synchronizes
     with the write of a NULL in gomp_task_run_post_remove_parent.  It is
     not necessary that we synchronize with other non-NULL writes at
     this point, but we must ensure that all writes to memory by a
     child thread task work function are seen before we exit from
     GOMP_taskwait.  */
  if (task == NULL
      || __atomic_load_n (&task->children, MEMMODEL_ACQUIRE) == NULL)
    return;

  memset (&taskwait, 0, sizeof (taskwait));
  gomp_mutex_lock (&team->task_lock);
  while (1)
    {
      bool cancelled = false;
      if (task->children == NULL)
	{
	  bool destroy_taskwait = task->taskwait != NULL;
	  task->taskwait = NULL;
	  gomp_mutex_unlock (&team->task_lock);
	  if (to_free)
	    {
	      gomp_finish_task (to_free);
	      free (to_free);
	    }
	  if (destroy_taskwait)
	    gomp_sem_destroy (&taskwait.taskwait_sem);
	  return;
	}
      if (task->children->kind == GOMP_TASK_WAITING)
	{
	  child_task = task->children;
	  cancelled
	    = gomp_task_run_pre (child_task, task, child_task->taskgroup,
				 team);
	  if (__builtin_expect (cancelled, 0))
	    {
	      if (to_free)
		{
		  gomp_finish_task (to_free);
		  free (to_free);
		  to_free = NULL;
		}
	      goto finish_cancelled;
	    }
	}
      else
	{
	  /* All tasks we are waiting for are already running
	     in other threads.  Wait for them.  */
	  if (task->taskwait == NULL)
	    {
	      taskwait.in_depend_wait = false;
	      gomp_sem_init (&taskwait.taskwait_sem, 0);
	      task->taskwait = &taskwait;
	    }
	  taskwait.in_taskwait = true;
	}
      gomp_mutex_unlock (&team->task_lock);
      if (do_wake)
	{
	  gomp_team_barrier_wake (&team->barrier, do_wake);
	  do_wake = 0;
	}
      if (to_free)
	{
	  gomp_finish_task (to_free);
	  free (to_free);
	  to_free = NULL;
	}
      if (child_task)
	{
	  thr->task = child_task;
	  child_task->fn (child_task->fn_data);
	  thr->task = task;
	}
      else
	gomp_sem_wait (&taskwait.taskwait_sem);
      gomp_mutex_lock (&team->task_lock);
      if (child_task)
	{
	 finish_cancelled:;
	  size_t new_tasks
	    = gomp_task_run_post_handle_depend (child_task, team);

	  /* Remove child_task from children list, and set up the next
	     sibling to be run.  */
	  child_task->prev_child->next_child = child_task->next_child;
	  child_task->next_child->prev_child = child_task->prev_child;
	  if (task->children == child_task)
	    {
	      if (child_task->next_child != child_task)
		task->children = child_task->next_child;
	      else
		task->children = NULL;
	    }
	  /* Orphan all the children of CHILD_TASK.  */
	  gomp_clear_parent (child_task->children);

	  /* Remove CHILD_TASK from its taskgroup.  */
	  gomp_task_run_post_remove_taskgroup (child_task);

	  to_free = child_task;
	  child_task = NULL;
	  team->task_count--;
	  if (new_tasks > 1)
	    {
	      do_wake = team->nthreads - team->task_running_count
			- !task->in_tied_task;
	      if (do_wake > new_tasks)
		do_wake = new_tasks;
	    }
	}
    }
}
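
/* Illustrative note on the memory ordering used above (a sketch, not
   part of the original file): the MEMMODEL_ACQUIRE load of
   task->children in GOMP_taskwait pairs with the MEMMODEL_RELEASE store
   of NULL in gomp_task_run_post_remove_parent.  This is the classic
   release/acquire handoff:

       // child thread, after running child_task->fn:
       __atomic_store_n (&parent->children, NULL, MEMMODEL_RELEASE);

       // parent thread, on entry to GOMP_taskwait:
       if (__atomic_load_n (&task->children, MEMMODEL_ACQUIRE) == NULL)
	 return;	// all of fn's writes are visible here

   so the parent can take the fast path out of GOMP_taskwait without
   ever acquiring task_lock.  */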
/* This is like GOMP_taskwait, but we only wait for tasks that the
   upcoming task depends on.

   DEPEND is as in GOMP_task.  */

void
gomp_task_maybe_wait_for_dependencies (void **depend)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_task *task = thr->task;
  struct gomp_team *team = thr->ts.team;
  struct gomp_task_depend_entry elem, *ent = NULL;
  struct gomp_taskwait taskwait;
  struct gomp_task *last_parent_depends_on = NULL;
  size_t ndepend = (uintptr_t) depend[0];
  size_t nout = (uintptr_t) depend[1];
  size_t i;
  size_t num_awaited = 0;
  struct gomp_task *child_task = NULL;
  struct gomp_task *to_free = NULL;
  int do_wake = 0;

  gomp_mutex_lock (&team->task_lock);
  for (i = 0; i < ndepend; i++)
    {
      elem.addr = depend[i + 2];
      ent = htab_find (task->depend_hash, &elem);
      for (; ent; ent = ent->next)
	if (i >= nout && ent->is_in)
	  continue;
	else
	  {
	    struct gomp_task *tsk = ent->task;
	    if (!tsk->parent_depends_on)
	      {
		tsk->parent_depends_on = true;
		++num_awaited;
		if (tsk->num_dependees == 0 && tsk->kind == GOMP_TASK_WAITING)
		  {
		    /* If a task we need to wait for is not already
		       running and is ready to be scheduled, move it
		       to front, so that we run it as soon as possible.  */
		    if (last_parent_depends_on)
		      {
			/* Remove tsk from the sibling list...  */
			tsk->prev_child->next_child = tsk->next_child;
			tsk->next_child->prev_child = tsk->prev_child;
			/* ...and insert it into last_parent_depends_on.  */
			tsk->prev_child = last_parent_depends_on;
			tsk->next_child = last_parent_depends_on->next_child;
			tsk->prev_child->next_child = tsk;
			tsk->next_child->prev_child = tsk;
		      }
		    else if (tsk != task->children)
		      {
			/* Remove tsk from the sibling list...  */
			tsk->prev_child->next_child = tsk->next_child;
			tsk->next_child->prev_child = tsk->prev_child;
			/* ...and insert it into the running task's
			   children.  */
			tsk->prev_child = task->children;
			tsk->next_child = task->children->next_child;
			task->children = tsk;
			tsk->prev_child->next_child = tsk;
			tsk->next_child->prev_child = tsk;
		      }
		    else
		      {
			/* It's already in task->children.
			   Nothing to do.  */;
		      }
		    last_parent_depends_on = tsk;
		  }
	      }
	  }
    }
  if (num_awaited == 0)
    {
      gomp_mutex_unlock (&team->task_lock);
      return;
    }

  memset (&taskwait, 0, sizeof (taskwait));
  taskwait.n_depend = num_awaited;
  taskwait.last_parent_depends_on = last_parent_depends_on;
  gomp_sem_init (&taskwait.taskwait_sem, 0);
  task->taskwait = &taskwait;

  while (1)
    {
      bool cancelled = false;
      if (taskwait.n_depend == 0)
	{
	  task->taskwait = NULL;
	  gomp_mutex_unlock (&team->task_lock);
	  if (to_free)
	    {
	      gomp_finish_task (to_free);
	      free (to_free);
	    }
	  gomp_sem_destroy (&taskwait.taskwait_sem);
	  return;
	}
      if (task->children->kind == GOMP_TASK_WAITING)
	{
	  child_task = task->children;
	  cancelled
	    = gomp_task_run_pre (child_task, task, child_task->taskgroup,
				 team);
	  if (__builtin_expect (cancelled, 0))
	    {
	      if (to_free)
		{
		  gomp_finish_task (to_free);
		  free (to_free);
		  to_free = NULL;
		}
	      goto finish_cancelled;
	    }
	}
      else
	/* All tasks we are waiting for are already running
	   in other threads.  Wait for them.  */
	taskwait.in_depend_wait = true;
      gomp_mutex_unlock (&team->task_lock);
      if (do_wake)
	{
	  gomp_team_barrier_wake (&team->barrier, do_wake);
	  do_wake = 0;
	}
      if (to_free)
	{
	  gomp_finish_task (to_free);
	  free (to_free);
	  to_free = NULL;
	}
      if (child_task)
	{
	  thr->task = child_task;
	  child_task->fn (child_task->fn_data);
	  thr->task = task;
	}
      else
	gomp_sem_wait (&taskwait.taskwait_sem);
      gomp_mutex_lock (&team->task_lock);
      if (child_task)
	{
	 finish_cancelled:;
	  size_t new_tasks
	    = gomp_task_run_post_handle_depend (child_task, team);
	  if (child_task->parent_depends_on)
	    --taskwait.n_depend;

	  /* Remove child_task from sibling list.  */
	  child_task->prev_child->next_child = child_task->next_child;
	  child_task->next_child->prev_child = child_task->prev_child;
	  if (task->children == child_task)
	    {
	      if (child_task->next_child != child_task)
		task->children = child_task->next_child;
	      else
		task->children = NULL;
	    }

	  gomp_clear_parent (child_task->children);
	  gomp_task_run_post_remove_taskgroup (child_task);
	  to_free = child_task;
	  child_task = NULL;
	  team->task_count--;
	  if (new_tasks > 1)
	    {
	      do_wake = team->nthreads - team->task_running_count
			- !task->in_tied_task;
	      if (do_wake > new_tasks)
		do_wake = new_tasks;
	    }
	}
    }
}
/* Called when encountering a taskyield directive.  */

void
GOMP_taskyield (void)
{
  /* Nothing at the moment.  */
}
void
GOMP_taskgroup_start (void)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_team *team = thr->ts.team;
  struct gomp_task *task = thr->task;
  struct gomp_taskgroup *taskgroup;

  /* If team is NULL, all tasks are executed as
     GOMP_TASK_UNDEFERRED tasks and thus all children tasks of
     taskgroup and their descendant tasks will be finished
     by the time GOMP_taskgroup_end is called.  */
  if (team == NULL)
    return;
  taskgroup = gomp_malloc (sizeof (struct gomp_taskgroup));
  taskgroup->prev = task->taskgroup;
  taskgroup->children = NULL;
  taskgroup->in_taskgroup_wait = false;
  taskgroup->cancelled = false;
  taskgroup->num_children = 0;
  gomp_sem_init (&taskgroup->taskgroup_sem, 0);
  task->taskgroup = taskgroup;
}
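
/* For illustration (hedged): the compiler brackets a taskgroup region
   with calls to the two entry points above and below, roughly as if

       #pragma omp taskgroup
       { ... child tasks ... }

   were lowered to

       GOMP_taskgroup_start ();
       ... GOMP_task (...) calls for the child tasks ...
       GOMP_taskgroup_end ();

   Taskgroups nest via the "prev" field saved in GOMP_taskgroup_start
   and restored in GOMP_taskgroup_end.  */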
void
GOMP_taskgroup_end (void)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_team *team = thr->ts.team;
  struct gomp_task *task = thr->task;
  struct gomp_taskgroup *taskgroup;
  struct gomp_task *child_task = NULL;
  struct gomp_task *to_free = NULL;
  int do_wake = 0;

  if (team == NULL)
    return;
  taskgroup = task->taskgroup;

  /* The acquire barrier on load of taskgroup->num_children here
     synchronizes with the write of 0 in gomp_task_run_post_remove_taskgroup.
     It is not necessary that we synchronize with other non-0 writes at
     this point, but we must ensure that all writes to memory by a
     child thread task work function are seen before we exit from
     GOMP_taskgroup_end.  */
  if (__atomic_load_n (&taskgroup->num_children, MEMMODEL_ACQUIRE) == 0)
    goto finish;

  gomp_mutex_lock (&team->task_lock);
  while (1)
    {
      bool cancelled = false;
      if (taskgroup->children == NULL)
	{
	  if (taskgroup->num_children)
	    {
	      if (task->children == NULL)
		goto do_wait;
	      child_task = task->children;
	    }
	  else
	    {
	      gomp_mutex_unlock (&team->task_lock);
	      if (to_free)
		{
		  gomp_finish_task (to_free);
		  free (to_free);
		}
	      goto finish;
	    }
	}
      else
	child_task = taskgroup->children;
      if (child_task->kind == GOMP_TASK_WAITING)
	{
	  cancelled
	    = gomp_task_run_pre (child_task, child_task->parent, taskgroup,
				 team);
	  if (__builtin_expect (cancelled, 0))
	    {
	      if (to_free)
		{
		  gomp_finish_task (to_free);
		  free (to_free);
		  to_free = NULL;
		}
	      goto finish_cancelled;
	    }
	}
      else
	{
	  child_task = NULL;
	 do_wait:
	  /* All tasks we are waiting for are already running
	     in other threads.  Wait for them.  */
	  taskgroup->in_taskgroup_wait = true;
	}
      gomp_mutex_unlock (&team->task_lock);
      if (do_wake)
	{
	  gomp_team_barrier_wake (&team->barrier, do_wake);
	  do_wake = 0;
	}
      if (to_free)
	{
	  gomp_finish_task (to_free);
	  free (to_free);
	  to_free = NULL;
	}
      if (child_task)
	{
	  thr->task = child_task;
	  child_task->fn (child_task->fn_data);
	  thr->task = task;
	}
      else
	gomp_sem_wait (&taskgroup->taskgroup_sem);
      gomp_mutex_lock (&team->task_lock);
      if (child_task)
	{
	 finish_cancelled:;
	  size_t new_tasks
	    = gomp_task_run_post_handle_depend (child_task, team);
	  gomp_task_run_post_remove_parent (child_task);
	  gomp_clear_parent (child_task->children);
	  gomp_task_run_post_remove_taskgroup (child_task);
	  to_free = child_task;
	  child_task = NULL;
	  team->task_count--;
	  if (new_tasks > 1)
	    {
	      do_wake = team->nthreads - team->task_running_count
			- !task->in_tied_task;
	      if (do_wake > new_tasks)
		do_wake = new_tasks;
	    }
	}
    }

 finish:
  task->taskgroup = taskgroup->prev;
  gomp_sem_destroy (&taskgroup->taskgroup_sem);
  free (taskgroup);
}
int
omp_in_final (void)
{
  struct gomp_thread *thr = gomp_thread ();
  return thr->task && thr->task->final_task;
}

ialias (omp_in_final)
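
/* Note (assumption, based on libgomp.h conventions): ialias creates a
   hidden alias (gomp_ialias_<name>) for an exported entry point, so
   that calls from inside the library bind locally instead of going
   through the PLT.  */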