/* libgomp/task.c  */

/* Copyright (C) 2007-2014 Free Software Foundation, Inc.
   Contributed by Richard Henderson <rth@redhat.com>.

   This file is part of the GNU OpenMP Library (libgomp).

   Libgomp is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
   FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */

/* This file handles the maintenance of tasks in response to task
   creation and termination.  */

#include "libgomp.h"
#include <stdlib.h>
#include <string.h>

typedef struct gomp_task_depend_entry *hash_entry_type;

static inline void *
htab_alloc (size_t size)
{
  return gomp_malloc (size);
}

static inline void
htab_free (void *ptr)
{
  free (ptr);
}

#include "hashtab.h"

static inline hashval_t
htab_hash (hash_entry_type element)
{
  return hash_pointer (element->addr);
}

static inline bool
htab_eq (hash_entry_type x, hash_entry_type y)
{
  return x->addr == y->addr;
}

/* Create a new task data structure.  */

void
gomp_init_task (struct gomp_task *task, struct gomp_task *parent_task,
                struct gomp_task_icv *prev_icv)
{
  task->parent = parent_task;
  task->icv = *prev_icv;
  task->kind = GOMP_TASK_IMPLICIT;
  task->taskwait = NULL;
  task->in_tied_task = false;
  task->final_task = false;
  task->copy_ctors_done = false;
  task->parent_depends_on = false;
  task->children = NULL;
  task->taskgroup = NULL;
  task->dependers = NULL;
  task->depend_hash = NULL;
  task->depend_count = 0;
}

/* Clean up a task, after completing it.  */

void
gomp_end_task (void)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_task *task = thr->task;

  gomp_finish_task (task);
  thr->task = task->parent;
}
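
/* Detach all tasks on the circular CHILDREN sibling list from their
   parent.  The list is circular and doubly linked, so the do-while
   below visits each task exactly once.  */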

static inline void
gomp_clear_parent (struct gomp_task *children)
{
  struct gomp_task *task = children;

  if (task)
    do
      {
        task->parent = NULL;
        task = task->next_child;
      }
    while (task != children);
}

static void gomp_task_maybe_wait_for_dependencies (void **depend);

/* Called when encountering an explicit task directive.  If IF_CLAUSE is
   false, then we must not delay in executing the task.  If UNTIED is true,
   then the task may be executed by any member of the team.  */
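
/* Illustrative sketch (not from the original source): for

     #pragma omp task firstprivate (x)
       foo (x);

   the compiler outlines the task body into a helper function, packs
   the firstprivate copies into a data block, and emits roughly

     GOMP_task (task_fn, &data, NULL, sizeof (data), __alignof__ (data),
                true, 0, NULL);

   where task_fn and data are hypothetical names for the outlined
   function and its argument block.  FLAGS packs the clause bits tested
   below: 1 for untied, 2 for final and 8 for the presence of depend
   clauses.  */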

void
GOMP_task (void (*fn) (void *), void *data, void (*cpyfn) (void *, void *),
           long arg_size, long arg_align, bool if_clause, unsigned flags,
           void **depend)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_team *team = thr->ts.team;

#ifdef HAVE_BROKEN_POSIX_SEMAPHORES
  /* If pthread_mutex_* is used for omp_*lock*, then each task must be
     tied to one thread all the time.  This means UNTIED tasks must be
     tied and if CPYFN is non-NULL IF(0) must be forced, as CPYFN
     might be running on different thread than FN.  */
  if (cpyfn)
    if_clause = false;
  if (flags & 1)
    flags &= ~1;
#endif

  /* If parallel or taskgroup has been cancelled, don't start new tasks.  */
  if (team
      && (gomp_team_barrier_cancelled (&team->barrier)
          || (thr->task->taskgroup && thr->task->taskgroup->cancelled)))
    return;

  if (!if_clause || team == NULL
      || (thr->task && thr->task->final_task)
      || team->task_count > 64 * team->nthreads)
    {
      struct gomp_task task;

      /* If there are depend clauses and earlier deferred sibling tasks
         with depend clauses, check if there isn't a dependency.  If there
         is, we need to wait for them.  There is no need to handle
         depend clauses for non-deferred tasks other than this, because
         the parent task is suspended until the child task finishes and thus
         it can't start further child tasks.  */
      if ((flags & 8) && thr->task && thr->task->depend_hash)
        gomp_task_maybe_wait_for_dependencies (depend);

      gomp_init_task (&task, thr->task, gomp_icv (false));
      task.kind = GOMP_TASK_IFFALSE;
      task.final_task = (thr->task && thr->task->final_task) || (flags & 2);
      if (thr->task)
        {
          task.in_tied_task = thr->task->in_tied_task;
          task.taskgroup = thr->task->taskgroup;
        }
      thr->task = &task;
      if (__builtin_expect (cpyfn != NULL, 0))
        {
          char buf[arg_size + arg_align - 1];
          char *arg = (char *) (((uintptr_t) buf + arg_align - 1)
                                & ~(uintptr_t) (arg_align - 1));
          cpyfn (arg, data);
          fn (arg);
        }
      else
        fn (data);
      /* Access to "children" is normally done inside a task_lock
         mutex region, but the only way this particular task.children
         can be set is if this thread's task work function (fn)
         creates children.  So since the setter is *this* thread, we
         need no barriers here when testing for non-NULL.  We can have
         task.children set by the current thread then changed by a
         child thread, but seeing a stale non-NULL value is not a
         problem.  Once past the task_lock acquisition, this thread
         will see the real value of task.children.  */
      if (task.children != NULL)
        {
          gomp_mutex_lock (&team->task_lock);
          gomp_clear_parent (task.children);
          gomp_mutex_unlock (&team->task_lock);
        }
      gomp_end_task ();
    }
  else
    {
      struct gomp_task *task;
      struct gomp_task *parent = thr->task;
      struct gomp_taskgroup *taskgroup = parent->taskgroup;
      char *arg;
      bool do_wake;
      size_t depend_size = 0;
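
      /* Illustrative note (derived from the uses below, not from the
         original source): when bit 8 of FLAGS is set, DEPEND points to
         an array laid out as

           depend[0]      total number of depend clauses
           depend[1]      how many of them are out/inout (listed first)
           depend[2 + i]  the address each clause refers to

         e.g. depend(out: x) depend(in: y) arrives as
         { (void *) 2, (void *) 1, &x, &y }.  */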

      if (flags & 8)
        depend_size = ((uintptr_t) depend[0]
                       * sizeof (struct gomp_task_depend_entry));
      task = gomp_malloc (sizeof (*task) + depend_size
                          + arg_size + arg_align - 1);
      arg = (char *) (((uintptr_t) (task + 1) + depend_size + arg_align - 1)
                      & ~(uintptr_t) (arg_align - 1));
      gomp_init_task (task, parent, gomp_icv (false));
      task->kind = GOMP_TASK_IFFALSE;
      task->in_tied_task = parent->in_tied_task;
      task->taskgroup = taskgroup;
      thr->task = task;
      if (cpyfn)
        {
          cpyfn (arg, data);
          task->copy_ctors_done = true;
        }
      else
        memcpy (arg, data, arg_size);
      thr->task = parent;
      task->kind = GOMP_TASK_WAITING;
      task->fn = fn;
      task->fn_data = arg;
      task->final_task = (flags & 2) >> 1;
      gomp_mutex_lock (&team->task_lock);
      /* If parallel or taskgroup has been cancelled, don't start new
         tasks.  */
      if (__builtin_expect ((gomp_team_barrier_cancelled (&team->barrier)
                             || (taskgroup && taskgroup->cancelled))
                            && !task->copy_ctors_done, 0))
        {
          gomp_mutex_unlock (&team->task_lock);
          gomp_finish_task (task);
          free (task);
          return;
        }
      if (taskgroup)
        taskgroup->num_children++;
      if (depend_size)
        {
          size_t ndepend = (uintptr_t) depend[0];
          size_t nout = (uintptr_t) depend[1];
          size_t i;
          hash_entry_type ent;

          task->depend_count = ndepend;
          task->num_dependees = 0;
          if (parent->depend_hash == NULL)
            parent->depend_hash
              = htab_create (2 * ndepend > 12 ? 2 * ndepend : 12);
          for (i = 0; i < ndepend; i++)
            {
              task->depend[i].addr = depend[2 + i];
              task->depend[i].next = NULL;
              task->depend[i].prev = NULL;
              task->depend[i].task = task;
              task->depend[i].is_in = i >= nout;
              task->depend[i].redundant = false;
              task->depend[i].redundant_out = false;

              hash_entry_type *slot
                = htab_find_slot (&parent->depend_hash, &task->depend[i],
                                  INSERT);
              hash_entry_type out = NULL, last = NULL;
              if (*slot)
                {
                  /* If multiple depends on the same task are the
                     same, all but the first one are redundant.
                     As inout/out come first, if any of them is
                     inout/out, it will win, which is the right
                     semantics.  */
                  if ((*slot)->task == task)
                    {
                      task->depend[i].redundant = true;
                      continue;
                    }
                  for (ent = *slot; ent; ent = ent->next)
                    {
                      if (ent->redundant_out)
                        break;

                      last = ent;

                      /* depend(in:...) doesn't depend on earlier
                         depend(in:...).  */
                      if (i >= nout && ent->is_in)
                        continue;

                      if (!ent->is_in)
                        out = ent;

                      struct gomp_task *tsk = ent->task;
                      if (tsk->dependers == NULL)
                        {
                          tsk->dependers
                            = gomp_malloc (sizeof (struct gomp_dependers_vec)
                                           + 6 * sizeof (struct gomp_task *));
                          tsk->dependers->n_elem = 1;
                          tsk->dependers->allocated = 6;
                          tsk->dependers->elem[0] = task;
                          task->num_dependees++;
                          continue;
                        }
                      /* We already have some other dependency on tsk
                         from earlier depend clause.  */
                      else if (tsk->dependers->n_elem
                               && (tsk->dependers->elem[tsk->dependers->n_elem
                                                        - 1]
                                   == task))
                        continue;
                      else if (tsk->dependers->n_elem
                               == tsk->dependers->allocated)
                        {
                          tsk->dependers->allocated
                            = tsk->dependers->allocated * 2 + 2;
                          tsk->dependers
                            = gomp_realloc (tsk->dependers,
                                            sizeof (struct gomp_dependers_vec)
                                            + (tsk->dependers->allocated
                                               * sizeof (struct gomp_task *)));
                        }
                      tsk->dependers->elem[tsk->dependers->n_elem++] = task;
                      task->num_dependees++;
                    }
                  task->depend[i].next = *slot;
                  (*slot)->prev = &task->depend[i];
                }
              *slot = &task->depend[i];

              /* There is no need to store more than one depend({,in}out:)
                 task per address in the hash table chain for the purpose
                 of creation of deferred tasks, because each out
                 depends on all earlier outs, thus it is enough to record
                 just the last depend({,in}out:).  For depend(in:), we need
                 to keep all of the previous ones not terminated yet, because
                 a later depend({,in}out:) might need to depend on all of
                 them.  So, if the new task's clause is depend({,in}out:),
                 we know there is at most one other depend({,in}out:) clause
                 in the list (out).  For non-deferred tasks we want to see
                 all outs, so they are moved to the end of the chain,
                 after first redundant_out entry all following entries
                 should be redundant_out.  */
              if (!task->depend[i].is_in && out)
                {
                  if (out != last)
                    {
                      out->next->prev = out->prev;
                      out->prev->next = out->next;
                      out->next = last->next;
                      out->prev = last;
                      last->next = out;
                      if (out->next)
                        out->next->prev = out;
                    }
                  out->redundant_out = true;
                }
            }
          if (task->num_dependees)
            {
              gomp_mutex_unlock (&team->task_lock);
              return;
            }
        }
      if (parent->children)
        {
          task->next_child = parent->children;
          task->prev_child = parent->children->prev_child;
          task->next_child->prev_child = task;
          task->prev_child->next_child = task;
        }
      else
        {
          task->next_child = task;
          task->prev_child = task;
        }
      parent->children = task;
      if (taskgroup)
        {
          if (taskgroup->children)
            {
              task->next_taskgroup = taskgroup->children;
              task->prev_taskgroup = taskgroup->children->prev_taskgroup;
              task->next_taskgroup->prev_taskgroup = task;
              task->prev_taskgroup->next_taskgroup = task;
            }
          else
            {
              task->next_taskgroup = task;
              task->prev_taskgroup = task;
            }
          taskgroup->children = task;
        }
      if (team->task_queue)
        {
          task->next_queue = team->task_queue;
          task->prev_queue = team->task_queue->prev_queue;
          task->next_queue->prev_queue = task;
          task->prev_queue->next_queue = task;
        }
      else
        {
          task->next_queue = task;
          task->prev_queue = task;
          team->task_queue = task;
        }
      ++team->task_count;
      ++team->task_queued_count;
      gomp_team_barrier_set_task_pending (&team->barrier);
      do_wake = team->task_running_count + !parent->in_tied_task
                < team->nthreads;
      gomp_mutex_unlock (&team->task_lock);
      if (do_wake)
        gomp_team_barrier_wake (&team->barrier, 1);
    }
}

static inline bool
gomp_task_run_pre (struct gomp_task *child_task, struct gomp_task *parent,
                   struct gomp_taskgroup *taskgroup, struct gomp_team *team)
{
  if (parent)
    {
      if (parent->children == child_task)
        parent->children = child_task->next_child;
      if (__builtin_expect (child_task->parent_depends_on, 0)
          && parent->taskwait->last_parent_depends_on == child_task)
        {
          if (child_task->prev_child->kind == GOMP_TASK_WAITING
              && child_task->prev_child->parent_depends_on)
            parent->taskwait->last_parent_depends_on = child_task->prev_child;
          else
            parent->taskwait->last_parent_depends_on = NULL;
        }
    }
  if (taskgroup && taskgroup->children == child_task)
    taskgroup->children = child_task->next_taskgroup;
  child_task->prev_queue->next_queue = child_task->next_queue;
  child_task->next_queue->prev_queue = child_task->prev_queue;
  if (team->task_queue == child_task)
    {
      if (child_task->next_queue != child_task)
        team->task_queue = child_task->next_queue;
      else
        team->task_queue = NULL;
    }
  child_task->kind = GOMP_TASK_TIED;
  if (--team->task_queued_count == 0)
    gomp_team_barrier_clear_task_pending (&team->barrier);
  if ((gomp_team_barrier_cancelled (&team->barrier)
       || (taskgroup && taskgroup->cancelled))
      && !child_task->copy_ctors_done)
    return true;
  return false;
}

static void
gomp_task_run_post_handle_depend_hash (struct gomp_task *child_task)
{
  struct gomp_task *parent = child_task->parent;
  size_t i;

  for (i = 0; i < child_task->depend_count; i++)
    if (!child_task->depend[i].redundant)
      {
        if (child_task->depend[i].next)
          child_task->depend[i].next->prev = child_task->depend[i].prev;
        if (child_task->depend[i].prev)
          child_task->depend[i].prev->next = child_task->depend[i].next;
        else
          {
            hash_entry_type *slot
              = htab_find_slot (&parent->depend_hash, &child_task->depend[i],
                                NO_INSERT);
            if (*slot != &child_task->depend[i])
              abort ();
            if (child_task->depend[i].next)
              *slot = child_task->depend[i].next;
            else
              htab_clear_slot (parent->depend_hash, slot);
          }
      }
}

static size_t
gomp_task_run_post_handle_dependers (struct gomp_task *child_task,
                                     struct gomp_team *team)
{
  struct gomp_task *parent = child_task->parent;
  size_t i, count = child_task->dependers->n_elem, ret = 0;
  for (i = 0; i < count; i++)
    {
      struct gomp_task *task = child_task->dependers->elem[i];
      if (--task->num_dependees != 0)
        continue;

      struct gomp_taskgroup *taskgroup = task->taskgroup;
      if (parent)
        {
          if (parent->children)
            {
              /* If parent is in gomp_task_maybe_wait_for_dependencies
                 and it doesn't need to wait for this task, put it after
                 all ready to run tasks it needs to wait for.  */
              if (parent->taskwait && parent->taskwait->last_parent_depends_on
                  && !task->parent_depends_on)
                {
                  struct gomp_task *last_parent_depends_on
                    = parent->taskwait->last_parent_depends_on;
                  task->next_child = last_parent_depends_on->next_child;
                  task->prev_child = last_parent_depends_on;
                }
              else
                {
                  task->next_child = parent->children;
                  task->prev_child = parent->children->prev_child;
                  parent->children = task;
                }
              task->next_child->prev_child = task;
              task->prev_child->next_child = task;
            }
          else
            {
              task->next_child = task;
              task->prev_child = task;
              parent->children = task;
            }
          if (parent->taskwait)
            {
              if (parent->taskwait->in_taskwait)
                {
                  parent->taskwait->in_taskwait = false;
                  gomp_sem_post (&parent->taskwait->taskwait_sem);
                }
              else if (parent->taskwait->in_depend_wait)
                {
                  parent->taskwait->in_depend_wait = false;
                  gomp_sem_post (&parent->taskwait->taskwait_sem);
                }
              if (parent->taskwait->last_parent_depends_on == NULL
                  && task->parent_depends_on)
                parent->taskwait->last_parent_depends_on = task;
            }
        }
      if (taskgroup)
        {
          if (taskgroup->children)
            {
              task->next_taskgroup = taskgroup->children;
              task->prev_taskgroup = taskgroup->children->prev_taskgroup;
              task->next_taskgroup->prev_taskgroup = task;
              task->prev_taskgroup->next_taskgroup = task;
            }
          else
            {
              task->next_taskgroup = task;
              task->prev_taskgroup = task;
            }
          taskgroup->children = task;
          if (taskgroup->in_taskgroup_wait)
            {
              taskgroup->in_taskgroup_wait = false;
              gomp_sem_post (&taskgroup->taskgroup_sem);
            }
        }
      if (team->task_queue)
        {
          task->next_queue = team->task_queue;
          task->prev_queue = team->task_queue->prev_queue;
          task->next_queue->prev_queue = task;
          task->prev_queue->next_queue = task;
        }
      else
        {
          task->next_queue = task;
          task->prev_queue = task;
          team->task_queue = task;
        }
      ++team->task_count;
      ++team->task_queued_count;
      ++ret;
    }
  free (child_task->dependers);
  child_task->dependers = NULL;
  if (ret > 1)
    gomp_team_barrier_set_task_pending (&team->barrier);
  return ret;
}

static inline size_t
gomp_task_run_post_handle_depend (struct gomp_task *child_task,
                                  struct gomp_team *team)
{
  if (child_task->depend_count == 0)
    return 0;

  /* If parent is gone already, the hash table is freed and nothing
     will use the hash table anymore, no need to remove anything from it.  */
  if (child_task->parent != NULL)
    gomp_task_run_post_handle_depend_hash (child_task);

  if (child_task->dependers == NULL)
    return 0;

  return gomp_task_run_post_handle_dependers (child_task, team);
}

static inline void
gomp_task_run_post_remove_parent (struct gomp_task *child_task)
{
  struct gomp_task *parent = child_task->parent;
  if (parent == NULL)
    return;
  if (__builtin_expect (child_task->parent_depends_on, 0)
      && --parent->taskwait->n_depend == 0
      && parent->taskwait->in_depend_wait)
    {
      parent->taskwait->in_depend_wait = false;
      gomp_sem_post (&parent->taskwait->taskwait_sem);
    }
  child_task->prev_child->next_child = child_task->next_child;
  child_task->next_child->prev_child = child_task->prev_child;
  if (parent->children != child_task)
    return;
  if (child_task->next_child != child_task)
    parent->children = child_task->next_child;
  else
    {
      /* We access task->children in GOMP_taskwait
         outside of the task lock mutex region, so
         need a release barrier here to ensure memory
         written by child_task->fn above is flushed
         before the NULL is written.  */
      __atomic_store_n (&parent->children, NULL, MEMMODEL_RELEASE);
      if (parent->taskwait && parent->taskwait->in_taskwait)
        {
          parent->taskwait->in_taskwait = false;
          gomp_sem_post (&parent->taskwait->taskwait_sem);
        }
    }
}

static inline void
gomp_task_run_post_remove_taskgroup (struct gomp_task *child_task)
{
  struct gomp_taskgroup *taskgroup = child_task->taskgroup;
  if (taskgroup == NULL)
    return;
  child_task->prev_taskgroup->next_taskgroup = child_task->next_taskgroup;
  child_task->next_taskgroup->prev_taskgroup = child_task->prev_taskgroup;
  if (taskgroup->num_children > 1)
    --taskgroup->num_children;
  else
    {
      /* We access taskgroup->num_children in GOMP_taskgroup_end
         outside of the task lock mutex region, so
         need a release barrier here to ensure memory
         written by child_task->fn above is flushed
         before the NULL is written.  */
      __atomic_store_n (&taskgroup->num_children, 0, MEMMODEL_RELEASE);
    }
  if (taskgroup->children != child_task)
    return;
  if (child_task->next_taskgroup != child_task)
    taskgroup->children = child_task->next_taskgroup;
  else
    {
      taskgroup->children = NULL;
      if (taskgroup->in_taskgroup_wait)
        {
          taskgroup->in_taskgroup_wait = false;
          gomp_sem_post (&taskgroup->taskgroup_sem);
        }
    }
}

void
gomp_barrier_handle_tasks (gomp_barrier_state_t state)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_team *team = thr->ts.team;
  struct gomp_task *task = thr->task;
  struct gomp_task *child_task = NULL;
  struct gomp_task *to_free = NULL;
  int do_wake = 0;

  gomp_mutex_lock (&team->task_lock);
  if (gomp_barrier_last_thread (state))
    {
      if (team->task_count == 0)
        {
          gomp_team_barrier_done (&team->barrier, state);
          gomp_mutex_unlock (&team->task_lock);
          gomp_team_barrier_wake (&team->barrier, 0);
          return;
        }
      gomp_team_barrier_set_waiting_for_tasks (&team->barrier);
    }

  while (1)
    {
      bool cancelled = false;
      if (team->task_queue != NULL)
        {
          child_task = team->task_queue;
          cancelled = gomp_task_run_pre (child_task, child_task->parent,
                                         child_task->taskgroup, team);
          if (__builtin_expect (cancelled, 0))
            {
              if (to_free)
                {
                  gomp_finish_task (to_free);
                  free (to_free);
                  to_free = NULL;
                }
              goto finish_cancelled;
            }
          team->task_running_count++;
          child_task->in_tied_task = true;
        }
      gomp_mutex_unlock (&team->task_lock);
      if (do_wake)
        {
          gomp_team_barrier_wake (&team->barrier, do_wake);
          do_wake = 0;
        }
      if (to_free)
        {
          gomp_finish_task (to_free);
          free (to_free);
          to_free = NULL;
        }
      if (child_task)
        {
          thr->task = child_task;
          child_task->fn (child_task->fn_data);
          thr->task = task;
        }
      else
        return;
      gomp_mutex_lock (&team->task_lock);
      if (child_task)
        {
        finish_cancelled:;
          size_t new_tasks
            = gomp_task_run_post_handle_depend (child_task, team);
          gomp_task_run_post_remove_parent (child_task);
          gomp_clear_parent (child_task->children);
          gomp_task_run_post_remove_taskgroup (child_task);
          to_free = child_task;
          child_task = NULL;
          if (!cancelled)
            team->task_running_count--;
          if (new_tasks > 1)
            {
              do_wake = team->nthreads - team->task_running_count;
              if (do_wake > new_tasks)
                do_wake = new_tasks;
            }
          if (--team->task_count == 0
              && gomp_team_barrier_waiting_for_tasks (&team->barrier))
            {
              gomp_team_barrier_done (&team->barrier, state);
              gomp_mutex_unlock (&team->task_lock);
              gomp_team_barrier_wake (&team->barrier, 0);
              gomp_mutex_lock (&team->task_lock);
            }
        }
    }
}

/* Called when encountering a taskwait directive.  */
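
/* Illustrative sketch (not from the original source): the compiler
   lowers

     #pragma omp taskwait

   to a plain call

     GOMP_taskwait ();

   which returns once all child tasks of the current task have
   completed (descendants of those children are not waited for).  */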

void
GOMP_taskwait (void)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_team *team = thr->ts.team;
  struct gomp_task *task = thr->task;
  struct gomp_task *child_task = NULL;
  struct gomp_task *to_free = NULL;
  struct gomp_taskwait taskwait;
  int do_wake = 0;

  /* The acquire barrier on load of task->children here synchronizes
     with the write of a NULL in gomp_task_run_post_remove_parent.  It is
     not necessary that we synchronize with other non-NULL writes at
     this point, but we must ensure that all writes to memory by a
     child thread task work function are seen before we exit from
     GOMP_taskwait.  */
  if (task == NULL
      || __atomic_load_n (&task->children, MEMMODEL_ACQUIRE) == NULL)
    return;

  memset (&taskwait, 0, sizeof (taskwait));
  gomp_mutex_lock (&team->task_lock);
  while (1)
    {
      bool cancelled = false;
      if (task->children == NULL)
        {
          bool destroy_taskwait = task->taskwait != NULL;
          task->taskwait = NULL;
          gomp_mutex_unlock (&team->task_lock);
          if (to_free)
            {
              gomp_finish_task (to_free);
              free (to_free);
            }
          if (destroy_taskwait)
            gomp_sem_destroy (&taskwait.taskwait_sem);
          return;
        }
      if (task->children->kind == GOMP_TASK_WAITING)
        {
          child_task = task->children;
          cancelled
            = gomp_task_run_pre (child_task, task, child_task->taskgroup,
                                 team);
          if (__builtin_expect (cancelled, 0))
            {
              if (to_free)
                {
                  gomp_finish_task (to_free);
                  free (to_free);
                  to_free = NULL;
                }
              goto finish_cancelled;
            }
        }
      else
        {
          /* All tasks we are waiting for are already running
             in other threads.  Wait for them.  */
          if (task->taskwait == NULL)
            {
              taskwait.in_depend_wait = false;
              gomp_sem_init (&taskwait.taskwait_sem, 0);
              task->taskwait = &taskwait;
            }
          taskwait.in_taskwait = true;
        }
      gomp_mutex_unlock (&team->task_lock);
      if (do_wake)
        {
          gomp_team_barrier_wake (&team->barrier, do_wake);
          do_wake = 0;
        }
      if (to_free)
        {
          gomp_finish_task (to_free);
          free (to_free);
          to_free = NULL;
        }
      if (child_task)
        {
          thr->task = child_task;
          child_task->fn (child_task->fn_data);
          thr->task = task;
        }
      else
        gomp_sem_wait (&taskwait.taskwait_sem);
      gomp_mutex_lock (&team->task_lock);
      if (child_task)
        {
        finish_cancelled:;
          size_t new_tasks
            = gomp_task_run_post_handle_depend (child_task, team);
          child_task->prev_child->next_child = child_task->next_child;
          child_task->next_child->prev_child = child_task->prev_child;
          if (task->children == child_task)
            {
              if (child_task->next_child != child_task)
                task->children = child_task->next_child;
              else
                task->children = NULL;
            }
          gomp_clear_parent (child_task->children);
          gomp_task_run_post_remove_taskgroup (child_task);
          to_free = child_task;
          child_task = NULL;
          team->task_count--;
          if (new_tasks > 1)
            {
              do_wake = team->nthreads - team->task_running_count
                        - !task->in_tied_task;
              if (do_wake > new_tasks)
                do_wake = new_tasks;
            }
        }
    }
}

/* This is like GOMP_taskwait, but we only wait for tasks that the
   upcoming task depends on.  */
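
/* Illustrative note (derived from the caller in GOMP_task, not from
   the original source): this path is taken for an undeferred task with
   depend clauses, e.g.

     #pragma omp task if (0) depend(inout: x)

   where the encountering thread must first wait for earlier deferred
   sibling tasks whose depend clauses conflict on the same addresses.
   DEPEND uses the same layout as in GOMP_task.  */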

static void
gomp_task_maybe_wait_for_dependencies (void **depend)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_task *task = thr->task;
  struct gomp_team *team = thr->ts.team;
  struct gomp_task_depend_entry elem, *ent = NULL;
  struct gomp_taskwait taskwait;
  struct gomp_task *last_parent_depends_on = NULL;
  size_t ndepend = (uintptr_t) depend[0];
  size_t nout = (uintptr_t) depend[1];
  size_t i;
  size_t num_awaited = 0;
  struct gomp_task *child_task = NULL;
  struct gomp_task *to_free = NULL;
  int do_wake = 0;

  gomp_mutex_lock (&team->task_lock);
  for (i = 0; i < ndepend; i++)
    {
      elem.addr = depend[i + 2];
      ent = htab_find (task->depend_hash, &elem);
      for (; ent; ent = ent->next)
        if (i >= nout && ent->is_in)
          continue;
        else
          {
            struct gomp_task *tsk = ent->task;
            if (!tsk->parent_depends_on)
              {
                tsk->parent_depends_on = true;
                ++num_awaited;
                if (tsk->num_dependees == 0 && tsk->kind == GOMP_TASK_WAITING)
                  {
                    /* If a task we need to wait for is not already
                       running and is ready to be scheduled, move it
                       to front, so that we run it as soon as possible.  */
                    if (last_parent_depends_on)
                      {
                        tsk->prev_child->next_child = tsk->next_child;
                        tsk->next_child->prev_child = tsk->prev_child;
                        tsk->prev_child = last_parent_depends_on;
                        tsk->next_child = last_parent_depends_on->next_child;
                        tsk->prev_child->next_child = tsk;
                        tsk->next_child->prev_child = tsk;
                      }
                    else if (tsk != task->children)
                      {
                        tsk->prev_child->next_child = tsk->next_child;
                        tsk->next_child->prev_child = tsk->prev_child;
                        tsk->prev_child = task->children;
                        tsk->next_child = task->children->next_child;
                        task->children = tsk;
                        tsk->prev_child->next_child = tsk;
                        tsk->next_child->prev_child = tsk;
                      }
                    last_parent_depends_on = tsk;
                  }
              }
          }
    }
  if (num_awaited == 0)
    {
      gomp_mutex_unlock (&team->task_lock);
      return;
    }

  memset (&taskwait, 0, sizeof (taskwait));
  taskwait.n_depend = num_awaited;
  taskwait.last_parent_depends_on = last_parent_depends_on;
  gomp_sem_init (&taskwait.taskwait_sem, 0);
  task->taskwait = &taskwait;

  while (1)
    {
      bool cancelled = false;
      if (taskwait.n_depend == 0)
        {
          task->taskwait = NULL;
          gomp_mutex_unlock (&team->task_lock);
          if (to_free)
            {
              gomp_finish_task (to_free);
              free (to_free);
            }
          gomp_sem_destroy (&taskwait.taskwait_sem);
          return;
        }
      if (task->children->kind == GOMP_TASK_WAITING)
        {
          child_task = task->children;
          cancelled
            = gomp_task_run_pre (child_task, task, child_task->taskgroup,
                                 team);
          if (__builtin_expect (cancelled, 0))
            {
              if (to_free)
                {
                  gomp_finish_task (to_free);
                  free (to_free);
                  to_free = NULL;
                }
              goto finish_cancelled;
            }
        }
      else
        /* All tasks we are waiting for are already running
           in other threads.  Wait for them.  */
        taskwait.in_depend_wait = true;
      gomp_mutex_unlock (&team->task_lock);
      if (do_wake)
        {
          gomp_team_barrier_wake (&team->barrier, do_wake);
          do_wake = 0;
        }
      if (to_free)
        {
          gomp_finish_task (to_free);
          free (to_free);
          to_free = NULL;
        }
      if (child_task)
        {
          thr->task = child_task;
          child_task->fn (child_task->fn_data);
          thr->task = task;
        }
      else
        gomp_sem_wait (&taskwait.taskwait_sem);
      gomp_mutex_lock (&team->task_lock);
      if (child_task)
        {
        finish_cancelled:;
          size_t new_tasks
            = gomp_task_run_post_handle_depend (child_task, team);
          if (child_task->parent_depends_on)
            --taskwait.n_depend;
          child_task->prev_child->next_child = child_task->next_child;
          child_task->next_child->prev_child = child_task->prev_child;
          if (task->children == child_task)
            {
              if (child_task->next_child != child_task)
                task->children = child_task->next_child;
              else
                task->children = NULL;
            }
          gomp_clear_parent (child_task->children);
          gomp_task_run_post_remove_taskgroup (child_task);
          to_free = child_task;
          child_task = NULL;
          team->task_count--;
          if (new_tasks > 1)
            {
              do_wake = team->nthreads - team->task_running_count
                        - !task->in_tied_task;
              if (do_wake > new_tasks)
                do_wake = new_tasks;
            }
        }
    }
}

/* Called when encountering a taskyield directive.  */

void
GOMP_taskyield (void)
{
  /* Nothing at the moment.  */
}
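
/* Illustrative sketch (not from the original source): a taskgroup
   construct

     #pragma omp taskgroup
     {
       ...
     }

   is bracketed by the compiler with calls to GOMP_taskgroup_start and
   GOMP_taskgroup_end below; the latter returns only after all tasks
   generated in the body, and all of their descendant tasks, have
   completed.  */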

void
GOMP_taskgroup_start (void)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_team *team = thr->ts.team;
  struct gomp_task *task = thr->task;
  struct gomp_taskgroup *taskgroup;

  /* If team is NULL, all tasks are executed as
     GOMP_TASK_IFFALSE tasks and thus all children tasks of
     taskgroup and their descendant tasks will be finished
     by the time GOMP_taskgroup_end is called.  */
  if (team == NULL)
    return;
  taskgroup = gomp_malloc (sizeof (struct gomp_taskgroup));
  taskgroup->prev = task->taskgroup;
  taskgroup->children = NULL;
  taskgroup->in_taskgroup_wait = false;
  taskgroup->cancelled = false;
  taskgroup->num_children = 0;
  gomp_sem_init (&taskgroup->taskgroup_sem, 0);
  task->taskgroup = taskgroup;
}

void
GOMP_taskgroup_end (void)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_team *team = thr->ts.team;
  struct gomp_task *task = thr->task;
  struct gomp_taskgroup *taskgroup;
  struct gomp_task *child_task = NULL;
  struct gomp_task *to_free = NULL;
  int do_wake = 0;

  if (team == NULL)
    return;
  taskgroup = task->taskgroup;

  /* The acquire barrier on load of taskgroup->num_children here
     synchronizes with the write of 0 in gomp_task_run_post_remove_taskgroup.
     It is not necessary that we synchronize with other non-0 writes at
     this point, but we must ensure that all writes to memory by a
     child thread task work function are seen before we exit from
     GOMP_taskgroup_end.  */
  if (__atomic_load_n (&taskgroup->num_children, MEMMODEL_ACQUIRE) == 0)
    goto finish;

  gomp_mutex_lock (&team->task_lock);
  while (1)
    {
      bool cancelled = false;
      if (taskgroup->children == NULL)
        {
          if (taskgroup->num_children)
            {
              if (task->children == NULL)
                goto do_wait;
              child_task = task->children;
            }
          else
            {
              gomp_mutex_unlock (&team->task_lock);
              if (to_free)
                {
                  gomp_finish_task (to_free);
                  free (to_free);
                }
              goto finish;
            }
        }
      else
        child_task = taskgroup->children;
      if (child_task->kind == GOMP_TASK_WAITING)
        {
          cancelled
            = gomp_task_run_pre (child_task, child_task->parent, taskgroup,
                                 team);
          if (__builtin_expect (cancelled, 0))
            {
              if (to_free)
                {
                  gomp_finish_task (to_free);
                  free (to_free);
                  to_free = NULL;
                }
              goto finish_cancelled;
            }
        }
      else
        {
          child_task = NULL;
        do_wait:
          /* All tasks we are waiting for are already running
             in other threads.  Wait for them.  */
          taskgroup->in_taskgroup_wait = true;
        }
      gomp_mutex_unlock (&team->task_lock);
      if (do_wake)
        {
          gomp_team_barrier_wake (&team->barrier, do_wake);
          do_wake = 0;
        }
      if (to_free)
        {
          gomp_finish_task (to_free);
          free (to_free);
          to_free = NULL;
        }
      if (child_task)
        {
          thr->task = child_task;
          child_task->fn (child_task->fn_data);
          thr->task = task;
        }
      else
        gomp_sem_wait (&taskgroup->taskgroup_sem);
      gomp_mutex_lock (&team->task_lock);
      if (child_task)
        {
        finish_cancelled:;
          size_t new_tasks
            = gomp_task_run_post_handle_depend (child_task, team);
          gomp_task_run_post_remove_parent (child_task);
          gomp_clear_parent (child_task->children);
          gomp_task_run_post_remove_taskgroup (child_task);
          to_free = child_task;
          child_task = NULL;
          team->task_count--;
          if (new_tasks > 1)
            {
              do_wake = team->nthreads - team->task_running_count
                        - !task->in_tied_task;
              if (do_wake > new_tasks)
                do_wake = new_tasks;
            }
        }
    }

 finish:
  task->taskgroup = taskgroup->prev;
  gomp_sem_destroy (&taskgroup->taskgroup_sem);
  free (taskgroup);
}
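
/* omp_in_final is the user-callable OpenMP API routine.  Illustrative
   use (not from the original source):

     #pragma omp task final (1)
       if (omp_in_final ())
         ...

   Inside a final task it returns nonzero; GOMP_task propagates
   final_task to every task created in a final region, so descendants
   report nonzero as well.  */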

int
omp_in_final (void)
{
  struct gomp_thread *thr = gomp_thread ();
  return thr->task && thr->task->final_task;
}

ialias (omp_in_final)