/* Copyright (C) 2007-2013 Free Software Foundation, Inc.
   Contributed by Richard Henderson <rth@redhat.com>.

   This file is part of the GNU OpenMP Library (libgomp).

   Libgomp is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
   FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */

/* This file handles the maintenance of tasks in response to task
   creation and termination.  */

#include "libgomp.h"
#include <stdlib.h>
#include <string.h>
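
/* For orientation: a construct such as

       #pragma omp task firstprivate (x)
       do_work (x);

   is outlined by the compiler into a helper function, and the construct
   itself becomes, roughly, a call of the form

       GOMP_task (outlined_fn, &captured_data, NULL,
		  sizeof (captured_data), __alignof__ (captured_data),
		  if_clause_value, flags);

   "outlined_fn", "captured_data", "if_clause_value" and "flags" are
   illustrative names only; the exact calling sequence GCC emits may
   differ.  */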

/* Create a new task data structure.  */

void
gomp_init_task (struct gomp_task *task, struct gomp_task *parent_task,
		struct gomp_task_icv *prev_icv)
{
  task->parent = parent_task;
  task->icv = *prev_icv;
  task->kind = GOMP_TASK_IMPLICIT;
  task->in_taskwait = false;
  task->in_tied_task = false;
  task->final_task = false;
  task->children = NULL;
  gomp_sem_init (&task->taskwait_sem, 0);
}

/* Clean up a task, after completing it.  */

void
gomp_end_task (void)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_task *task = thr->task;

  gomp_finish_task (task);
  thr->task = task->parent;
}
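
/* Clear the parent pointer of every task on the circular CHILDREN list,
   so that the children no longer refer back to a task that is going
   away.  */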
static inline void
gomp_clear_parent (struct gomp_task *children)
{
  struct gomp_task *task = children;

  if (task)
    do
      {
	task->parent = NULL;
	task = task->next_child;
      }
    while (task != children);
}

/* Called when encountering an explicit task directive.  If IF_CLAUSE is
   false, then we must not delay in executing the task.  If the UNTIED
   flag bit is set in FLAGS, then the task may be executed by any member
   of the team.  */
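/* Judging from the uses below, FLAGS appears to carry the untied request
   in bit 0 (tested as flags & 1) and the final-task bit in bit 1 (tested
   as flags & 2).  */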

void
GOMP_task (void (*fn) (void *), void *data, void (*cpyfn) (void *, void *),
	   long arg_size, long arg_align, bool if_clause, unsigned flags)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_team *team = thr->ts.team;

#ifdef HAVE_BROKEN_POSIX_SEMAPHORES
  /* If pthread_mutex_* is used for omp_*lock*, then each task must be
     tied to one thread all the time.  This means UNTIED tasks must be
     tied and if CPYFN is non-NULL IF(0) must be forced, as CPYFN
     might be running on a different thread than FN.  */
  if (cpyfn)
    if_clause = false;
  if (flags & 1)
    flags &= ~1;
#endif
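
  /* Run the task undeferred, on the encountering thread, when the IF
     clause evaluated to false, there is no team, the generating task is
     final, or more than 64 tasks per thread are already outstanding.  */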
  if (!if_clause || team == NULL
      || (thr->task && thr->task->final_task)
      || team->task_count > 64 * team->nthreads)
    {
      struct gomp_task task;

      gomp_init_task (&task, thr->task, gomp_icv (false));
      task.kind = GOMP_TASK_IFFALSE;
      task.final_task = (thr->task && thr->task->final_task) || (flags & 2);
      if (thr->task)
	task.in_tied_task = thr->task->in_tied_task;
      thr->task = &task;
      if (__builtin_expect (cpyfn != NULL, 0))
	{
	  char buf[arg_size + arg_align - 1];
	  char *arg = (char *) (((uintptr_t) buf + arg_align - 1)
				& ~(uintptr_t) (arg_align - 1));
	  cpyfn (arg, data);
	  fn (arg);
	}
      else
	fn (data);
      /* Access to "children" is normally done inside a task_lock
	 mutex region, but the only way this particular task.children
	 can be set is if this thread's task work function (fn)
	 creates children.  So since the setter is *this* thread, we
	 need no barriers here when testing for non-NULL.  We can have
	 task.children set by the current thread then changed by a
	 child thread, but seeing a stale non-NULL value is not a
	 problem.  Once past the task_lock acquisition, this thread
	 will see the real value of task.children.  */
      if (task.children != NULL)
	{
	  gomp_mutex_lock (&team->task_lock);
	  gomp_clear_parent (task.children);
	  gomp_mutex_unlock (&team->task_lock);
	}
      gomp_end_task ();
    }
  else
    {
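      /* Deferred path: allocate a task plus room for a copy of the
	 arguments, link it onto the parent's children list and the
	 team's task queue, and possibly wake a thread to run it.  */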
      struct gomp_task *task;
      struct gomp_task *parent = thr->task;
      char *arg;
      bool do_wake;

      task = gomp_malloc (sizeof (*task) + arg_size + arg_align - 1);
      arg = (char *) (((uintptr_t) (task + 1) + arg_align - 1)
		      & ~(uintptr_t) (arg_align - 1));
      gomp_init_task (task, parent, gomp_icv (false));
      task->kind = GOMP_TASK_IFFALSE;
      task->in_tied_task = parent->in_tied_task;
      thr->task = task;
      if (cpyfn)
	cpyfn (arg, data);
      else
	memcpy (arg, data, arg_size);
      thr->task = parent;
      task->kind = GOMP_TASK_WAITING;
      task->fn = fn;
      task->fn_data = arg;
      task->in_tied_task = true;
      task->final_task = (flags & 2) >> 1;
      gomp_mutex_lock (&team->task_lock);
      if (parent->children)
	{
	  task->next_child = parent->children;
	  task->prev_child = parent->children->prev_child;
	  task->next_child->prev_child = task;
	  task->prev_child->next_child = task;
	}
      else
	{
	  task->next_child = task;
	  task->prev_child = task;
	}
      parent->children = task;
      if (team->task_queue)
	{
	  task->next_queue = team->task_queue;
	  task->prev_queue = team->task_queue->prev_queue;
	  task->next_queue->prev_queue = task;
	  task->prev_queue->next_queue = task;
	}
      else
	{
	  task->next_queue = task;
	  task->prev_queue = task;
	  team->task_queue = task;
	}
      ++team->task_count;
      gomp_team_barrier_set_task_pending (&team->barrier);
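      /* Wake a thread only if one may actually be idle: the encountering
	 thread appears to be counted as busy here unless it is already
	 inside a tied task and thus reflected in task_running_count.  */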
      do_wake = team->task_running_count + !parent->in_tied_task
		< team->nthreads;
      gomp_mutex_unlock (&team->task_lock);
      if (do_wake)
	gomp_team_barrier_wake (&team->barrier, 1);
    }
}
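
/* Run queued tasks from a thread that has reached the team barrier:
   repeatedly dequeue a task from the team's task queue, execute it, and
   retire it, releasing the barrier once no tasks remain.  */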
void
gomp_barrier_handle_tasks (gomp_barrier_state_t state)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_team *team = thr->ts.team;
  struct gomp_task *task = thr->task;
  struct gomp_task *child_task = NULL;
  struct gomp_task *to_free = NULL;

  gomp_mutex_lock (&team->task_lock);
  if (gomp_barrier_last_thread (state))
    {
      if (team->task_count == 0)
	{
	  gomp_team_barrier_done (&team->barrier, state);
	  gomp_mutex_unlock (&team->task_lock);
	  gomp_team_barrier_wake (&team->barrier, 0);
	  return;
	}
      gomp_team_barrier_set_waiting_for_tasks (&team->barrier);
    }

  while (1)
    {
      if (team->task_queue != NULL)
	{
	  struct gomp_task *parent;

	  child_task = team->task_queue;
	  parent = child_task->parent;
	  if (parent && parent->children == child_task)
	    parent->children = child_task->next_child;
	  child_task->prev_queue->next_queue = child_task->next_queue;
	  child_task->next_queue->prev_queue = child_task->prev_queue;
	  if (child_task->next_queue != child_task)
	    team->task_queue = child_task->next_queue;
	  else
	    team->task_queue = NULL;
	  child_task->kind = GOMP_TASK_TIED;
	  team->task_running_count++;
	  if (team->task_count == team->task_running_count)
	    gomp_team_barrier_clear_task_pending (&team->barrier);
	}
      gomp_mutex_unlock (&team->task_lock);
      if (to_free)
	{
	  gomp_finish_task (to_free);
	  free (to_free);
	  to_free = NULL;
	}
      if (child_task)
	{
	  thr->task = child_task;
	  child_task->fn (child_task->fn_data);
	  thr->task = task;
	}
      else
	return;
      gomp_mutex_lock (&team->task_lock);
      if (child_task)
	{
	  struct gomp_task *parent = child_task->parent;
	  if (parent)
	    {
	      child_task->prev_child->next_child = child_task->next_child;
	      child_task->next_child->prev_child = child_task->prev_child;
	      if (parent->children == child_task)
		{
		  if (child_task->next_child != child_task)
		    parent->children = child_task->next_child;
		  else
		    {
		      /* We access task->children in GOMP_taskwait
			 outside of the task lock mutex region, so
			 need a release barrier here to ensure memory
			 written by child_task->fn above is flushed
			 before the NULL is written.  */
		      __atomic_store_n (&parent->children, NULL,
					MEMMODEL_RELEASE);
		      if (parent->in_taskwait)
			gomp_sem_post (&parent->taskwait_sem);
		    }
		}
	    }
	  gomp_clear_parent (child_task->children);
	  to_free = child_task;
	  child_task = NULL;
	  team->task_running_count--;
	  if (--team->task_count == 0
	      && gomp_team_barrier_waiting_for_tasks (&team->barrier))
	    {
	      gomp_team_barrier_done (&team->barrier, state);
	      gomp_mutex_unlock (&team->task_lock);
	      gomp_team_barrier_wake (&team->barrier, 0);
	      gomp_mutex_lock (&team->task_lock);
	    }
	}
    }
}

/* Called when encountering a taskwait directive.  */

void
GOMP_taskwait (void)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_team *team = thr->ts.team;
  struct gomp_task *task = thr->task;
  struct gomp_task *child_task = NULL;
  struct gomp_task *to_free = NULL;

  /* The acquire barrier on load of task->children here synchronizes
     with the write of a NULL in gomp_barrier_handle_tasks.  It is
     not necessary that we synchronize with other non-NULL writes at
     this point, but we must ensure that all writes to memory by a
     child thread task work function are seen before we exit from
     GOMP_taskwait.  */
  if (task == NULL
      || __atomic_load_n (&task->children, MEMMODEL_ACQUIRE) == NULL)
    return;

  gomp_mutex_lock (&team->task_lock);
  while (1)
    {
      if (task->children == NULL)
	{
	  gomp_mutex_unlock (&team->task_lock);
	  if (to_free)
	    {
	      gomp_finish_task (to_free);
	      free (to_free);
	    }
	  return;
	}
      if (task->children->kind == GOMP_TASK_WAITING)
	{
	  child_task = task->children;
	  task->children = child_task->next_child;
	  child_task->prev_queue->next_queue = child_task->next_queue;
	  child_task->next_queue->prev_queue = child_task->prev_queue;
	  if (team->task_queue == child_task)
	    {
	      if (child_task->next_queue != child_task)
		team->task_queue = child_task->next_queue;
	      else
		team->task_queue = NULL;
	    }
	  child_task->kind = GOMP_TASK_TIED;
	  team->task_running_count++;
	  if (team->task_count == team->task_running_count)
	    gomp_team_barrier_clear_task_pending (&team->barrier);
	}
      else
	/* All tasks we are waiting for are already running
	   in other threads.  Wait for them.  */
	task->in_taskwait = true;
      gomp_mutex_unlock (&team->task_lock);
      if (to_free)
	{
	  gomp_finish_task (to_free);
	  free (to_free);
	  to_free = NULL;
	}
      if (child_task)
	{
	  thr->task = child_task;
	  child_task->fn (child_task->fn_data);
	  thr->task = task;
	}
      else
	{
	  gomp_sem_wait (&task->taskwait_sem);
	  task->in_taskwait = false;
	  return;
	}
      gomp_mutex_lock (&team->task_lock);
      if (child_task)
	{
	  child_task->prev_child->next_child = child_task->next_child;
	  child_task->next_child->prev_child = child_task->prev_child;
	  if (task->children == child_task)
	    {
	      if (child_task->next_child != child_task)
		task->children = child_task->next_child;
	      else
		task->children = NULL;
	    }
	  gomp_clear_parent (child_task->children);
	  to_free = child_task;
	  child_task = NULL;
	  team->task_count--;
	  team->task_running_count--;
	}
    }
}

/* Called when encountering a taskyield directive.  */

void
GOMP_taskyield (void)
{
  /* Nothing at the moment.  */
}
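
/* Return true if the task enclosing the caller, if any, is a final
   task; this is the body behind the omp_in_final library routine.  */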
int
omp_in_final (void)
{
  struct gomp_thread *thr = gomp_thread ();
  return thr->task && thr->task->final_task;
}

ialias (omp_in_final)