/* Copyright (C) 2015-2023 Free Software Foundation, Inc.
   Contributed by Jakub Jelinek <jakub@redhat.com>.

   This file is part of the GNU Offloading and Multi Processing Library
   (libgomp).

   Libgomp is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
   FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */
/* This file handles the taskloop construct.  It is included twice, once
   for the long and once for the unsigned long long variant.  */
/* Called when encountering an explicit task directive.  If IF_CLAUSE is
   false, then we must not delay in executing the task.  If UNTIED is true,
   then the task may be executed by any member of the team.  */
34 GOMP_taskloop (void (*fn
) (void *), void *data
, void (*cpyfn
) (void *, void *),
35 long arg_size
, long arg_align
, unsigned flags
,
36 unsigned long num_tasks
, int priority
,
37 TYPE start
, TYPE end
, TYPE step
)
39 struct gomp_thread
*thr
= gomp_thread ();
40 struct gomp_team
*team
= thr
->ts
.team
;
42 #ifdef HAVE_BROKEN_POSIX_SEMAPHORES
43 /* If pthread_mutex_* is used for omp_*lock*, then each task must be
44 tied to one thread all the time. This means UNTIED tasks must be
45 tied and if CPYFN is non-NULL IF(0) must be forced, as CPYFN
46 might be running on different thread than FN. */
48 flags
&= ~GOMP_TASK_FLAG_IF
;
49 flags
&= ~GOMP_TASK_FLAG_UNTIED
;
52 /* If parallel or taskgroup has been cancelled, don't start new tasks. */
53 if (team
&& gomp_team_barrier_cancelled (&team
->barrier
))
56 if ((flags
& (GOMP_TASK_FLAG_NOGROUP
| GOMP_TASK_FLAG_REDUCTION
))
57 == GOMP_TASK_FLAG_REDUCTION
)
59 struct gomp_data_head
{ TYPE t1
, t2
; uintptr_t *ptr
; };
60 uintptr_t *ptr
= ((struct gomp_data_head
*) data
)->ptr
;
61 /* Tell callers GOMP_taskgroup_reduction_register has not been
82 UTYPE n
= (end
- start
+ s
) / step
;
85 if (flags
& GOMP_TASK_FLAG_UP
)
89 n
= (end
- start
+ step
- 1) / step
;
95 n
= (start
- end
- step
- 1) / -step
;
99 TYPE task_step
= step
;
100 TYPE nfirst_task_step
= step
;
101 unsigned long nfirst
= n
;
102 if (flags
& GOMP_TASK_FLAG_GRAINSIZE
)
104 unsigned long grainsize
= num_tasks
;
106 num_tasks
= n
/ grainsize
;
108 UTYPE ndiv
= n
/ grainsize
;
110 if (num_tasks
!= ndiv
)
113 if ((flags
& GOMP_TASK_FLAG_STRICT
)
114 && num_tasks
!= ~0ULL)
116 UTYPE mod
= n
% grainsize
;
117 task_step
= (TYPE
) grainsize
* step
;
121 nfirst_task_step
= (TYPE
) mod
* step
;
123 task_step
= nfirst_task_step
;
125 nfirst
= num_tasks
- 2;
128 else if (num_tasks
<= 1)
131 task_step
= end
- start
;
133 else if (num_tasks
>= grainsize
139 UTYPE mul
= num_tasks
* grainsize
;
140 task_step
= (TYPE
) grainsize
* step
;
143 nfirst_task_step
= task_step
;
145 nfirst
= n
- mul
- 1;
150 UTYPE div
= n
/ num_tasks
;
151 UTYPE mod
= n
% num_tasks
;
152 task_step
= (TYPE
) div
* step
;
155 nfirst_task_step
= task_step
;
164 num_tasks
= team
? team
->nthreads
: 1;
169 UTYPE div
= n
/ num_tasks
;
170 UTYPE mod
= n
% num_tasks
;
171 task_step
= (TYPE
) div
* step
;
174 nfirst_task_step
= task_step
;
181 if (flags
& GOMP_TASK_FLAG_NOGROUP
)
183 if (__builtin_expect (gomp_cancel_var
, 0)
185 && thr
->task
->taskgroup
)
187 if (thr
->task
->taskgroup
->cancelled
)
189 if (thr
->task
->taskgroup
->workshare
190 && thr
->task
->taskgroup
->prev
191 && thr
->task
->taskgroup
->prev
->cancelled
)
197 ialias_call (GOMP_taskgroup_start
) ();
198 if (flags
& GOMP_TASK_FLAG_REDUCTION
)
200 struct gomp_data_head
{ TYPE t1
, t2
; uintptr_t *ptr
; };
201 uintptr_t *ptr
= ((struct gomp_data_head
*) data
)->ptr
;
202 ialias_call (GOMP_taskgroup_reduction_register
) (ptr
);
206 if (priority
> gomp_max_task_priority_var
)
207 priority
= gomp_max_task_priority_var
;
209 if ((flags
& GOMP_TASK_FLAG_IF
) == 0 || team
== NULL
210 || (thr
->task
&& thr
->task
->final_task
)
211 || team
->task_count
+ num_tasks
> 64 * team
->nthreads
)
214 if (__builtin_expect (cpyfn
!= NULL
, 0))
216 struct gomp_task task
[num_tasks
];
217 struct gomp_task
*parent
= thr
->task
;
218 arg_size
= (arg_size
+ arg_align
- 1) & ~(arg_align
- 1);
219 char buf
[num_tasks
* arg_size
+ arg_align
- 1];
220 char *arg
= (char *) (((uintptr_t) buf
+ arg_align
- 1)
221 & ~(uintptr_t) (arg_align
- 1));
222 char *orig_arg
= arg
;
223 for (i
= 0; i
< num_tasks
; i
++)
225 gomp_init_task (&task
[i
], parent
, gomp_icv (false));
226 task
[i
].priority
= priority
;
227 task
[i
].kind
= GOMP_TASK_UNDEFERRED
;
228 task
[i
].final_task
= (thr
->task
&& thr
->task
->final_task
)
229 || (flags
& GOMP_TASK_FLAG_FINAL
);
232 task
[i
].in_tied_task
= thr
->task
->in_tied_task
;
233 task
[i
].taskgroup
= thr
->task
->taskgroup
;
235 thr
->task
= &task
[i
];
240 for (i
= 0; i
< num_tasks
; i
++)
242 thr
->task
= &task
[i
];
243 ((TYPE
*)arg
)[0] = start
;
245 ((TYPE
*)arg
)[1] = start
;
247 task_step
= nfirst_task_step
;
250 if (!priority_queue_empty_p (&task
[i
].children_queue
,
253 gomp_mutex_lock (&team
->task_lock
);
254 gomp_clear_parent (&task
[i
].children_queue
);
255 gomp_mutex_unlock (&team
->task_lock
);
261 for (i
= 0; i
< num_tasks
; i
++)
263 struct gomp_task task
;
265 gomp_init_task (&task
, thr
->task
, gomp_icv (false));
266 task
.priority
= priority
;
267 task
.kind
= GOMP_TASK_UNDEFERRED
;
268 task
.final_task
= (thr
->task
&& thr
->task
->final_task
)
269 || (flags
& GOMP_TASK_FLAG_FINAL
);
272 task
.in_tied_task
= thr
->task
->in_tied_task
;
273 task
.taskgroup
= thr
->task
->taskgroup
;
276 ((TYPE
*)data
)[0] = start
;
278 ((TYPE
*)data
)[1] = start
;
280 task_step
= nfirst_task_step
;
282 if (!priority_queue_empty_p (&task
.children_queue
,
285 gomp_mutex_lock (&team
->task_lock
);
286 gomp_clear_parent (&task
.children_queue
);
287 gomp_mutex_unlock (&team
->task_lock
);
294 struct gomp_task
*tasks
[num_tasks
];
295 struct gomp_task
*parent
= thr
->task
;
296 struct gomp_taskgroup
*taskgroup
= parent
->taskgroup
;
301 for (i
= 0; i
< num_tasks
; i
++)
303 struct gomp_task
*task
304 = gomp_malloc (sizeof (*task
) + arg_size
+ arg_align
- 1);
306 arg
= (char *) (((uintptr_t) (task
+ 1) + arg_align
- 1)
307 & ~(uintptr_t) (arg_align
- 1));
308 gomp_init_task (task
, parent
, gomp_icv (false));
309 task
->priority
= priority
;
310 task
->kind
= GOMP_TASK_UNDEFERRED
;
311 task
->in_tied_task
= parent
->in_tied_task
;
312 task
->taskgroup
= taskgroup
;
317 task
->copy_ctors_done
= true;
320 memcpy (arg
, data
, arg_size
);
321 ((TYPE
*)arg
)[0] = start
;
323 ((TYPE
*)arg
)[1] = start
;
325 task_step
= nfirst_task_step
;
327 task
->kind
= GOMP_TASK_WAITING
;
330 task
->final_task
= (flags
& GOMP_TASK_FLAG_FINAL
) >> 1;
332 gomp_mutex_lock (&team
->task_lock
);
333 /* If parallel or taskgroup has been cancelled, don't start new
335 if (__builtin_expect (gomp_cancel_var
, 0)
338 if (gomp_team_barrier_cancelled (&team
->barrier
))
341 gomp_mutex_unlock (&team
->task_lock
);
342 for (i
= 0; i
< num_tasks
; i
++)
344 gomp_finish_task (tasks
[i
]);
347 if ((flags
& GOMP_TASK_FLAG_NOGROUP
) == 0)
348 ialias_call (GOMP_taskgroup_end
) ();
353 if (taskgroup
->cancelled
)
355 if (taskgroup
->workshare
357 && taskgroup
->prev
->cancelled
)
362 taskgroup
->num_children
+= num_tasks
;
363 for (i
= 0; i
< num_tasks
; i
++)
365 struct gomp_task
*task
= tasks
[i
];
366 priority_queue_insert (PQ_CHILDREN
, &parent
->children_queue
,
368 PRIORITY_INSERT_BEGIN
,
369 /*last_parent_depends_on=*/false,
370 task
->parent_depends_on
);
372 priority_queue_insert (PQ_TASKGROUP
, &taskgroup
->taskgroup_queue
,
373 task
, priority
, PRIORITY_INSERT_BEGIN
,
374 /*last_parent_depends_on=*/false,
375 task
->parent_depends_on
);
376 priority_queue_insert (PQ_TEAM
, &team
->task_queue
, task
, priority
,
378 /*last_parent_depends_on=*/false,
379 task
->parent_depends_on
);
381 ++team
->task_queued_count
;
383 gomp_team_barrier_set_task_pending (&team
->barrier
);
384 if (team
->task_running_count
+ !parent
->in_tied_task
387 do_wake
= team
->nthreads
- team
->task_running_count
388 - !parent
->in_tied_task
;
389 if ((unsigned long) do_wake
> num_tasks
)
394 gomp_mutex_unlock (&team
->task_lock
);
396 gomp_team_barrier_wake (&team
->barrier
, do_wake
);
398 if ((flags
& GOMP_TASK_FLAG_NOGROUP
) == 0)
399 ialias_call (GOMP_taskgroup_end
) ();