/* Copyright (C) 2005-2018 Free Software Foundation, Inc.
   Contributed by Richard Henderson <rth@redhat.com>.

   This file is part of the GNU Offloading and Multi Processing Library
   (libgomp).

   Libgomp is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
   FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */

/* This file handles the LOOP (FOR/DO) construct.  */

#include <limits.h>
#include <stdlib.h>
#include <string.h>
#include "libgomp.h"

ialias (GOMP_loop_ull_runtime_next)
ialias_redirect (GOMP_taskgroup_reduction_register)

typedef unsigned long long gomp_ull;

/* Initialize the given work share construct from the given arguments.  */

static inline void
gomp_loop_ull_init (struct gomp_work_share *ws, bool up, gomp_ull start,
                    gomp_ull end, gomp_ull incr, enum gomp_schedule_type sched,
                    gomp_ull chunk_size)
{
  ws->sched = sched;
  ws->chunk_size_ull = chunk_size;
  /* Canonicalize loops that have zero iterations to ->next == ->end.  */
  ws->end_ull = ((up && start > end) || (!up && start < end))
                ? start : end;
  ws->incr_ull = incr;
  ws->next_ull = start;
  ws->mode = 0;
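  /* Note on the ws->mode encoding consumed by the gomp_iter_ull_* routines:
     bit 0, set below only when the dynamic-scheduling chunk arithmetic
     provably cannot wrap around, allows the lock-free fetch-and-add fast
     path; the 2 bit, set at the end of this function, marks a downward
     (!UP) loop.  */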
  if (sched == GFS_DYNAMIC)
    {
      ws->chunk_size_ull *= incr;

#if defined HAVE_SYNC_BUILTINS && defined __LP64__
      {
        /* For dynamic scheduling prepare things to make each iteration
           faster.  */
        struct gomp_thread *thr = gomp_thread ();
        struct gomp_team *team = thr->ts.team;
        long nthreads = team ? team->nthreads : 1;

        if (__builtin_expect (up, 1))
          {
            /* Cheap overflow protection.  */
            if (__builtin_expect ((nthreads | ws->chunk_size_ull)
                                  < 1ULL << (sizeof (gomp_ull)
                                             * __CHAR_BIT__ / 2 - 1), 1))
              ws->mode = ws->end_ull < (__LONG_LONG_MAX__ * 2ULL + 1
                                        - (nthreads + 1) * ws->chunk_size_ull);
          }
        /* Cheap overflow protection.  */
        else if (__builtin_expect ((nthreads | -ws->chunk_size_ull)
                                   < 1ULL << (sizeof (gomp_ull)
                                              * __CHAR_BIT__ / 2 - 1), 1))
          ws->mode = ws->end_ull > ((nthreads + 1) * -ws->chunk_size_ull
                                    - (__LONG_LONG_MAX__ * 2ULL + 1));
      }
#endif
    }
  if (!up)
    ws->mode |= 2;
}

/* The *_start routines are called when first encountering a loop construct
   that is not bound directly to a parallel construct.  The first thread
   that arrives will create the work-share construct; subsequent threads
   will see the construct exists and allocate work from it.

   START, END, INCR are the bounds of the loop; due to the restrictions of
   OpenMP, these values must be the same in every thread.  This is not
   verified (nor is it entirely verifiable, since START is not necessarily
   retained intact in the work-share data structure).  CHUNK_SIZE is the
   scheduling parameter; again this must be identical in all threads.

   Returns true if there's any work for this thread to perform.  If so,
   *ISTART and *IEND are filled with the bounds of the iteration block
   allocated to this thread.  Returns false if all work was assigned to
   other threads prior to this thread's arrival.  */
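
/* As a rough illustration (not part of the library), the call sequence a
   compiler typically emits for a loop such as

       #pragma omp for schedule(dynamic, 4)
       for (unsigned long long i = 0; i < n; i++)
         body (i);

   where "body" and "n" are placeholders, looks like

       gomp_ull istart, iend;
       if (GOMP_loop_ull_dynamic_start (true, 0, n, 1, 4, &istart, &iend))
         do
           for (gomp_ull i = istart; i < iend; i++)
             body (i);
         while (GOMP_loop_ull_dynamic_next (&istart, &iend));
       GOMP_loop_end ();

   i.e. one *_start call per thread, then repeated *_next calls until the
   work share is exhausted, then the loop-end barrier (GOMP_loop_end is
   defined in loop.c).  */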

static bool
gomp_loop_ull_static_start (bool up, gomp_ull start, gomp_ull end,
                            gomp_ull incr, gomp_ull chunk_size,
                            gomp_ull *istart, gomp_ull *iend)
{
  struct gomp_thread *thr = gomp_thread ();

  thr->ts.static_trip = 0;
  if (gomp_work_share_start (0))
    {
      gomp_loop_ull_init (thr->ts.work_share, up, start, end, incr,
                          GFS_STATIC, chunk_size);
      gomp_work_share_init_done ();
    }

  return !gomp_iter_ull_static_next (istart, iend);
}

static bool
gomp_loop_ull_dynamic_start (bool up, gomp_ull start, gomp_ull end,
                             gomp_ull incr, gomp_ull chunk_size,
                             gomp_ull *istart, gomp_ull *iend)
{
  struct gomp_thread *thr = gomp_thread ();
  bool ret;

  if (gomp_work_share_start (0))
    {
      gomp_loop_ull_init (thr->ts.work_share, up, start, end, incr,
                          GFS_DYNAMIC, chunk_size);
      gomp_work_share_init_done ();
    }

#if defined HAVE_SYNC_BUILTINS && defined __LP64__
  ret = gomp_iter_ull_dynamic_next (istart, iend);
#else
  gomp_mutex_lock (&thr->ts.work_share->lock);
  ret = gomp_iter_ull_dynamic_next_locked (istart, iend);
  gomp_mutex_unlock (&thr->ts.work_share->lock);
#endif

  return ret;
}

static bool
gomp_loop_ull_guided_start (bool up, gomp_ull start, gomp_ull end,
                            gomp_ull incr, gomp_ull chunk_size,
                            gomp_ull *istart, gomp_ull *iend)
{
  struct gomp_thread *thr = gomp_thread ();
  bool ret;

  if (gomp_work_share_start (0))
    {
      gomp_loop_ull_init (thr->ts.work_share, up, start, end, incr,
                          GFS_GUIDED, chunk_size);
      gomp_work_share_init_done ();
    }

#if defined HAVE_SYNC_BUILTINS && defined __LP64__
  ret = gomp_iter_ull_guided_next (istart, iend);
#else
  gomp_mutex_lock (&thr->ts.work_share->lock);
  ret = gomp_iter_ull_guided_next_locked (istart, iend);
  gomp_mutex_unlock (&thr->ts.work_share->lock);
#endif

  return ret;
}

bool
GOMP_loop_ull_runtime_start (bool up, gomp_ull start, gomp_ull end,
                             gomp_ull incr, gomp_ull *istart, gomp_ull *iend)
{
  struct gomp_task_icv *icv = gomp_icv (false);
  switch (icv->run_sched_var & ~GFS_MONOTONIC)
    {
    case GFS_STATIC:
      return gomp_loop_ull_static_start (up, start, end, incr,
                                         icv->run_sched_chunk_size,
                                         istart, iend);
    case GFS_DYNAMIC:
      return gomp_loop_ull_dynamic_start (up, start, end, incr,
                                          icv->run_sched_chunk_size,
                                          istart, iend);
    case GFS_GUIDED:
      return gomp_loop_ull_guided_start (up, start, end, incr,
                                         icv->run_sched_chunk_size,
                                         istart, iend);
    case GFS_AUTO:
      /* For now map to schedule(static), later on we could play with feedback
         driven choice.  */
      return gomp_loop_ull_static_start (up, start, end, incr,
                                         0, istart, iend);
    default:
      abort ();
    }
}

static long
gomp_adjust_sched (long sched, gomp_ull *chunk_size)
{
  sched &= ~GFS_MONOTONIC;
  switch (sched)
    {
    case GFS_STATIC:
    case GFS_DYNAMIC:
    case GFS_GUIDED:
      return sched;
    /* GFS_RUNTIME is used for runtime schedule without monotonic
       or nonmonotonic modifiers on the clause.
       GFS_RUNTIME|GFS_MONOTONIC for runtime schedule with monotonic
       modifier.  */
    case GFS_RUNTIME:
    /* GFS_AUTO is used for runtime schedule with nonmonotonic
       modifier.  */
    case GFS_AUTO:
      {
        struct gomp_task_icv *icv = gomp_icv (false);
        sched = icv->run_sched_var & ~GFS_MONOTONIC;
        switch (sched)
          {
          case GFS_STATIC:
          case GFS_DYNAMIC:
          case GFS_GUIDED:
            *chunk_size = icv->run_sched_chunk_size;
            break;
          case GFS_AUTO:
            sched = GFS_STATIC;
            *chunk_size = 0;
            break;
          default:
            abort ();
          }
        return sched;
      }
    default:
      abort ();
    }
}

bool
GOMP_loop_ull_start (bool up, gomp_ull start, gomp_ull end,
                     gomp_ull incr, long sched, gomp_ull chunk_size,
                     gomp_ull *istart, gomp_ull *iend,
                     uintptr_t *reductions, void **mem)
{
  struct gomp_thread *thr = gomp_thread ();

  thr->ts.static_trip = 0;
  if (reductions)
    gomp_workshare_taskgroup_start ();
  if (gomp_work_share_start (0))
    {
      sched = gomp_adjust_sched (sched, &chunk_size);
      gomp_loop_ull_init (thr->ts.work_share, up, start, end, incr,
                          sched, chunk_size);
      if (reductions)
        {
          GOMP_taskgroup_reduction_register (reductions);
          thr->task->taskgroup->workshare = true;
          thr->ts.work_share->task_reductions = reductions;
        }
      if (mem)
        {
          uintptr_t size = (uintptr_t) *mem;
          if (size > (sizeof (struct gomp_work_share)
                      - offsetof (struct gomp_work_share,
                                  inline_ordered_team_ids)))
            thr->ts.work_share->ordered_team_ids
              = gomp_malloc_cleared (size);
          else
            memset (thr->ts.work_share->ordered_team_ids, '\0', size);
          *mem = (void *) thr->ts.work_share->ordered_team_ids;
        }
      gomp_work_share_init_done ();
    }
  else
    {
      if (reductions)
        {
          uintptr_t *first_reductions = thr->ts.work_share->task_reductions;
          gomp_workshare_task_reduction_register (reductions,
                                                  first_reductions);
        }
      if (mem)
        *mem = (void *) thr->ts.work_share->ordered_team_ids;
    }

  return ialias_call (GOMP_loop_ull_runtime_next) (istart, iend);
}

/* The *_ordered_*_start routines are similar.  The only difference is that
   this work-share construct is initialized to expect an ORDERED section.  */
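
/* For illustration only (again with placeholder "body" and "n"): an ordered
   loop such as

       #pragma omp for ordered schedule(dynamic)
       for (unsigned long long i = 0; i < n; i++)
         { ... the ordered part ... }

   uses the same start/next pattern through the *_ordered_* entry points,
   with each ordered region bracketed by GOMP_ordered_start/GOMP_ordered_end
   (defined in ordered.c):

       gomp_ull istart, iend;
       if (GOMP_loop_ull_ordered_dynamic_start (true, 0, n, 1, 1,
                                                &istart, &iend))
         do
           for (gomp_ull i = istart; i < iend; i++)
             {
               GOMP_ordered_start ();
               body (i);
               GOMP_ordered_end ();
             }
         while (GOMP_loop_ull_ordered_dynamic_next (&istart, &iend));
       GOMP_loop_end ();  */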

static bool
gomp_loop_ull_ordered_static_start (bool up, gomp_ull start, gomp_ull end,
                                    gomp_ull incr, gomp_ull chunk_size,
                                    gomp_ull *istart, gomp_ull *iend)
{
  struct gomp_thread *thr = gomp_thread ();

  thr->ts.static_trip = 0;
  if (gomp_work_share_start (1))
    {
      gomp_loop_ull_init (thr->ts.work_share, up, start, end, incr,
                          GFS_STATIC, chunk_size);
      gomp_ordered_static_init ();
      gomp_work_share_init_done ();
    }

  return !gomp_iter_ull_static_next (istart, iend);
}

static bool
gomp_loop_ull_ordered_dynamic_start (bool up, gomp_ull start, gomp_ull end,
                                     gomp_ull incr, gomp_ull chunk_size,
                                     gomp_ull *istart, gomp_ull *iend)
{
  struct gomp_thread *thr = gomp_thread ();
  bool ret;

  if (gomp_work_share_start (1))
    {
      gomp_loop_ull_init (thr->ts.work_share, up, start, end, incr,
                          GFS_DYNAMIC, chunk_size);
      gomp_mutex_lock (&thr->ts.work_share->lock);
      gomp_work_share_init_done ();
    }
  else
    gomp_mutex_lock (&thr->ts.work_share->lock);

  ret = gomp_iter_ull_dynamic_next_locked (istart, iend);
  if (ret)
    gomp_ordered_first ();
  gomp_mutex_unlock (&thr->ts.work_share->lock);

  return ret;
}

static bool
gomp_loop_ull_ordered_guided_start (bool up, gomp_ull start, gomp_ull end,
                                    gomp_ull incr, gomp_ull chunk_size,
                                    gomp_ull *istart, gomp_ull *iend)
{
  struct gomp_thread *thr = gomp_thread ();
  bool ret;

  if (gomp_work_share_start (1))
    {
      gomp_loop_ull_init (thr->ts.work_share, up, start, end, incr,
                          GFS_GUIDED, chunk_size);
      gomp_mutex_lock (&thr->ts.work_share->lock);
      gomp_work_share_init_done ();
    }
  else
    gomp_mutex_lock (&thr->ts.work_share->lock);

  ret = gomp_iter_ull_guided_next_locked (istart, iend);
  if (ret)
    gomp_ordered_first ();
  gomp_mutex_unlock (&thr->ts.work_share->lock);

  return ret;
}

bool
GOMP_loop_ull_ordered_runtime_start (bool up, gomp_ull start, gomp_ull end,
                                     gomp_ull incr, gomp_ull *istart,
                                     gomp_ull *iend)
{
  struct gomp_task_icv *icv = gomp_icv (false);
  switch (icv->run_sched_var & ~GFS_MONOTONIC)
    {
    case GFS_STATIC:
      return gomp_loop_ull_ordered_static_start (up, start, end, incr,
                                                 icv->run_sched_chunk_size,
                                                 istart, iend);
    case GFS_DYNAMIC:
      return gomp_loop_ull_ordered_dynamic_start (up, start, end, incr,
                                                  icv->run_sched_chunk_size,
                                                  istart, iend);
    case GFS_GUIDED:
      return gomp_loop_ull_ordered_guided_start (up, start, end, incr,
                                                 icv->run_sched_chunk_size,
                                                 istart, iend);
    case GFS_AUTO:
      /* For now map to schedule(static), later on we could play with feedback
         driven choice.  */
      return gomp_loop_ull_ordered_static_start (up, start, end, incr,
                                                 0, istart, iend);
    default:
      abort ();
    }
}

bool
GOMP_loop_ull_ordered_start (bool up, gomp_ull start, gomp_ull end,
                             gomp_ull incr, long sched, gomp_ull chunk_size,
                             gomp_ull *istart, gomp_ull *iend,
                             uintptr_t *reductions, void **mem)
{
  struct gomp_thread *thr = gomp_thread ();
  size_t ordered = 1;
  bool ret;

  thr->ts.static_trip = 0;
  if (reductions)
    gomp_workshare_taskgroup_start ();
  if (mem)
    ordered += (uintptr_t) *mem;
  if (gomp_work_share_start (ordered))
    {
      sched = gomp_adjust_sched (sched, &chunk_size);
      gomp_loop_ull_init (thr->ts.work_share, up, start, end, incr,
                          sched, chunk_size);
      if (reductions)
        {
          GOMP_taskgroup_reduction_register (reductions);
          thr->task->taskgroup->workshare = true;
          thr->ts.work_share->task_reductions = reductions;
        }
      if (sched == GFS_STATIC)
        gomp_ordered_static_init ();
      else
        gomp_mutex_lock (&thr->ts.work_share->lock);
      gomp_work_share_init_done ();
    }
  else
    {
      if (reductions)
        {
          uintptr_t *first_reductions = thr->ts.work_share->task_reductions;
          gomp_workshare_task_reduction_register (reductions,
                                                  first_reductions);
        }
      sched = thr->ts.work_share->sched;
      if (sched != GFS_STATIC)
        gomp_mutex_lock (&thr->ts.work_share->lock);
    }

  if (mem)
    {
      uintptr_t p
        = (uintptr_t) (thr->ts.work_share->ordered_team_ids
                       + (thr->ts.team ? thr->ts.team->nthreads : 1));
      p += __alignof__ (long long) - 1;
      p &= ~(__alignof__ (long long) - 1);
      *mem = (void *) p;
    }

  switch (sched)
    {
    case GFS_STATIC:
    case GFS_AUTO:
      return !gomp_iter_ull_static_next (istart, iend);
    case GFS_DYNAMIC:
      ret = gomp_iter_ull_dynamic_next_locked (istart, iend);
      break;
    case GFS_GUIDED:
      ret = gomp_iter_ull_guided_next_locked (istart, iend);
      break;
    default:
      abort ();
    }

  if (ret)
    gomp_ordered_first ();
  gomp_mutex_unlock (&thr->ts.work_share->lock);
  return ret;
}

/* The *_doacross_*_start routines are similar.  The only difference is that
   this work-share construct is initialized to expect an ORDERED(N) - DOACROSS
   section, and the worksharing loop iterates always from 0 to COUNTS[0] - 1
   and other COUNTS array elements tell the library number of iterations
   in the ordered inner loops.  */
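
/* For illustration only (loop bounds "n" and "m" are placeholders): for a
   doacross nest such as

       #pragma omp for ordered(2)
       for (unsigned long long i = 0; i < n; i++)
         for (unsigned long long j = 0; j < m; j++)
           { ... depend(sink: i-1, j) ... depend(source) ... }

   the compiler would pass NCOUNTS == 2 and COUNTS == { n, m }: the work
   share then runs over 0 .. n - 1, while COUNTS[1] tells the library the
   size of the inner ordered loop.  The depend clauses themselves are
   lowered to the GOMP_doacross_ull_wait/GOMP_doacross_ull_post entry
   points implemented in ordered.c.  */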

static bool
gomp_loop_ull_doacross_static_start (unsigned ncounts, gomp_ull *counts,
                                     gomp_ull chunk_size, gomp_ull *istart,
                                     gomp_ull *iend)
{
  struct gomp_thread *thr = gomp_thread ();

  thr->ts.static_trip = 0;
  if (gomp_work_share_start (0))
    {
      gomp_loop_ull_init (thr->ts.work_share, true, 0, counts[0], 1,
                          GFS_STATIC, chunk_size);
      gomp_doacross_ull_init (ncounts, counts, chunk_size, 0);
      gomp_work_share_init_done ();
    }

  return !gomp_iter_ull_static_next (istart, iend);
}

static bool
gomp_loop_ull_doacross_dynamic_start (unsigned ncounts, gomp_ull *counts,
                                      gomp_ull chunk_size, gomp_ull *istart,
                                      gomp_ull *iend)
{
  struct gomp_thread *thr = gomp_thread ();
  bool ret;

  if (gomp_work_share_start (0))
    {
      gomp_loop_ull_init (thr->ts.work_share, true, 0, counts[0], 1,
                          GFS_DYNAMIC, chunk_size);
      gomp_doacross_ull_init (ncounts, counts, chunk_size, 0);
      gomp_work_share_init_done ();
    }

#if defined HAVE_SYNC_BUILTINS && defined __LP64__
  ret = gomp_iter_ull_dynamic_next (istart, iend);
#else
  gomp_mutex_lock (&thr->ts.work_share->lock);
  ret = gomp_iter_ull_dynamic_next_locked (istart, iend);
  gomp_mutex_unlock (&thr->ts.work_share->lock);
#endif

  return ret;
}

static bool
gomp_loop_ull_doacross_guided_start (unsigned ncounts, gomp_ull *counts,
                                     gomp_ull chunk_size, gomp_ull *istart,
                                     gomp_ull *iend)
{
  struct gomp_thread *thr = gomp_thread ();
  bool ret;

  if (gomp_work_share_start (0))
    {
      gomp_loop_ull_init (thr->ts.work_share, true, 0, counts[0], 1,
                          GFS_GUIDED, chunk_size);
      gomp_doacross_ull_init (ncounts, counts, chunk_size, 0);
      gomp_work_share_init_done ();
    }

#if defined HAVE_SYNC_BUILTINS && defined __LP64__
  ret = gomp_iter_ull_guided_next (istart, iend);
#else
  gomp_mutex_lock (&thr->ts.work_share->lock);
  ret = gomp_iter_ull_guided_next_locked (istart, iend);
  gomp_mutex_unlock (&thr->ts.work_share->lock);
#endif

  return ret;
}

bool
GOMP_loop_ull_doacross_runtime_start (unsigned ncounts, gomp_ull *counts,
                                      gomp_ull *istart, gomp_ull *iend)
{
  struct gomp_task_icv *icv = gomp_icv (false);
  switch (icv->run_sched_var & ~GFS_MONOTONIC)
    {
    case GFS_STATIC:
      return gomp_loop_ull_doacross_static_start (ncounts, counts,
                                                  icv->run_sched_chunk_size,
                                                  istart, iend);
    case GFS_DYNAMIC:
      return gomp_loop_ull_doacross_dynamic_start (ncounts, counts,
                                                   icv->run_sched_chunk_size,
                                                   istart, iend);
    case GFS_GUIDED:
      return gomp_loop_ull_doacross_guided_start (ncounts, counts,
                                                  icv->run_sched_chunk_size,
                                                  istart, iend);
    case GFS_AUTO:
      /* For now map to schedule(static), later on we could play with feedback
         driven choice.  */
      return gomp_loop_ull_doacross_static_start (ncounts, counts,
                                                  0, istart, iend);
    default:
      abort ();
    }
}

bool
GOMP_loop_ull_doacross_start (unsigned ncounts, gomp_ull *counts,
                              long sched, gomp_ull chunk_size,
                              gomp_ull *istart, gomp_ull *iend,
                              uintptr_t *reductions, void **mem)
{
  struct gomp_thread *thr = gomp_thread ();

  thr->ts.static_trip = 0;
  if (reductions)
    gomp_workshare_taskgroup_start ();
  if (gomp_work_share_start (0))
    {
      size_t extra = 0;
      if (mem)
        extra = (uintptr_t) *mem;
      sched = gomp_adjust_sched (sched, &chunk_size);
      gomp_loop_ull_init (thr->ts.work_share, true, 0, counts[0], 1,
                          sched, chunk_size);
      gomp_doacross_ull_init (ncounts, counts, chunk_size, extra);
      if (reductions)
        {
          GOMP_taskgroup_reduction_register (reductions);
          thr->task->taskgroup->workshare = true;
          thr->ts.work_share->task_reductions = reductions;
        }
      gomp_work_share_init_done ();
    }
  else
    {
      if (reductions)
        {
          uintptr_t *first_reductions = thr->ts.work_share->task_reductions;
          gomp_workshare_task_reduction_register (reductions,
                                                  first_reductions);
        }
      sched = thr->ts.work_share->sched;
    }

  if (mem)
    *mem = thr->ts.work_share->doacross->extra;

  return ialias_call (GOMP_loop_ull_runtime_next) (istart, iend);
}

/* The *_next routines are called when the thread completes processing of
   the iteration block currently assigned to it.  If the work-share
   construct is bound directly to a parallel construct, then the iteration
   bounds may have been set up before the parallel.  In which case, this
   may be the first iteration for the thread.

   Returns true if there is work remaining to be performed; *ISTART and
   *IEND are filled with a new iteration block.  Returns false if all work
   has been assigned.  */
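
/* For illustration only: with schedule(runtime) the schedule is not known at
   compile time, so the compiler emits the generic entry points instead, and
   GOMP_loop_ull_runtime_next below dispatches on the run_sched_var ICV
   (settable e.g. through the OMP_SCHEDULE environment variable); "body" and
   "n" are placeholders:

       gomp_ull istart, iend;
       if (GOMP_loop_ull_runtime_start (true, 0, n, 1, &istart, &iend))
         do
           for (gomp_ull i = istart; i < iend; i++)
             body (i);
         while (GOMP_loop_ull_runtime_next (&istart, &iend));
       GOMP_loop_end ();  */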

static bool
gomp_loop_ull_static_next (gomp_ull *istart, gomp_ull *iend)
{
  return !gomp_iter_ull_static_next (istart, iend);
}

static bool
gomp_loop_ull_dynamic_next (gomp_ull *istart, gomp_ull *iend)
{
  bool ret;

#if defined HAVE_SYNC_BUILTINS && defined __LP64__
  ret = gomp_iter_ull_dynamic_next (istart, iend);
#else
  struct gomp_thread *thr = gomp_thread ();
  gomp_mutex_lock (&thr->ts.work_share->lock);
  ret = gomp_iter_ull_dynamic_next_locked (istart, iend);
  gomp_mutex_unlock (&thr->ts.work_share->lock);
#endif

  return ret;
}

static bool
gomp_loop_ull_guided_next (gomp_ull *istart, gomp_ull *iend)
{
  bool ret;

#if defined HAVE_SYNC_BUILTINS && defined __LP64__
  ret = gomp_iter_ull_guided_next (istart, iend);
#else
  struct gomp_thread *thr = gomp_thread ();
  gomp_mutex_lock (&thr->ts.work_share->lock);
  ret = gomp_iter_ull_guided_next_locked (istart, iend);
  gomp_mutex_unlock (&thr->ts.work_share->lock);
#endif

  return ret;
}

bool
GOMP_loop_ull_runtime_next (gomp_ull *istart, gomp_ull *iend)
{
  struct gomp_thread *thr = gomp_thread ();

  switch (thr->ts.work_share->sched)
    {
    case GFS_STATIC:
    case GFS_AUTO:
      return gomp_loop_ull_static_next (istart, iend);
    case GFS_DYNAMIC:
      return gomp_loop_ull_dynamic_next (istart, iend);
    case GFS_GUIDED:
      return gomp_loop_ull_guided_next (istart, iend);
    default:
      abort ();
    }
}

/* The *_ordered_*_next routines are called when the thread completes
   processing of the iteration block currently assigned to it.

   Returns true if there is work remaining to be performed; *ISTART and
   *IEND are filled with a new iteration block.  Returns false if all work
   has been assigned.  */

static bool
gomp_loop_ull_ordered_static_next (gomp_ull *istart, gomp_ull *iend)
{
  struct gomp_thread *thr = gomp_thread ();
  int test;

  gomp_ordered_sync ();
  gomp_mutex_lock (&thr->ts.work_share->lock);
  test = gomp_iter_ull_static_next (istart, iend);
  if (test >= 0)
    gomp_ordered_static_next ();
  gomp_mutex_unlock (&thr->ts.work_share->lock);

  return test == 0;
}

static bool
gomp_loop_ull_ordered_dynamic_next (gomp_ull *istart, gomp_ull *iend)
{
  struct gomp_thread *thr = gomp_thread ();
  bool ret;

  gomp_ordered_sync ();
  gomp_mutex_lock (&thr->ts.work_share->lock);
  ret = gomp_iter_ull_dynamic_next_locked (istart, iend);
  if (ret)
    gomp_ordered_next ();
  else
    gomp_ordered_last ();
  gomp_mutex_unlock (&thr->ts.work_share->lock);

  return ret;
}

static bool
gomp_loop_ull_ordered_guided_next (gomp_ull *istart, gomp_ull *iend)
{
  struct gomp_thread *thr = gomp_thread ();
  bool ret;

  gomp_ordered_sync ();
  gomp_mutex_lock (&thr->ts.work_share->lock);
  ret = gomp_iter_ull_guided_next_locked (istart, iend);
  if (ret)
    gomp_ordered_next ();
  else
    gomp_ordered_last ();
  gomp_mutex_unlock (&thr->ts.work_share->lock);

  return ret;
}

bool
GOMP_loop_ull_ordered_runtime_next (gomp_ull *istart, gomp_ull *iend)
{
  struct gomp_thread *thr = gomp_thread ();

  switch (thr->ts.work_share->sched)
    {
    case GFS_STATIC:
    case GFS_AUTO:
      return gomp_loop_ull_ordered_static_next (istart, iend);
    case GFS_DYNAMIC:
      return gomp_loop_ull_ordered_dynamic_next (istart, iend);
    case GFS_GUIDED:
      return gomp_loop_ull_ordered_guided_next (istart, iend);
    default:
      abort ();
    }
}

/* We use static functions above so that we're sure that the "runtime"
   function can defer to the proper routine without interposition.  We
   export the static function with a strong alias when possible, or with
   a wrapper function otherwise.  */

#ifdef HAVE_ATTRIBUTE_ALIAS
extern __typeof(gomp_loop_ull_static_start) GOMP_loop_ull_static_start
        __attribute__((alias ("gomp_loop_ull_static_start")));
extern __typeof(gomp_loop_ull_dynamic_start) GOMP_loop_ull_dynamic_start
        __attribute__((alias ("gomp_loop_ull_dynamic_start")));
extern __typeof(gomp_loop_ull_guided_start) GOMP_loop_ull_guided_start
        __attribute__((alias ("gomp_loop_ull_guided_start")));
extern __typeof(gomp_loop_ull_dynamic_start) GOMP_loop_ull_nonmonotonic_dynamic_start
        __attribute__((alias ("gomp_loop_ull_dynamic_start")));
extern __typeof(gomp_loop_ull_guided_start) GOMP_loop_ull_nonmonotonic_guided_start
        __attribute__((alias ("gomp_loop_ull_guided_start")));
extern __typeof(GOMP_loop_ull_runtime_start) GOMP_loop_ull_nonmonotonic_runtime_start
        __attribute__((alias ("GOMP_loop_ull_runtime_start")));
extern __typeof(GOMP_loop_ull_runtime_start) GOMP_loop_ull_maybe_nonmonotonic_runtime_start
        __attribute__((alias ("GOMP_loop_ull_runtime_start")));

extern __typeof(gomp_loop_ull_ordered_static_start) GOMP_loop_ull_ordered_static_start
        __attribute__((alias ("gomp_loop_ull_ordered_static_start")));
extern __typeof(gomp_loop_ull_ordered_dynamic_start) GOMP_loop_ull_ordered_dynamic_start
        __attribute__((alias ("gomp_loop_ull_ordered_dynamic_start")));
extern __typeof(gomp_loop_ull_ordered_guided_start) GOMP_loop_ull_ordered_guided_start
        __attribute__((alias ("gomp_loop_ull_ordered_guided_start")));

extern __typeof(gomp_loop_ull_doacross_static_start) GOMP_loop_ull_doacross_static_start
        __attribute__((alias ("gomp_loop_ull_doacross_static_start")));
extern __typeof(gomp_loop_ull_doacross_dynamic_start) GOMP_loop_ull_doacross_dynamic_start
        __attribute__((alias ("gomp_loop_ull_doacross_dynamic_start")));
extern __typeof(gomp_loop_ull_doacross_guided_start) GOMP_loop_ull_doacross_guided_start
        __attribute__((alias ("gomp_loop_ull_doacross_guided_start")));

extern __typeof(gomp_loop_ull_static_next) GOMP_loop_ull_static_next
        __attribute__((alias ("gomp_loop_ull_static_next")));
extern __typeof(gomp_loop_ull_dynamic_next) GOMP_loop_ull_dynamic_next
        __attribute__((alias ("gomp_loop_ull_dynamic_next")));
extern __typeof(gomp_loop_ull_guided_next) GOMP_loop_ull_guided_next
        __attribute__((alias ("gomp_loop_ull_guided_next")));
extern __typeof(gomp_loop_ull_dynamic_next) GOMP_loop_ull_nonmonotonic_dynamic_next
        __attribute__((alias ("gomp_loop_ull_dynamic_next")));
extern __typeof(gomp_loop_ull_guided_next) GOMP_loop_ull_nonmonotonic_guided_next
        __attribute__((alias ("gomp_loop_ull_guided_next")));
extern __typeof(GOMP_loop_ull_runtime_next) GOMP_loop_ull_nonmonotonic_runtime_next
        __attribute__((alias ("GOMP_loop_ull_runtime_next")));
extern __typeof(GOMP_loop_ull_runtime_next) GOMP_loop_ull_maybe_nonmonotonic_runtime_next
        __attribute__((alias ("GOMP_loop_ull_runtime_next")));

extern __typeof(gomp_loop_ull_ordered_static_next) GOMP_loop_ull_ordered_static_next
        __attribute__((alias ("gomp_loop_ull_ordered_static_next")));
extern __typeof(gomp_loop_ull_ordered_dynamic_next) GOMP_loop_ull_ordered_dynamic_next
        __attribute__((alias ("gomp_loop_ull_ordered_dynamic_next")));
extern __typeof(gomp_loop_ull_ordered_guided_next) GOMP_loop_ull_ordered_guided_next
        __attribute__((alias ("gomp_loop_ull_ordered_guided_next")));
#else
bool
GOMP_loop_ull_static_start (bool up, gomp_ull start, gomp_ull end,
                            gomp_ull incr, gomp_ull chunk_size,
                            gomp_ull *istart, gomp_ull *iend)
{
  return gomp_loop_ull_static_start (up, start, end, incr, chunk_size, istart,
                                     iend);
}

bool
GOMP_loop_ull_dynamic_start (bool up, gomp_ull start, gomp_ull end,
                             gomp_ull incr, gomp_ull chunk_size,
                             gomp_ull *istart, gomp_ull *iend)
{
  return gomp_loop_ull_dynamic_start (up, start, end, incr, chunk_size, istart,
                                      iend);
}

bool
GOMP_loop_ull_guided_start (bool up, gomp_ull start, gomp_ull end,
                            gomp_ull incr, gomp_ull chunk_size,
                            gomp_ull *istart, gomp_ull *iend)
{
  return gomp_loop_ull_guided_start (up, start, end, incr, chunk_size, istart,
                                     iend);
}

bool
GOMP_loop_ull_nonmonotonic_dynamic_start (bool up, gomp_ull start,
                                          gomp_ull end, gomp_ull incr,
                                          gomp_ull chunk_size,
                                          gomp_ull *istart, gomp_ull *iend)
{
  return gomp_loop_ull_dynamic_start (up, start, end, incr, chunk_size, istart,
                                      iend);
}

bool
GOMP_loop_ull_nonmonotonic_guided_start (bool up, gomp_ull start, gomp_ull end,
                                         gomp_ull incr, gomp_ull chunk_size,
                                         gomp_ull *istart, gomp_ull *iend)
{
  return gomp_loop_ull_guided_start (up, start, end, incr, chunk_size, istart,
                                     iend);
}

bool
GOMP_loop_ull_nonmonotonic_runtime_start (bool up, gomp_ull start,
                                          gomp_ull end, gomp_ull incr,
                                          gomp_ull *istart, gomp_ull *iend)
{
  return GOMP_loop_ull_runtime_start (up, start, end, incr, istart, iend);
}

bool
GOMP_loop_ull_maybe_nonmonotonic_runtime_start (bool up, gomp_ull start,
                                                gomp_ull end, gomp_ull incr,
                                                gomp_ull *istart,
                                                gomp_ull *iend)
{
  return GOMP_loop_ull_runtime_start (up, start, end, incr, istart, iend);
}

bool
GOMP_loop_ull_ordered_static_start (bool up, gomp_ull start, gomp_ull end,
                                    gomp_ull incr, gomp_ull chunk_size,
                                    gomp_ull *istart, gomp_ull *iend)
{
  return gomp_loop_ull_ordered_static_start (up, start, end, incr, chunk_size,
                                             istart, iend);
}

bool
GOMP_loop_ull_ordered_dynamic_start (bool up, gomp_ull start, gomp_ull end,
                                     gomp_ull incr, gomp_ull chunk_size,
                                     gomp_ull *istart, gomp_ull *iend)
{
  return gomp_loop_ull_ordered_dynamic_start (up, start, end, incr, chunk_size,
                                              istart, iend);
}

bool
GOMP_loop_ull_ordered_guided_start (bool up, gomp_ull start, gomp_ull end,
                                    gomp_ull incr, gomp_ull chunk_size,
                                    gomp_ull *istart, gomp_ull *iend)
{
  return gomp_loop_ull_ordered_guided_start (up, start, end, incr, chunk_size,
                                             istart, iend);
}

bool
GOMP_loop_ull_doacross_static_start (unsigned ncounts, gomp_ull *counts,
                                     gomp_ull chunk_size, gomp_ull *istart,
                                     gomp_ull *iend)
{
  return gomp_loop_ull_doacross_static_start (ncounts, counts, chunk_size,
                                              istart, iend);
}

bool
GOMP_loop_ull_doacross_dynamic_start (unsigned ncounts, gomp_ull *counts,
                                      gomp_ull chunk_size, gomp_ull *istart,
                                      gomp_ull *iend)
{
  return gomp_loop_ull_doacross_dynamic_start (ncounts, counts, chunk_size,
                                               istart, iend);
}

bool
GOMP_loop_ull_doacross_guided_start (unsigned ncounts, gomp_ull *counts,
                                     gomp_ull chunk_size, gomp_ull *istart,
                                     gomp_ull *iend)
{
  return gomp_loop_ull_doacross_guided_start (ncounts, counts, chunk_size,
                                              istart, iend);
}

bool
GOMP_loop_ull_static_next (gomp_ull *istart, gomp_ull *iend)
{
  return gomp_loop_ull_static_next (istart, iend);
}

bool
GOMP_loop_ull_dynamic_next (gomp_ull *istart, gomp_ull *iend)
{
  return gomp_loop_ull_dynamic_next (istart, iend);
}

bool
GOMP_loop_ull_guided_next (gomp_ull *istart, gomp_ull *iend)
{
  return gomp_loop_ull_guided_next (istart, iend);
}

bool
GOMP_loop_ull_nonmonotonic_dynamic_next (gomp_ull *istart, gomp_ull *iend)
{
  return gomp_loop_ull_dynamic_next (istart, iend);
}

bool
GOMP_loop_ull_nonmonotonic_guided_next (gomp_ull *istart, gomp_ull *iend)
{
  return gomp_loop_ull_guided_next (istart, iend);
}

bool
GOMP_loop_ull_nonmonotonic_runtime_next (gomp_ull *istart, gomp_ull *iend)
{
  return GOMP_loop_ull_runtime_next (istart, iend);
}

bool
GOMP_loop_ull_maybe_nonmonotonic_runtime_next (gomp_ull *istart,
                                               gomp_ull *iend)
{
  return GOMP_loop_ull_runtime_next (istart, iend);
}

bool
GOMP_loop_ull_ordered_static_next (gomp_ull *istart, gomp_ull *iend)
{
  return gomp_loop_ull_ordered_static_next (istart, iend);
}

bool
GOMP_loop_ull_ordered_dynamic_next (gomp_ull *istart, gomp_ull *iend)
{
  return gomp_loop_ull_ordered_dynamic_next (istart, iend);
}

bool
GOMP_loop_ull_ordered_guided_next (gomp_ull *istart, gomp_ull *iend)
{
  return gomp_loop_ull_ordered_guided_next (istart, iend);
}
#endif