/* Copyright (C) 2005-2018 Free Software Foundation, Inc.
   Contributed by Richard Henderson <rth@redhat.com>.

   This file is part of the GNU Offloading and Multi Processing Library
   (libgomp).

   Libgomp is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
   FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */

/* This file handles the LOOP (FOR/DO) construct.  */

#include <limits.h>
#include <stdlib.h>
#include "libgomp.h"


/* Initialize the given work share construct from the given arguments.  */

static inline void
gomp_loop_init (struct gomp_work_share *ws, long start, long end, long incr,
                enum gomp_schedule_type sched, long chunk_size)
{
  ws->sched = sched;
  ws->chunk_size = chunk_size;
  /* Canonicalize loops that have zero iterations to ->next == ->end.  */
  ws->end = ((incr > 0 && start > end) || (incr < 0 && start < end))
            ? start : end;
  ws->incr = incr;
  ws->next = start;
  if (sched == GFS_DYNAMIC)
    {
      ws->chunk_size *= incr;

#ifdef HAVE_SYNC_BUILTINS
      {
        /* For dynamic scheduling prepare things to make each iteration
           faster.  */
        struct gomp_thread *thr = gomp_thread ();
        struct gomp_team *team = thr->ts.team;
        long nthreads = team ? team->nthreads : 1;

        if (__builtin_expect (incr > 0, 1))
          {
            /* Cheap overflow protection.  */
            if (__builtin_expect ((nthreads | ws->chunk_size)
                                  >= 1UL << (sizeof (long)
                                             * __CHAR_BIT__ / 2 - 1), 0))
              ws->mode = 0;
            else
              ws->mode = ws->end < (LONG_MAX
                                    - (nthreads + 1) * ws->chunk_size);
          }
        /* Cheap overflow protection.  */
        else if (__builtin_expect ((nthreads | -ws->chunk_size)
                                   >= 1UL << (sizeof (long)
                                              * __CHAR_BIT__ / 2 - 1), 0))
          ws->mode = 0;
        else
          ws->mode = ws->end > (nthreads + 1) * -ws->chunk_size - LONG_MAX;
      }
#endif
    }
}

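/* Illustrative note (added commentary, not from the original source):
   with incr = 1, start = 10, end = 0 the canonicalization above yields
   ws->next == ws->end == 10, so the first *_next call reports no work
   without any special-casing.  For dynamic scheduling, a nonzero
   ws->mode records that the end point plus nthreads * chunk_size
   cannot overflow a signed long, which lets the iterator take a
   cheaper unguarded atomic path.  */
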
/* The *_start routines are called when first encountering a loop construct
   that is not bound directly to a parallel construct.  The first thread
   that arrives will create the work-share construct; subsequent threads
   will see the construct exists and allocate work from it.

   START, END, INCR are the bounds of the loop; due to the restrictions of
   OpenMP, these values must be the same in every thread.  This is not
   verified (nor is it entirely verifiable, since START is not necessarily
   retained intact in the work-share data structure).  CHUNK_SIZE is the
   scheduling parameter; again this must be identical in all threads.

   Returns true if there's any work for this thread to perform.  If so,
   *ISTART and *IEND are filled with the bounds of the iteration block
   allocated to this thread.  Returns false if all work was assigned to
   other threads prior to this thread's arrival.  */

static bool
gomp_loop_static_start (long start, long end, long incr, long chunk_size,
                        long *istart, long *iend)
{
  struct gomp_thread *thr = gomp_thread ();

  thr->ts.static_trip = 0;
  if (gomp_work_share_start (false))
    {
      gomp_loop_init (thr->ts.work_share, start, end, incr,
                      GFS_STATIC, chunk_size);
      gomp_work_share_init_done ();
    }

  return !gomp_iter_static_next (istart, iend);
}

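/* Example (an illustrative sketch of the caller side, not part of the
   original source; body and the loop bounds are placeholders): for an
   orphaned worksharing loop such as

       #pragma omp for schedule (static, CHUNK)
       for (i = START; i < END; i++)
         body (i);

   the compiler emits a driver around the exported entry points of
   roughly this shape:

       long istart, iend;
       if (GOMP_loop_static_start (START, END, 1, CHUNK, &istart, &iend))
         do
           for (long i = istart; i < iend; i++)
             body (i);
         while (GOMP_loop_static_next (&istart, &iend));
       GOMP_loop_end ();  */
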
/* The current dynamic implementation is always monotonic.  The
   entrypoints without nonmonotonic in them have to be always monotonic,
   but the nonmonotonic ones could be changed to use work-stealing for
   improved scalability.  */

static bool
gomp_loop_dynamic_start (long start, long end, long incr, long chunk_size,
                         long *istart, long *iend)
{
  struct gomp_thread *thr = gomp_thread ();
  bool ret;

  if (gomp_work_share_start (false))
    {
      gomp_loop_init (thr->ts.work_share, start, end, incr,
                      GFS_DYNAMIC, chunk_size);
      gomp_work_share_init_done ();
    }

#ifdef HAVE_SYNC_BUILTINS
  ret = gomp_iter_dynamic_next (istart, iend);
#else
  gomp_mutex_lock (&thr->ts.work_share->lock);
  ret = gomp_iter_dynamic_next_locked (istart, iend);
  gomp_mutex_unlock (&thr->ts.work_share->lock);
#endif

  return ret;
}

/* Similarly to dynamic, though the open question is how the chunk sizes
   could be decreased without central locking or atomics.  */

static bool
gomp_loop_guided_start (long start, long end, long incr, long chunk_size,
                        long *istart, long *iend)
{
  struct gomp_thread *thr = gomp_thread ();
  bool ret;

  if (gomp_work_share_start (false))
    {
      gomp_loop_init (thr->ts.work_share, start, end, incr,
                      GFS_GUIDED, chunk_size);
      gomp_work_share_init_done ();
    }

#ifdef HAVE_SYNC_BUILTINS
  ret = gomp_iter_guided_next (istart, iend);
#else
  gomp_mutex_lock (&thr->ts.work_share->lock);
  ret = gomp_iter_guided_next_locked (istart, iend);
  gomp_mutex_unlock (&thr->ts.work_share->lock);
#endif

  return ret;
}

bool
GOMP_loop_runtime_start (long start, long end, long incr,
                         long *istart, long *iend)
{
  struct gomp_task_icv *icv = gomp_icv (false);
  switch (icv->run_sched_var)
    {
    case GFS_STATIC:
      return gomp_loop_static_start (start, end, incr,
                                     icv->run_sched_chunk_size,
                                     istart, iend);
    case GFS_DYNAMIC:
      return gomp_loop_dynamic_start (start, end, incr,
                                      icv->run_sched_chunk_size,
                                      istart, iend);
    case GFS_GUIDED:
      return gomp_loop_guided_start (start, end, incr,
                                     icv->run_sched_chunk_size,
                                     istart, iend);
    case GFS_AUTO:
      /* For now map to schedule(static), later on we could play with feedback
         driven choice.  */
      return gomp_loop_static_start (start, end, incr, 0, istart, iend);
    default:
      abort ();
    }
}

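/* Illustrative note (added commentary, not from the original source):
   the run-sched ICV consulted above is typically set through the
   OMP_SCHEDULE environment variable; e.g. OMP_SCHEDULE="guided,7"
   makes GOMP_loop_runtime_start defer to gomp_loop_guided_start with a
   chunk size of 7, while "auto" takes the static mapping above.  */
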
/* The *_ordered_*_start routines are similar.  The only difference is that
   this work-share construct is initialized to expect an ORDERED section.  */

static bool
gomp_loop_ordered_static_start (long start, long end, long incr,
                                long chunk_size, long *istart, long *iend)
{
  struct gomp_thread *thr = gomp_thread ();

  thr->ts.static_trip = 0;
  if (gomp_work_share_start (true))
    {
      gomp_loop_init (thr->ts.work_share, start, end, incr,
                      GFS_STATIC, chunk_size);
      gomp_ordered_static_init ();
      gomp_work_share_init_done ();
    }

  return !gomp_iter_static_next (istart, iend);
}

static bool
gomp_loop_ordered_dynamic_start (long start, long end, long incr,
                                 long chunk_size, long *istart, long *iend)
{
  struct gomp_thread *thr = gomp_thread ();
  bool ret;

  if (gomp_work_share_start (true))
    {
      gomp_loop_init (thr->ts.work_share, start, end, incr,
                      GFS_DYNAMIC, chunk_size);
      gomp_mutex_lock (&thr->ts.work_share->lock);
      gomp_work_share_init_done ();
    }
  else
    gomp_mutex_lock (&thr->ts.work_share->lock);

  ret = gomp_iter_dynamic_next_locked (istart, iend);
  if (ret)
    gomp_ordered_first ();
  gomp_mutex_unlock (&thr->ts.work_share->lock);

  return ret;
}

static bool
gomp_loop_ordered_guided_start (long start, long end, long incr,
                                long chunk_size, long *istart, long *iend)
{
  struct gomp_thread *thr = gomp_thread ();
  bool ret;

  if (gomp_work_share_start (true))
    {
      gomp_loop_init (thr->ts.work_share, start, end, incr,
                      GFS_GUIDED, chunk_size);
      gomp_mutex_lock (&thr->ts.work_share->lock);
      gomp_work_share_init_done ();
    }
  else
    gomp_mutex_lock (&thr->ts.work_share->lock);

  ret = gomp_iter_guided_next_locked (istart, iend);
  if (ret)
    gomp_ordered_first ();
  gomp_mutex_unlock (&thr->ts.work_share->lock);

  return ret;
}

bool
GOMP_loop_ordered_runtime_start (long start, long end, long incr,
                                 long *istart, long *iend)
{
  struct gomp_task_icv *icv = gomp_icv (false);
  switch (icv->run_sched_var)
    {
    case GFS_STATIC:
      return gomp_loop_ordered_static_start (start, end, incr,
                                             icv->run_sched_chunk_size,
                                             istart, iend);
    case GFS_DYNAMIC:
      return gomp_loop_ordered_dynamic_start (start, end, incr,
                                              icv->run_sched_chunk_size,
                                              istart, iend);
    case GFS_GUIDED:
      return gomp_loop_ordered_guided_start (start, end, incr,
                                             icv->run_sched_chunk_size,
                                             istart, iend);
    case GFS_AUTO:
      /* For now map to schedule(static), later on we could play with feedback
         driven choice.  */
      return gomp_loop_ordered_static_start (start, end, incr,
                                             0, istart, iend);
    default:
      abort ();
    }
}

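/* Example (an illustrative sketch, not part of the original source;
   compute/emit and the chunk size of 1 are placeholders): for an
   ordered loop such as

       #pragma omp for ordered schedule (dynamic)
       for (i = 0; i < n; i++)
         {
           compute (i);
         #pragma omp ordered
           emit (i);
         }

   the compiler uses the *_ordered_* entry points and brackets the
   ordered region with GOMP_ordered_start ()/GOMP_ordered_end (),
   roughly:

       long istart, iend;
       if (GOMP_loop_ordered_dynamic_start (0, n, 1, 1, &istart, &iend))
         do
           for (long i = istart; i < iend; i++)
             {
               compute (i);
               GOMP_ordered_start ();
               emit (i);
               GOMP_ordered_end ();
             }
         while (GOMP_loop_ordered_dynamic_next (&istart, &iend));
       GOMP_loop_end ();  */
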
/* The *_doacross_*_start routines are similar.  The only difference is that
   this work-share construct is initialized to expect an ORDERED(N) - DOACROSS
   section, and the worksharing loop always iterates from 0 to COUNTS[0] - 1,
   while the other COUNTS array elements tell the library the number of
   iterations of the ordered inner loops.  */

static bool
gomp_loop_doacross_static_start (unsigned ncounts, long *counts,
                                 long chunk_size, long *istart, long *iend)
{
  struct gomp_thread *thr = gomp_thread ();

  thr->ts.static_trip = 0;
  if (gomp_work_share_start (false))
    {
      gomp_loop_init (thr->ts.work_share, 0, counts[0], 1,
                      GFS_STATIC, chunk_size);
      gomp_doacross_init (ncounts, counts, chunk_size);
      gomp_work_share_init_done ();
    }

  return !gomp_iter_static_next (istart, iend);
}

static bool
gomp_loop_doacross_dynamic_start (unsigned ncounts, long *counts,
                                  long chunk_size, long *istart, long *iend)
{
  struct gomp_thread *thr = gomp_thread ();
  bool ret;

  if (gomp_work_share_start (false))
    {
      gomp_loop_init (thr->ts.work_share, 0, counts[0], 1,
                      GFS_DYNAMIC, chunk_size);
      gomp_doacross_init (ncounts, counts, chunk_size);
      gomp_work_share_init_done ();
    }

#ifdef HAVE_SYNC_BUILTINS
  ret = gomp_iter_dynamic_next (istart, iend);
#else
  gomp_mutex_lock (&thr->ts.work_share->lock);
  ret = gomp_iter_dynamic_next_locked (istart, iend);
  gomp_mutex_unlock (&thr->ts.work_share->lock);
#endif

  return ret;
}

static bool
gomp_loop_doacross_guided_start (unsigned ncounts, long *counts,
                                 long chunk_size, long *istart, long *iend)
{
  struct gomp_thread *thr = gomp_thread ();
  bool ret;

  if (gomp_work_share_start (false))
    {
      gomp_loop_init (thr->ts.work_share, 0, counts[0], 1,
                      GFS_GUIDED, chunk_size);
      gomp_doacross_init (ncounts, counts, chunk_size);
      gomp_work_share_init_done ();
    }

#ifdef HAVE_SYNC_BUILTINS
  ret = gomp_iter_guided_next (istart, iend);
#else
  gomp_mutex_lock (&thr->ts.work_share->lock);
  ret = gomp_iter_guided_next_locked (istart, iend);
  gomp_mutex_unlock (&thr->ts.work_share->lock);
#endif

  return ret;
}

bool
GOMP_loop_doacross_runtime_start (unsigned ncounts, long *counts,
                                  long *istart, long *iend)
{
  struct gomp_task_icv *icv = gomp_icv (false);
  switch (icv->run_sched_var)
    {
    case GFS_STATIC:
      return gomp_loop_doacross_static_start (ncounts, counts,
                                              icv->run_sched_chunk_size,
                                              istart, iend);
    case GFS_DYNAMIC:
      return gomp_loop_doacross_dynamic_start (ncounts, counts,
                                               icv->run_sched_chunk_size,
                                               istart, iend);
    case GFS_GUIDED:
      return gomp_loop_doacross_guided_start (ncounts, counts,
                                              icv->run_sched_chunk_size,
                                              istart, iend);
    case GFS_AUTO:
      /* For now map to schedule(static), later on we could play with feedback
         driven choice.  */
      return gomp_loop_doacross_static_start (ncounts, counts,
                                              0, istart, iend);
    default:
      abort ();
    }
}

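/* Example (an illustrative sketch, not part of the original source;
   work and the bounds are placeholders): for a doacross nest such as

       #pragma omp for ordered(2)
       for (i = 0; i < n; i++)
         for (j = 0; j < m; j++)
           {
           #pragma omp ordered depend (sink: i - 1, j)
             work (i, j);
           #pragma omp ordered depend (source)
           }

   the compiler passes ncounts = 2 and counts[] = { n, m }: the work
   share itself only distributes the outer 0 .. n - 1 space, while the
   cross-iteration synchronization goes through the separate
   GOMP_doacross_post ()/GOMP_doacross_wait () entry points.  */
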
/* The *_next routines are called when the thread completes processing of
   the iteration block currently assigned to it.  If the work-share
   construct is bound directly to a parallel construct, then the iteration
   bounds may have been set up before the parallel.  In which case, this
   may be the first iteration for the thread.

   Returns true if there is work remaining to be performed; *ISTART and
   *IEND are filled with a new iteration block.  Returns false if all work
   has been assigned.  */

static bool
gomp_loop_static_next (long *istart, long *iend)
{
  return !gomp_iter_static_next (istart, iend);
}

static bool
gomp_loop_dynamic_next (long *istart, long *iend)
{
  bool ret;

#ifdef HAVE_SYNC_BUILTINS
  ret = gomp_iter_dynamic_next (istart, iend);
#else
  {
    struct gomp_thread *thr = gomp_thread ();
    gomp_mutex_lock (&thr->ts.work_share->lock);
    ret = gomp_iter_dynamic_next_locked (istart, iend);
    gomp_mutex_unlock (&thr->ts.work_share->lock);
  }
#endif

  return ret;
}

static bool
gomp_loop_guided_next (long *istart, long *iend)
{
  bool ret;

#ifdef HAVE_SYNC_BUILTINS
  ret = gomp_iter_guided_next (istart, iend);
#else
  {
    struct gomp_thread *thr = gomp_thread ();
    gomp_mutex_lock (&thr->ts.work_share->lock);
    ret = gomp_iter_guided_next_locked (istart, iend);
    gomp_mutex_unlock (&thr->ts.work_share->lock);
  }
#endif

  return ret;
}

bool
GOMP_loop_runtime_next (long *istart, long *iend)
{
  struct gomp_thread *thr = gomp_thread ();

  switch (thr->ts.work_share->sched)
    {
    case GFS_STATIC:
    case GFS_AUTO:
      return gomp_loop_static_next (istart, iend);
    case GFS_DYNAMIC:
      return gomp_loop_dynamic_next (istart, iend);
    case GFS_GUIDED:
      return gomp_loop_guided_next (istart, iend);
    default:
      abort ();
    }
}

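/* Example (an illustrative sketch, not part of the original source;
   body is a placeholder): the runtime entry points pair up in the same
   driver shape as the static example earlier, with the schedule
   resolved once per construct from the ICVs:

       long istart, iend;
       if (GOMP_loop_runtime_start (0, n, 1, &istart, &iend))
         do
           for (long i = istart; i < iend; i++)
             body (i);
         while (GOMP_loop_runtime_next (&istart, &iend));
       GOMP_loop_end ();  */
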
/* The *_ordered_*_next routines are called when the thread completes
   processing of the iteration block currently assigned to it.

   Returns true if there is work remaining to be performed; *ISTART and
   *IEND are filled with a new iteration block.  Returns false if all work
   has been assigned.  */

static bool
gomp_loop_ordered_static_next (long *istart, long *iend)
{
  struct gomp_thread *thr = gomp_thread ();
  int test;

  gomp_ordered_sync ();
  gomp_mutex_lock (&thr->ts.work_share->lock);
  test = gomp_iter_static_next (istart, iend);
  if (test >= 0)
    gomp_ordered_static_next ();
  gomp_mutex_unlock (&thr->ts.work_share->lock);

  return test == 0;
}

static bool
gomp_loop_ordered_dynamic_next (long *istart, long *iend)
{
  struct gomp_thread *thr = gomp_thread ();
  bool ret;

  gomp_ordered_sync ();
  gomp_mutex_lock (&thr->ts.work_share->lock);
  ret = gomp_iter_dynamic_next_locked (istart, iend);
  if (ret)
    gomp_ordered_next ();
  else
    gomp_ordered_last ();
  gomp_mutex_unlock (&thr->ts.work_share->lock);

  return ret;
}

static bool
gomp_loop_ordered_guided_next (long *istart, long *iend)
{
  struct gomp_thread *thr = gomp_thread ();
  bool ret;

  gomp_ordered_sync ();
  gomp_mutex_lock (&thr->ts.work_share->lock);
  ret = gomp_iter_guided_next_locked (istart, iend);
  if (ret)
    gomp_ordered_next ();
  else
    gomp_ordered_last ();
  gomp_mutex_unlock (&thr->ts.work_share->lock);

  return ret;
}

bool
GOMP_loop_ordered_runtime_next (long *istart, long *iend)
{
  struct gomp_thread *thr = gomp_thread ();

  switch (thr->ts.work_share->sched)
    {
    case GFS_STATIC:
    case GFS_AUTO:
      return gomp_loop_ordered_static_next (istart, iend);
    case GFS_DYNAMIC:
      return gomp_loop_ordered_dynamic_next (istart, iend);
    case GFS_GUIDED:
      return gomp_loop_ordered_guided_next (istart, iend);
    default:
      abort ();
    }
}

/* The GOMP_parallel_loop_* routines pre-initialize a work-share construct
   to avoid one synchronization once we get into the loop.  */

static void
gomp_parallel_loop_start (void (*fn) (void *), void *data,
                          unsigned num_threads, long start, long end,
                          long incr, enum gomp_schedule_type sched,
                          long chunk_size, unsigned int flags)
{
  struct gomp_team *team;

  num_threads = gomp_resolve_num_threads (num_threads, 0);
  team = gomp_new_team (num_threads);
  gomp_loop_init (&team->work_shares[0], start, end, incr, sched, chunk_size);
  gomp_team_start (fn, data, num_threads, flags, team);
}

void
GOMP_parallel_loop_static_start (void (*fn) (void *), void *data,
                                 unsigned num_threads, long start, long end,
                                 long incr, long chunk_size)
{
  gomp_parallel_loop_start (fn, data, num_threads, start, end, incr,
                            GFS_STATIC, chunk_size, 0);
}

void
GOMP_parallel_loop_dynamic_start (void (*fn) (void *), void *data,
                                  unsigned num_threads, long start, long end,
                                  long incr, long chunk_size)
{
  gomp_parallel_loop_start (fn, data, num_threads, start, end, incr,
                            GFS_DYNAMIC, chunk_size, 0);
}

void
GOMP_parallel_loop_guided_start (void (*fn) (void *), void *data,
                                 unsigned num_threads, long start, long end,
                                 long incr, long chunk_size)
{
  gomp_parallel_loop_start (fn, data, num_threads, start, end, incr,
                            GFS_GUIDED, chunk_size, 0);
}

void
GOMP_parallel_loop_runtime_start (void (*fn) (void *), void *data,
                                  unsigned num_threads, long start, long end,
                                  long incr)
{
  struct gomp_task_icv *icv = gomp_icv (false);
  gomp_parallel_loop_start (fn, data, num_threads, start, end, incr,
                            icv->run_sched_var, icv->run_sched_chunk_size, 0);
}

ialias_redirect (GOMP_parallel_end)

void
GOMP_parallel_loop_static (void (*fn) (void *), void *data,
                           unsigned num_threads, long start, long end,
                           long incr, long chunk_size, unsigned flags)
{
  gomp_parallel_loop_start (fn, data, num_threads, start, end, incr,
                            GFS_STATIC, chunk_size, flags);
  fn (data);
  GOMP_parallel_end ();
}

void
GOMP_parallel_loop_dynamic (void (*fn) (void *), void *data,
                            unsigned num_threads, long start, long end,
                            long incr, long chunk_size, unsigned flags)
{
  gomp_parallel_loop_start (fn, data, num_threads, start, end, incr,
                            GFS_DYNAMIC, chunk_size, flags);
  fn (data);
  GOMP_parallel_end ();
}

void
GOMP_parallel_loop_guided (void (*fn) (void *), void *data,
                           unsigned num_threads, long start, long end,
                           long incr, long chunk_size, unsigned flags)
{
  gomp_parallel_loop_start (fn, data, num_threads, start, end, incr,
                            GFS_GUIDED, chunk_size, flags);
  fn (data);
  GOMP_parallel_end ();
}

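/* Example (an illustrative sketch, not part of the original source;
   subfn/data are placeholder names for the outlined body): a combined

       #pragma omp parallel for schedule (dynamic, 16)
       for (i = 0; i < n; i++)
         body (i);

   expands roughly to

       GOMP_parallel_loop_dynamic (subfn, &data, 0, 0, n, 1, 16, 0);

   Because gomp_parallel_loop_start pre-initializes the team's first
   work share, subfn can go straight to GOMP_loop_dynamic_next without
   a *_start call, as described in the *_next commentary above.  */
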
#ifdef HAVE_ATTRIBUTE_ALIAS
extern __typeof(GOMP_parallel_loop_dynamic) GOMP_parallel_loop_nonmonotonic_dynamic
        __attribute__((alias ("GOMP_parallel_loop_dynamic")));
extern __typeof(GOMP_parallel_loop_guided) GOMP_parallel_loop_nonmonotonic_guided
        __attribute__((alias ("GOMP_parallel_loop_guided")));
#else
void
GOMP_parallel_loop_nonmonotonic_dynamic (void (*fn) (void *), void *data,
                                         unsigned num_threads, long start,
                                         long end, long incr, long chunk_size,
                                         unsigned flags)
{
  gomp_parallel_loop_start (fn, data, num_threads, start, end, incr,
                            GFS_DYNAMIC, chunk_size, flags);
  fn (data);
  GOMP_parallel_end ();
}

void
GOMP_parallel_loop_nonmonotonic_guided (void (*fn) (void *), void *data,
                                        unsigned num_threads, long start,
                                        long end, long incr, long chunk_size,
                                        unsigned flags)
{
  gomp_parallel_loop_start (fn, data, num_threads, start, end, incr,
                            GFS_GUIDED, chunk_size, flags);
  fn (data);
  GOMP_parallel_end ();
}
#endif

void
GOMP_parallel_loop_runtime (void (*fn) (void *), void *data,
                            unsigned num_threads, long start, long end,
                            long incr, unsigned flags)
{
  struct gomp_task_icv *icv = gomp_icv (false);
  gomp_parallel_loop_start (fn, data, num_threads, start, end, incr,
                            icv->run_sched_var, icv->run_sched_chunk_size,
                            flags);
  fn (data);
  GOMP_parallel_end ();
}

/* The GOMP_loop_end* routines are called after the thread is told that
   all loop iterations are complete.  The first two versions synchronize
   all threads; the nowait version does not.  */

void
GOMP_loop_end (void)
{
  gomp_work_share_end ();
}

bool
GOMP_loop_end_cancel (void)
{
  return gomp_work_share_end_cancel ();
}

void
GOMP_loop_end_nowait (void)
{
  gomp_work_share_end_nowait ();
}

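/* Illustrative note (added commentary, not from the original source):
   in a cancellable loop the driver ends with the cancelling variant,
   roughly

       if (GOMP_loop_end_cancel ())
         goto cancelled;

   i.e. the return value reports whether the construct was cancelled,
   in which case the thread abandons the loop.  */
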
/* We use static functions above so that we're sure that the "runtime"
   function can defer to the proper routine without interposition.  We
   export the static function with a strong alias when possible, or with
   a wrapper function otherwise.  */

#ifdef HAVE_ATTRIBUTE_ALIAS
extern __typeof(gomp_loop_static_start) GOMP_loop_static_start
        __attribute__((alias ("gomp_loop_static_start")));
extern __typeof(gomp_loop_dynamic_start) GOMP_loop_dynamic_start
        __attribute__((alias ("gomp_loop_dynamic_start")));
extern __typeof(gomp_loop_guided_start) GOMP_loop_guided_start
        __attribute__((alias ("gomp_loop_guided_start")));
extern __typeof(gomp_loop_dynamic_start) GOMP_loop_nonmonotonic_dynamic_start
        __attribute__((alias ("gomp_loop_dynamic_start")));
extern __typeof(gomp_loop_guided_start) GOMP_loop_nonmonotonic_guided_start
        __attribute__((alias ("gomp_loop_guided_start")));

extern __typeof(gomp_loop_ordered_static_start) GOMP_loop_ordered_static_start
        __attribute__((alias ("gomp_loop_ordered_static_start")));
extern __typeof(gomp_loop_ordered_dynamic_start) GOMP_loop_ordered_dynamic_start
        __attribute__((alias ("gomp_loop_ordered_dynamic_start")));
extern __typeof(gomp_loop_ordered_guided_start) GOMP_loop_ordered_guided_start
        __attribute__((alias ("gomp_loop_ordered_guided_start")));

extern __typeof(gomp_loop_doacross_static_start) GOMP_loop_doacross_static_start
        __attribute__((alias ("gomp_loop_doacross_static_start")));
extern __typeof(gomp_loop_doacross_dynamic_start) GOMP_loop_doacross_dynamic_start
        __attribute__((alias ("gomp_loop_doacross_dynamic_start")));
extern __typeof(gomp_loop_doacross_guided_start) GOMP_loop_doacross_guided_start
        __attribute__((alias ("gomp_loop_doacross_guided_start")));

extern __typeof(gomp_loop_static_next) GOMP_loop_static_next
        __attribute__((alias ("gomp_loop_static_next")));
extern __typeof(gomp_loop_dynamic_next) GOMP_loop_dynamic_next
        __attribute__((alias ("gomp_loop_dynamic_next")));
extern __typeof(gomp_loop_guided_next) GOMP_loop_guided_next
        __attribute__((alias ("gomp_loop_guided_next")));
extern __typeof(gomp_loop_dynamic_next) GOMP_loop_nonmonotonic_dynamic_next
        __attribute__((alias ("gomp_loop_dynamic_next")));
extern __typeof(gomp_loop_guided_next) GOMP_loop_nonmonotonic_guided_next
        __attribute__((alias ("gomp_loop_guided_next")));

extern __typeof(gomp_loop_ordered_static_next) GOMP_loop_ordered_static_next
        __attribute__((alias ("gomp_loop_ordered_static_next")));
extern __typeof(gomp_loop_ordered_dynamic_next) GOMP_loop_ordered_dynamic_next
        __attribute__((alias ("gomp_loop_ordered_dynamic_next")));
extern __typeof(gomp_loop_ordered_guided_next) GOMP_loop_ordered_guided_next
        __attribute__((alias ("gomp_loop_ordered_guided_next")));
#else
bool
GOMP_loop_static_start (long start, long end, long incr, long chunk_size,
                        long *istart, long *iend)
{
  return gomp_loop_static_start (start, end, incr, chunk_size, istart, iend);
}

bool
GOMP_loop_dynamic_start (long start, long end, long incr, long chunk_size,
                         long *istart, long *iend)
{
  return gomp_loop_dynamic_start (start, end, incr, chunk_size, istart, iend);
}

bool
GOMP_loop_guided_start (long start, long end, long incr, long chunk_size,
                        long *istart, long *iend)
{
  return gomp_loop_guided_start (start, end, incr, chunk_size, istart, iend);
}

bool
GOMP_loop_nonmonotonic_dynamic_start (long start, long end, long incr,
                                      long chunk_size, long *istart,
                                      long *iend)
{
  return gomp_loop_dynamic_start (start, end, incr, chunk_size, istart, iend);
}

bool
GOMP_loop_nonmonotonic_guided_start (long start, long end, long incr,
                                     long chunk_size, long *istart, long *iend)
{
  return gomp_loop_guided_start (start, end, incr, chunk_size, istart, iend);
}

bool
GOMP_loop_ordered_static_start (long start, long end, long incr,
                                long chunk_size, long *istart, long *iend)
{
  return gomp_loop_ordered_static_start (start, end, incr, chunk_size,
                                         istart, iend);
}

bool
GOMP_loop_ordered_dynamic_start (long start, long end, long incr,
                                 long chunk_size, long *istart, long *iend)
{
  return gomp_loop_ordered_dynamic_start (start, end, incr, chunk_size,
                                          istart, iend);
}

bool
GOMP_loop_ordered_guided_start (long start, long end, long incr,
                                long chunk_size, long *istart, long *iend)
{
  return gomp_loop_ordered_guided_start (start, end, incr, chunk_size,
                                         istart, iend);
}

bool
GOMP_loop_doacross_static_start (unsigned ncounts, long *counts,
                                 long chunk_size, long *istart, long *iend)
{
  return gomp_loop_doacross_static_start (ncounts, counts, chunk_size,
                                          istart, iend);
}

bool
GOMP_loop_doacross_dynamic_start (unsigned ncounts, long *counts,
                                  long chunk_size, long *istart, long *iend)
{
  return gomp_loop_doacross_dynamic_start (ncounts, counts, chunk_size,
                                           istart, iend);
}

bool
GOMP_loop_doacross_guided_start (unsigned ncounts, long *counts,
                                 long chunk_size, long *istart, long *iend)
{
  return gomp_loop_doacross_guided_start (ncounts, counts, chunk_size,
                                          istart, iend);
}

bool
GOMP_loop_static_next (long *istart, long *iend)
{
  return gomp_loop_static_next (istart, iend);
}

bool
GOMP_loop_dynamic_next (long *istart, long *iend)
{
  return gomp_loop_dynamic_next (istart, iend);
}

bool
GOMP_loop_guided_next (long *istart, long *iend)
{
  return gomp_loop_guided_next (istart, iend);
}

bool
GOMP_loop_nonmonotonic_dynamic_next (long *istart, long *iend)
{
  return gomp_loop_dynamic_next (istart, iend);
}

bool
GOMP_loop_nonmonotonic_guided_next (long *istart, long *iend)
{
  return gomp_loop_guided_next (istart, iend);
}

bool
GOMP_loop_ordered_static_next (long *istart, long *iend)
{
  return gomp_loop_ordered_static_next (istart, iend);
}

bool
GOMP_loop_ordered_dynamic_next (long *istart, long *iend)
{
  return gomp_loop_ordered_dynamic_next (istart, iend);
}

bool
GOMP_loop_ordered_guided_next (long *istart, long *iend)
{
  return gomp_loop_ordered_guided_next (istart, iend);
}
#endif