/* Copyright (C) 2005-2015 Free Software Foundation, Inc.
   Contributed by Richard Henderson <rth@redhat.com>.

   This file is part of the GNU Offloading and Multi Processing Library
   (libgomp).

   Libgomp is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
   FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */

/* This file handles the LOOP (FOR/DO) construct.  */

#include <limits.h>
#include <stdlib.h>
#include "libgomp.h"

/* Initialize the given work share construct from the given arguments.  */

static inline void
gomp_loop_init (struct gomp_work_share *ws, long start, long end, long incr,
                enum gomp_schedule_type sched, long chunk_size)
{
  ws->sched = sched;
  ws->chunk_size = chunk_size;
  /* Canonicalize loops that have zero iterations to ->next == ->end.  */
  ws->end = ((incr > 0 && start > end) || (incr < 0 && start < end))
            ? start : end;
  ws->incr = incr;
  ws->next = start;
  if (sched == GFS_DYNAMIC)
    {
      ws->chunk_size *= incr;

#ifdef HAVE_SYNC_BUILTINS
      {
        /* For dynamic scheduling prepare things to make each iteration
           faster.  */
        struct gomp_thread *thr = gomp_thread ();
        struct gomp_team *team = thr->ts.team;
        long nthreads = team ? team->nthreads : 1;

        if (__builtin_expect (incr > 0, 1))
          {
            /* Cheap overflow protection.  */
            if (__builtin_expect ((nthreads | ws->chunk_size)
                                  >= 1UL << (sizeof (long)
                                             * __CHAR_BIT__ / 2 - 1), 0))
              ws->mode = 0;
            else
              ws->mode = ws->end < (LONG_MAX
                                    - (nthreads + 1) * ws->chunk_size);
          }
        /* Cheap overflow protection.  */
        else if (__builtin_expect ((nthreads | -ws->chunk_size)
                                   >= 1UL << (sizeof (long)
                                              * __CHAR_BIT__ / 2 - 1), 0))
          ws->mode = 0;
        else
          ws->mode = ws->end > (nthreads + 1) * -ws->chunk_size - LONG_MAX;
      }
#endif
    }
}

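/* Editorial note (not from the original sources): ws->mode is consumed by
   gomp_iter_dynamic_next in iter.c.  When nonzero it selects a fast path
   that claims a chunk with a single atomic fetch-and-add on ws->next
   instead of a compare-and-swap loop; that is only safe if even
   NTHREADS + 1 threads simultaneously over-advancing ws->next by a whole
   chunk cannot wrap a long past ws->end.  The tests above establish that
   bound, and conservatively clear the mode when nthreads or chunk_size is
   so large that the bound computation itself could overflow.  */
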
/* The *_start routines are called when first encountering a loop construct
   that is not bound directly to a parallel construct.  The first thread
   that arrives will create the work-share construct; subsequent threads
   will see the construct exists and allocate work from it.

   START, END, INCR are the bounds of the loop; due to the restrictions of
   OpenMP, these values must be the same in every thread.  This is not
   verified (nor is it entirely verifiable, since START is not necessarily
   retained intact in the work-share data structure).  CHUNK_SIZE is the
   scheduling parameter; again this must be identical in all threads.

   Returns true if there's any work for this thread to perform.  If so,
   *ISTART and *IEND are filled with the bounds of the iteration block
   allocated to this thread.  Returns false if all work was assigned to
   other threads prior to this thread's arrival.  */

static bool
gomp_loop_static_start (long start, long end, long incr, long chunk_size,
                        long *istart, long *iend)
{
  struct gomp_thread *thr = gomp_thread ();

  thr->ts.static_trip = 0;
  if (gomp_work_share_start (false))
    {
      gomp_loop_init (thr->ts.work_share, start, end, incr,
                      GFS_STATIC, chunk_size);
      gomp_work_share_init_done ();
    }

  return !gomp_iter_static_next (istart, iend);
}

static bool
gomp_loop_dynamic_start (long start, long end, long incr, long chunk_size,
                         long *istart, long *iend)
{
  struct gomp_thread *thr = gomp_thread ();
  bool ret;

  if (gomp_work_share_start (false))
    {
      gomp_loop_init (thr->ts.work_share, start, end, incr,
                      GFS_DYNAMIC, chunk_size);
      gomp_work_share_init_done ();
    }

#ifdef HAVE_SYNC_BUILTINS
  ret = gomp_iter_dynamic_next (istart, iend);
#else
  gomp_mutex_lock (&thr->ts.work_share->lock);
  ret = gomp_iter_dynamic_next_locked (istart, iend);
  gomp_mutex_unlock (&thr->ts.work_share->lock);
#endif

  return ret;
}

static bool
gomp_loop_guided_start (long start, long end, long incr, long chunk_size,
                        long *istart, long *iend)
{
  struct gomp_thread *thr = gomp_thread ();
  bool ret;

  if (gomp_work_share_start (false))
    {
      gomp_loop_init (thr->ts.work_share, start, end, incr,
                      GFS_GUIDED, chunk_size);
      gomp_work_share_init_done ();
    }

#ifdef HAVE_SYNC_BUILTINS
  ret = gomp_iter_guided_next (istart, iend);
#else
  gomp_mutex_lock (&thr->ts.work_share->lock);
  ret = gomp_iter_guided_next_locked (istart, iend);
  gomp_mutex_unlock (&thr->ts.work_share->lock);
#endif

  return ret;
}

bool
GOMP_loop_runtime_start (long start, long end, long incr,
                         long *istart, long *iend)
{
  struct gomp_task_icv *icv = gomp_icv (false);
  switch (icv->run_sched_var)
    {
    case GFS_STATIC:
      return gomp_loop_static_start (start, end, incr,
                                     icv->run_sched_chunk_size,
                                     istart, iend);
    case GFS_DYNAMIC:
      return gomp_loop_dynamic_start (start, end, incr,
                                      icv->run_sched_chunk_size,
                                      istart, iend);
    case GFS_GUIDED:
      return gomp_loop_guided_start (start, end, incr,
                                     icv->run_sched_chunk_size,
                                     istart, iend);
    case GFS_AUTO:
      /* For now map to schedule(static), later on we could play with feedback
         driven choice.  */
      return gomp_loop_static_start (start, end, incr, 0, istart, iend);
    default:
      abort ();
    }
}

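/* Informal usage sketch (editorial addition, not part of the original
   sources): for a worksharing loop bound to an enclosing parallel region,
   such as

       #pragma omp for schedule(runtime)
       for (i = 0; i < n; i++)
         body (i);

   each thread executes roughly the sequence below, where `n' and `body'
   are placeholders:

       long istart, iend;
       if (GOMP_loop_runtime_start (0, n, 1, &istart, &iend))
         do
           {
             long i;
             for (i = istart; i < iend; i++)
               body (i);
           }
         while (GOMP_loop_runtime_next (&istart, &iend));
       GOMP_loop_end ();
*/
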
/* The *_ordered_*_start routines are similar.  The only difference is that
   this work-share construct is initialized to expect an ORDERED section.  */

static bool
gomp_loop_ordered_static_start (long start, long end, long incr,
                                long chunk_size, long *istart, long *iend)
{
  struct gomp_thread *thr = gomp_thread ();

  thr->ts.static_trip = 0;
  if (gomp_work_share_start (true))
    {
      gomp_loop_init (thr->ts.work_share, start, end, incr,
                      GFS_STATIC, chunk_size);
      gomp_ordered_static_init ();
      gomp_work_share_init_done ();
    }

  return !gomp_iter_static_next (istart, iend);
}

static bool
gomp_loop_ordered_dynamic_start (long start, long end, long incr,
                                 long chunk_size, long *istart, long *iend)
{
  struct gomp_thread *thr = gomp_thread ();
  bool ret;

  if (gomp_work_share_start (true))
    {
      gomp_loop_init (thr->ts.work_share, start, end, incr,
                      GFS_DYNAMIC, chunk_size);
      gomp_mutex_lock (&thr->ts.work_share->lock);
      gomp_work_share_init_done ();
    }
  else
    gomp_mutex_lock (&thr->ts.work_share->lock);

  ret = gomp_iter_dynamic_next_locked (istart, iend);
  if (ret)
    gomp_ordered_first ();
  gomp_mutex_unlock (&thr->ts.work_share->lock);

  return ret;
}

static bool
gomp_loop_ordered_guided_start (long start, long end, long incr,
                                long chunk_size, long *istart, long *iend)
{
  struct gomp_thread *thr = gomp_thread ();
  bool ret;

  if (gomp_work_share_start (true))
    {
      gomp_loop_init (thr->ts.work_share, start, end, incr,
                      GFS_GUIDED, chunk_size);
      gomp_mutex_lock (&thr->ts.work_share->lock);
      gomp_work_share_init_done ();
    }
  else
    gomp_mutex_lock (&thr->ts.work_share->lock);

  ret = gomp_iter_guided_next_locked (istart, iend);
  if (ret)
    gomp_ordered_first ();
  gomp_mutex_unlock (&thr->ts.work_share->lock);

  return ret;
}

bool
GOMP_loop_ordered_runtime_start (long start, long end, long incr,
                                 long *istart, long *iend)
{
  struct gomp_task_icv *icv = gomp_icv (false);
  switch (icv->run_sched_var)
    {
    case GFS_STATIC:
      return gomp_loop_ordered_static_start (start, end, incr,
                                             icv->run_sched_chunk_size,
                                             istart, iend);
    case GFS_DYNAMIC:
      return gomp_loop_ordered_dynamic_start (start, end, incr,
                                              icv->run_sched_chunk_size,
                                              istart, iend);
    case GFS_GUIDED:
      return gomp_loop_ordered_guided_start (start, end, incr,
                                             icv->run_sched_chunk_size,
                                             istart, iend);
    case GFS_AUTO:
      /* For now map to schedule(static), later on we could play with feedback
         driven choice.  */
      return gomp_loop_ordered_static_start (start, end, incr,
                                             0, istart, iend);
    default:
      abort ();
    }
}

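/* Informal usage sketch (editorial addition): for an ordered loop such as

       #pragma omp for ordered schedule(runtime)
       for (i = 0; i < n; i++)
         {
           compute (i);
           #pragma omp ordered
           output (i);
         }

   each thread executes roughly the following, where `n', `compute' and
   `output' are placeholders and GOMP_ordered_start/GOMP_ordered_end are
   the entry points from ordered.c:

       long istart, iend;
       if (GOMP_loop_ordered_runtime_start (0, n, 1, &istart, &iend))
         do
           {
             long i;
             for (i = istart; i < iend; i++)
               {
                 compute (i);
                 GOMP_ordered_start ();
                 output (i);
                 GOMP_ordered_end ();
               }
           }
         while (GOMP_loop_ordered_runtime_next (&istart, &iend));
       GOMP_loop_end ();
*/
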
/* The *_doacross_*_start routines are similar.  The only difference is that
   this work-share construct is initialized to expect an ORDERED(N) - DOACROSS
   section, and the worksharing loop iterates always from 0 to COUNTS[0] - 1
   and the other COUNTS array elements tell the library the number of
   iterations in the ordered inner loops.  */

static bool
gomp_loop_doacross_static_start (unsigned ncounts, long *counts,
                                 long chunk_size, long *istart, long *iend)
{
  struct gomp_thread *thr = gomp_thread ();

  thr->ts.static_trip = 0;
  if (gomp_work_share_start (false))
    {
      gomp_loop_init (thr->ts.work_share, 0, counts[0], 1,
                      GFS_STATIC, chunk_size);
      gomp_doacross_init (ncounts, counts, chunk_size);
      gomp_work_share_init_done ();
    }

  return !gomp_iter_static_next (istart, iend);
}

static bool
gomp_loop_doacross_dynamic_start (unsigned ncounts, long *counts,
                                  long chunk_size, long *istart, long *iend)
{
  struct gomp_thread *thr = gomp_thread ();
  bool ret;

  if (gomp_work_share_start (false))
    {
      gomp_loop_init (thr->ts.work_share, 0, counts[0], 1,
                      GFS_DYNAMIC, chunk_size);
      gomp_doacross_init (ncounts, counts, chunk_size);
      gomp_work_share_init_done ();
    }

#ifdef HAVE_SYNC_BUILTINS
  ret = gomp_iter_dynamic_next (istart, iend);
#else
  gomp_mutex_lock (&thr->ts.work_share->lock);
  ret = gomp_iter_dynamic_next_locked (istart, iend);
  gomp_mutex_unlock (&thr->ts.work_share->lock);
#endif

  return ret;
}

static bool
gomp_loop_doacross_guided_start (unsigned ncounts, long *counts,
                                 long chunk_size, long *istart, long *iend)
{
  struct gomp_thread *thr = gomp_thread ();
  bool ret;

  if (gomp_work_share_start (false))
    {
      gomp_loop_init (thr->ts.work_share, 0, counts[0], 1,
                      GFS_GUIDED, chunk_size);
      gomp_doacross_init (ncounts, counts, chunk_size);
      gomp_work_share_init_done ();
    }

#ifdef HAVE_SYNC_BUILTINS
  ret = gomp_iter_guided_next (istart, iend);
#else
  gomp_mutex_lock (&thr->ts.work_share->lock);
  ret = gomp_iter_guided_next_locked (istart, iend);
  gomp_mutex_unlock (&thr->ts.work_share->lock);
#endif

  return ret;
}

bool
GOMP_loop_doacross_runtime_start (unsigned ncounts, long *counts,
                                  long *istart, long *iend)
{
  struct gomp_task_icv *icv = gomp_icv (false);
  switch (icv->run_sched_var)
    {
    case GFS_STATIC:
      return gomp_loop_doacross_static_start (ncounts, counts,
                                              icv->run_sched_chunk_size,
                                              istart, iend);
    case GFS_DYNAMIC:
      return gomp_loop_doacross_dynamic_start (ncounts, counts,
                                               icv->run_sched_chunk_size,
                                               istart, iend);
    case GFS_GUIDED:
      return gomp_loop_doacross_guided_start (ncounts, counts,
                                              icv->run_sched_chunk_size,
                                              istart, iend);
    case GFS_AUTO:
      /* For now map to schedule(static), later on we could play with feedback
         driven choice.  */
      return gomp_loop_doacross_static_start (ncounts, counts,
                                              0, istart, iend);
    default:
      abort ();
    }
}

/* The *_next routines are called when the thread completes processing of
   the iteration block currently assigned to it.  If the work-share
   construct is bound directly to a parallel construct, then the iteration
   bounds may have been set up before the parallel, in which case this may
   be the first iteration for the thread.

   Returns true if there is work remaining to be performed; *ISTART and
   *IEND are filled with a new iteration block.  Returns false if all work
   has been assigned.  */

static bool
gomp_loop_static_next (long *istart, long *iend)
{
  return !gomp_iter_static_next (istart, iend);
}

static bool
gomp_loop_dynamic_next (long *istart, long *iend)
{
  bool ret;

#ifdef HAVE_SYNC_BUILTINS
  ret = gomp_iter_dynamic_next (istart, iend);
#else
  {
    struct gomp_thread *thr = gomp_thread ();
    gomp_mutex_lock (&thr->ts.work_share->lock);
    ret = gomp_iter_dynamic_next_locked (istart, iend);
    gomp_mutex_unlock (&thr->ts.work_share->lock);
  }
#endif

  return ret;
}

static bool
gomp_loop_guided_next (long *istart, long *iend)
{
  bool ret;

#ifdef HAVE_SYNC_BUILTINS
  ret = gomp_iter_guided_next (istart, iend);
#else
  {
    struct gomp_thread *thr = gomp_thread ();
    gomp_mutex_lock (&thr->ts.work_share->lock);
    ret = gomp_iter_guided_next_locked (istart, iend);
    gomp_mutex_unlock (&thr->ts.work_share->lock);
  }
#endif

  return ret;
}

bool
GOMP_loop_runtime_next (long *istart, long *iend)
{
  struct gomp_thread *thr = gomp_thread ();

  switch (thr->ts.work_share->sched)
    {
    case GFS_STATIC:
    case GFS_AUTO:
      return gomp_loop_static_next (istart, iend);
    case GFS_DYNAMIC:
      return gomp_loop_dynamic_next (istart, iend);
    case GFS_GUIDED:
      return gomp_loop_guided_next (istart, iend);
    default:
      abort ();
    }
}

/* The *_ordered_*_next routines are called when the thread completes
   processing of the iteration block currently assigned to it.

   Returns true if there is work remaining to be performed; *ISTART and
   *IEND are filled with a new iteration block.  Returns false if all work
   has been assigned.  */

static bool
gomp_loop_ordered_static_next (long *istart, long *iend)
{
  struct gomp_thread *thr = gomp_thread ();
  int test;

  gomp_ordered_sync ();
  gomp_mutex_lock (&thr->ts.work_share->lock);
  test = gomp_iter_static_next (istart, iend);
  if (test >= 0)
    gomp_ordered_static_next ();
  gomp_mutex_unlock (&thr->ts.work_share->lock);

  return test == 0;
}

static bool
gomp_loop_ordered_dynamic_next (long *istart, long *iend)
{
  struct gomp_thread *thr = gomp_thread ();
  bool ret;

  gomp_ordered_sync ();
  gomp_mutex_lock (&thr->ts.work_share->lock);
  ret = gomp_iter_dynamic_next_locked (istart, iend);
  if (ret)
    gomp_ordered_next ();
  else
    gomp_ordered_last ();
  gomp_mutex_unlock (&thr->ts.work_share->lock);

  return ret;
}

static bool
gomp_loop_ordered_guided_next (long *istart, long *iend)
{
  struct gomp_thread *thr = gomp_thread ();
  bool ret;

  gomp_ordered_sync ();
  gomp_mutex_lock (&thr->ts.work_share->lock);
  ret = gomp_iter_guided_next_locked (istart, iend);
  if (ret)
    gomp_ordered_next ();
  else
    gomp_ordered_last ();
  gomp_mutex_unlock (&thr->ts.work_share->lock);

  return ret;
}

bool
GOMP_loop_ordered_runtime_next (long *istart, long *iend)
{
  struct gomp_thread *thr = gomp_thread ();

  switch (thr->ts.work_share->sched)
    {
    case GFS_STATIC:
    case GFS_AUTO:
      return gomp_loop_ordered_static_next (istart, iend);
    case GFS_DYNAMIC:
      return gomp_loop_ordered_dynamic_next (istart, iend);
    case GFS_GUIDED:
      return gomp_loop_ordered_guided_next (istart, iend);
    default:
      abort ();
    }
}

/* The GOMP_parallel_loop_* routines pre-initialize a work-share construct
   to avoid one synchronization once we get into the loop.  */

static void
gomp_parallel_loop_start (void (*fn) (void *), void *data,
                          unsigned num_threads, long start, long end,
                          long incr, enum gomp_schedule_type sched,
                          long chunk_size, unsigned int flags)
{
  struct gomp_team *team;

  num_threads = gomp_resolve_num_threads (num_threads, 0);
  team = gomp_new_team (num_threads);
  gomp_loop_init (&team->work_shares[0], start, end, incr, sched, chunk_size);
  gomp_team_start (fn, data, num_threads, flags, team);
}

void
GOMP_parallel_loop_static_start (void (*fn) (void *), void *data,
                                 unsigned num_threads, long start, long end,
                                 long incr, long chunk_size)
{
  gomp_parallel_loop_start (fn, data, num_threads, start, end, incr,
                            GFS_STATIC, chunk_size, 0);
}

void
GOMP_parallel_loop_dynamic_start (void (*fn) (void *), void *data,
                                  unsigned num_threads, long start, long end,
                                  long incr, long chunk_size)
{
  gomp_parallel_loop_start (fn, data, num_threads, start, end, incr,
                            GFS_DYNAMIC, chunk_size, 0);
}

void
GOMP_parallel_loop_guided_start (void (*fn) (void *), void *data,
                                 unsigned num_threads, long start, long end,
                                 long incr, long chunk_size)
{
  gomp_parallel_loop_start (fn, data, num_threads, start, end, incr,
                            GFS_GUIDED, chunk_size, 0);
}

void
GOMP_parallel_loop_runtime_start (void (*fn) (void *), void *data,
                                  unsigned num_threads, long start, long end,
                                  long incr)
{
  struct gomp_task_icv *icv = gomp_icv (false);
  gomp_parallel_loop_start (fn, data, num_threads, start, end, incr,
                            icv->run_sched_var, icv->run_sched_chunk_size, 0);
}

ialias_redirect (GOMP_parallel_end)

void
GOMP_parallel_loop_static (void (*fn) (void *), void *data,
                           unsigned num_threads, long start, long end,
                           long incr, long chunk_size, unsigned flags)
{
  gomp_parallel_loop_start (fn, data, num_threads, start, end, incr,
                            GFS_STATIC, chunk_size, flags);
  fn (data);
  GOMP_parallel_end ();
}

void
GOMP_parallel_loop_dynamic (void (*fn) (void *), void *data,
                            unsigned num_threads, long start, long end,
                            long incr, long chunk_size, unsigned flags)
{
  gomp_parallel_loop_start (fn, data, num_threads, start, end, incr,
                            GFS_DYNAMIC, chunk_size, flags);
  fn (data);
  GOMP_parallel_end ();
}

void
GOMP_parallel_loop_guided (void (*fn) (void *), void *data,
                           unsigned num_threads, long start, long end,
                           long incr, long chunk_size, unsigned flags)
{
  gomp_parallel_loop_start (fn, data, num_threads, start, end, incr,
                            GFS_GUIDED, chunk_size, flags);
  fn (data);
  GOMP_parallel_end ();
}

void
GOMP_parallel_loop_runtime (void (*fn) (void *), void *data,
                            unsigned num_threads, long start, long end,
                            long incr, unsigned flags)
{
  struct gomp_task_icv *icv = gomp_icv (false);
  gomp_parallel_loop_start (fn, data, num_threads, start, end, incr,
                            icv->run_sched_var, icv->run_sched_chunk_size,
                            flags);
  fn (data);
  GOMP_parallel_end ();
}

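/* Informal usage sketch (editorial addition): a combined construct such as

       #pragma omp parallel for schedule(runtime)
       for (i = 0; i < n; i++)
         body (i);

   is lowered to roughly

       GOMP_parallel_loop_runtime (subfn, &data, 0, 0, n, 1, 0);

   where `subfn', `data', `n' and `body' stand in for compiler-generated
   names.  Because the work share was pre-initialized by
   gomp_parallel_loop_start, subfn begins with a *_next call rather than
   a *_start call:

       static void
       subfn (void *data)
       {
         long istart, iend;
         while (GOMP_loop_runtime_next (&istart, &iend))
           {
             long i;
             for (i = istart; i < iend; i++)
               body (i);
           }
         GOMP_loop_end_nowait ();
       }
*/
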
/* The GOMP_loop_end* routines are called after the thread is told that
   all loop iterations are complete.  The first two versions synchronize
   all threads; the nowait version does not.  */

void
GOMP_loop_end (void)
{
  gomp_work_share_end ();
}

bool
GOMP_loop_end_cancel (void)
{
  return gomp_work_share_end_cancel ();
}

void
GOMP_loop_end_nowait (void)
{
  gomp_work_share_end_nowait ();
}

/* We use static functions above so that we're sure that the "runtime"
   function can defer to the proper routine without interposition.  We
   export the static function with a strong alias when possible, or with
   a wrapper function otherwise.  */

#ifdef HAVE_ATTRIBUTE_ALIAS
extern __typeof(gomp_loop_static_start) GOMP_loop_static_start
        __attribute__((alias ("gomp_loop_static_start")));
extern __typeof(gomp_loop_dynamic_start) GOMP_loop_dynamic_start
        __attribute__((alias ("gomp_loop_dynamic_start")));
extern __typeof(gomp_loop_guided_start) GOMP_loop_guided_start
        __attribute__((alias ("gomp_loop_guided_start")));

extern __typeof(gomp_loop_ordered_static_start) GOMP_loop_ordered_static_start
        __attribute__((alias ("gomp_loop_ordered_static_start")));
extern __typeof(gomp_loop_ordered_dynamic_start) GOMP_loop_ordered_dynamic_start
        __attribute__((alias ("gomp_loop_ordered_dynamic_start")));
extern __typeof(gomp_loop_ordered_guided_start) GOMP_loop_ordered_guided_start
        __attribute__((alias ("gomp_loop_ordered_guided_start")));

extern __typeof(gomp_loop_doacross_static_start) GOMP_loop_doacross_static_start
        __attribute__((alias ("gomp_loop_doacross_static_start")));
extern __typeof(gomp_loop_doacross_dynamic_start) GOMP_loop_doacross_dynamic_start
        __attribute__((alias ("gomp_loop_doacross_dynamic_start")));
extern __typeof(gomp_loop_doacross_guided_start) GOMP_loop_doacross_guided_start
        __attribute__((alias ("gomp_loop_doacross_guided_start")));

extern __typeof(gomp_loop_static_next) GOMP_loop_static_next
        __attribute__((alias ("gomp_loop_static_next")));
extern __typeof(gomp_loop_dynamic_next) GOMP_loop_dynamic_next
        __attribute__((alias ("gomp_loop_dynamic_next")));
extern __typeof(gomp_loop_guided_next) GOMP_loop_guided_next
        __attribute__((alias ("gomp_loop_guided_next")));

extern __typeof(gomp_loop_ordered_static_next) GOMP_loop_ordered_static_next
        __attribute__((alias ("gomp_loop_ordered_static_next")));
extern __typeof(gomp_loop_ordered_dynamic_next) GOMP_loop_ordered_dynamic_next
        __attribute__((alias ("gomp_loop_ordered_dynamic_next")));
extern __typeof(gomp_loop_ordered_guided_next) GOMP_loop_ordered_guided_next
        __attribute__((alias ("gomp_loop_ordered_guided_next")));
#else
bool
GOMP_loop_static_start (long start, long end, long incr, long chunk_size,
                        long *istart, long *iend)
{
  return gomp_loop_static_start (start, end, incr, chunk_size, istart, iend);
}

bool
GOMP_loop_dynamic_start (long start, long end, long incr, long chunk_size,
                         long *istart, long *iend)
{
  return gomp_loop_dynamic_start (start, end, incr, chunk_size, istart, iend);
}

bool
GOMP_loop_guided_start (long start, long end, long incr, long chunk_size,
                        long *istart, long *iend)
{
  return gomp_loop_guided_start (start, end, incr, chunk_size, istart, iend);
}

bool
GOMP_loop_ordered_static_start (long start, long end, long incr,
                                long chunk_size, long *istart, long *iend)
{
  return gomp_loop_ordered_static_start (start, end, incr, chunk_size,
                                         istart, iend);
}

bool
GOMP_loop_ordered_dynamic_start (long start, long end, long incr,
                                 long chunk_size, long *istart, long *iend)
{
  return gomp_loop_ordered_dynamic_start (start, end, incr, chunk_size,
                                          istart, iend);
}

bool
GOMP_loop_ordered_guided_start (long start, long end, long incr,
                                long chunk_size, long *istart, long *iend)
{
  return gomp_loop_ordered_guided_start (start, end, incr, chunk_size,
                                         istart, iend);
}

bool
GOMP_loop_doacross_static_start (unsigned ncounts, long *counts,
                                 long chunk_size, long *istart, long *iend)
{
  return gomp_loop_doacross_static_start (ncounts, counts, chunk_size,
                                          istart, iend);
}

bool
GOMP_loop_doacross_dynamic_start (unsigned ncounts, long *counts,
                                  long chunk_size, long *istart, long *iend)
{
  return gomp_loop_doacross_dynamic_start (ncounts, counts, chunk_size,
                                           istart, iend);
}

bool
GOMP_loop_doacross_guided_start (unsigned ncounts, long *counts,
                                 long chunk_size, long *istart, long *iend)
{
  return gomp_loop_doacross_guided_start (ncounts, counts, chunk_size,
                                          istart, iend);
}

bool
GOMP_loop_static_next (long *istart, long *iend)
{
  return gomp_loop_static_next (istart, iend);
}

bool
GOMP_loop_dynamic_next (long *istart, long *iend)
{
  return gomp_loop_dynamic_next (istart, iend);
}

bool
GOMP_loop_guided_next (long *istart, long *iend)
{
  return gomp_loop_guided_next (istart, iend);
}

bool
GOMP_loop_ordered_static_next (long *istart, long *iend)
{
  return gomp_loop_ordered_static_next (istart, iend);
}

bool
GOMP_loop_ordered_dynamic_next (long *istart, long *iend)
{
  return gomp_loop_ordered_dynamic_next (istart, iend);
}

bool
GOMP_loop_ordered_guided_next (long *istart, long *iend)
{
  return gomp_loop_ordered_guided_next (istart, iend);
}
#endif