libgomp/loop.c
/* Copyright (C) 2005-2015 Free Software Foundation, Inc.
   Contributed by Richard Henderson <rth@redhat.com>.

   This file is part of the GNU Offloading and Multi Processing Library
   (libgomp).

   Libgomp is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
   FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */

/* This file handles the LOOP (FOR/DO) construct.  */

#include <limits.h>
#include <stdlib.h>
#include "libgomp.h"
/* Initialize the given work share construct from the given arguments.  */

static inline void
gomp_loop_init (struct gomp_work_share *ws, long start, long end, long incr,
                enum gomp_schedule_type sched, long chunk_size)
{
  ws->sched = sched;
  ws->chunk_size = chunk_size;
  /* Canonicalize loops that have zero iterations to ->next == ->end.  */
  ws->end = ((incr > 0 && start > end) || (incr < 0 && start < end))
            ? start : end;
  ws->incr = incr;
  ws->next = start;
  if (sched == GFS_DYNAMIC)
    {
      ws->chunk_size *= incr;

#ifdef HAVE_SYNC_BUILTINS
      {
        /* For dynamic scheduling prepare things to make each iteration
           faster.  */
        struct gomp_thread *thr = gomp_thread ();
        struct gomp_team *team = thr->ts.team;
        long nthreads = team ? team->nthreads : 1;

        if (__builtin_expect (incr > 0, 1))
          {
            /* Cheap overflow protection.  */
            if (__builtin_expect ((nthreads | ws->chunk_size)
                                  >= 1UL << (sizeof (long)
                                             * __CHAR_BIT__ / 2 - 1), 0))
              ws->mode = 0;
            else
              ws->mode = ws->end < (LONG_MAX
                                    - (nthreads + 1) * ws->chunk_size);
          }
        /* Cheap overflow protection.  */
        else if (__builtin_expect ((nthreads | -ws->chunk_size)
                                   >= 1UL << (sizeof (long)
                                              * __CHAR_BIT__ / 2 - 1), 0))
          ws->mode = 0;
        else
          ws->mode = ws->end > (nthreads + 1) * -ws->chunk_size - LONG_MAX;
      }
#endif
    }
}
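/* A minimal illustrative sketch (our addition, not part of the upstream
   file): how the canonicalization above behaves for a loop with zero
   iterations.  With start = 10, end = 0, incr = 1, the condition
   (incr > 0 && start > end) holds, so ws->end is clamped to start; since
   ws->next == ws->end, every subsequent gomp_iter_*_next call reports
   that no work remains.  For GFS_DYNAMIC the chunk size is pre-multiplied
   by incr so the iterators can advance ws->next by whole chunks with a
   single addition.  */
#if 0
static void
example_loop_init (void)
{
  struct gomp_work_share ws;
  gomp_loop_init (&ws, 10, 0, 1, GFS_STATIC, 1);
  /* Now ws.next == 10 and ws.end == 10: the loop is empty.  */
}
#endif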
/* The *_start routines are called when first encountering a loop construct
   that is not bound directly to a parallel construct.  The first thread
   that arrives will create the work-share construct; subsequent threads
   will see the construct exists and allocate work from it.

   START, END, INCR are the bounds of the loop; due to the restrictions of
   OpenMP, these values must be the same in every thread.  This is not
   verified (nor is it entirely verifiable, since START is not necessarily
   retained intact in the work-share data structure).  CHUNK_SIZE is the
   scheduling parameter; again this must be identical in all threads.

   Returns true if there's any work for this thread to perform.  If so,
   *ISTART and *IEND are filled with the bounds of the iteration block
   allocated to this thread.  Returns false if all work was assigned to
   other threads prior to this thread's arrival.  */
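/* An illustrative sketch (our addition, not upstream): roughly how a
   compiler lowers an orphaned worksharing loop onto these entry points.
   Here n and body () stand in for user code.  Given

     #pragma omp for schedule(dynamic, 4)
     for (i = 0; i < n; i++)
       body (i);

   the generated code follows the start/next/end protocol:  */
#if 0
{
  long _s, _e;
  if (GOMP_loop_dynamic_start (0, n, 1, 4, &_s, &_e))
    do
      {
        for (long i = _s; i < _e; i++)
          body (i);
      }
    while (GOMP_loop_dynamic_next (&_s, &_e));
  GOMP_loop_end ();
}
#endif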
static bool
gomp_loop_static_start (long start, long end, long incr, long chunk_size,
                        long *istart, long *iend)
{
  struct gomp_thread *thr = gomp_thread ();

  thr->ts.static_trip = 0;
  if (gomp_work_share_start (false))
    {
      gomp_loop_init (thr->ts.work_share, start, end, incr,
                      GFS_STATIC, chunk_size);
      gomp_work_share_init_done ();
    }

  return !gomp_iter_static_next (istart, iend);
}

static bool
gomp_loop_dynamic_start (long start, long end, long incr, long chunk_size,
                         long *istart, long *iend)
{
  struct gomp_thread *thr = gomp_thread ();
  bool ret;

  if (gomp_work_share_start (false))
    {
      gomp_loop_init (thr->ts.work_share, start, end, incr,
                      GFS_DYNAMIC, chunk_size);
      gomp_work_share_init_done ();
    }

#ifdef HAVE_SYNC_BUILTINS
  ret = gomp_iter_dynamic_next (istart, iend);
#else
  gomp_mutex_lock (&thr->ts.work_share->lock);
  ret = gomp_iter_dynamic_next_locked (istart, iend);
  gomp_mutex_unlock (&thr->ts.work_share->lock);
#endif

  return ret;
}

static bool
gomp_loop_guided_start (long start, long end, long incr, long chunk_size,
                        long *istart, long *iend)
{
  struct gomp_thread *thr = gomp_thread ();
  bool ret;

  if (gomp_work_share_start (false))
    {
      gomp_loop_init (thr->ts.work_share, start, end, incr,
                      GFS_GUIDED, chunk_size);
      gomp_work_share_init_done ();
    }

#ifdef HAVE_SYNC_BUILTINS
  ret = gomp_iter_guided_next (istart, iend);
#else
  gomp_mutex_lock (&thr->ts.work_share->lock);
  ret = gomp_iter_guided_next_locked (istart, iend);
  gomp_mutex_unlock (&thr->ts.work_share->lock);
#endif

  return ret;
}

bool
GOMP_loop_runtime_start (long start, long end, long incr,
                         long *istart, long *iend)
{
  struct gomp_task_icv *icv = gomp_icv (false);
  switch (icv->run_sched_var)
    {
    case GFS_STATIC:
      return gomp_loop_static_start (start, end, incr,
                                     icv->run_sched_chunk_size,
                                     istart, iend);
    case GFS_DYNAMIC:
      return gomp_loop_dynamic_start (start, end, incr,
                                      icv->run_sched_chunk_size,
                                      istart, iend);
    case GFS_GUIDED:
      return gomp_loop_guided_start (start, end, incr,
                                     icv->run_sched_chunk_size,
                                     istart, iend);
    case GFS_AUTO:
      /* For now map to schedule(static), later on we could play with feedback
         driven choice.  */
      return gomp_loop_static_start (start, end, incr, 0, istart, iend);
    default:
      abort ();
    }
}
/* The *_ordered_*_start routines are similar.  The only difference is that
   this work-share construct is initialized to expect an ORDERED section.  */
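/* An illustrative sketch (our addition, not upstream): for a loop with an
   ORDERED clause, e.g. "#pragma omp for ordered schedule(dynamic)", the
   lowering uses the *_ordered_* entry points, and each ordered region in
   the body is bracketed by GOMP_ordered_start/GOMP_ordered_end so it runs
   in sequential iteration order.  n and ordered_body () stand in for user
   code:  */
#if 0
{
  long _s, _e;
  if (GOMP_loop_ordered_dynamic_start (0, n, 1, 1, &_s, &_e))
    do
      {
        for (long i = _s; i < _e; i++)
          {
            GOMP_ordered_start ();
            ordered_body (i);   /* the "#pragma omp ordered" block */
            GOMP_ordered_end ();
          }
      }
    while (GOMP_loop_ordered_dynamic_next (&_s, &_e));
  GOMP_loop_end ();
}
#endif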
static bool
gomp_loop_ordered_static_start (long start, long end, long incr,
                                long chunk_size, long *istart, long *iend)
{
  struct gomp_thread *thr = gomp_thread ();

  thr->ts.static_trip = 0;
  if (gomp_work_share_start (true))
    {
      gomp_loop_init (thr->ts.work_share, start, end, incr,
                      GFS_STATIC, chunk_size);
      gomp_ordered_static_init ();
      gomp_work_share_init_done ();
    }

  return !gomp_iter_static_next (istart, iend);
}

static bool
gomp_loop_ordered_dynamic_start (long start, long end, long incr,
                                 long chunk_size, long *istart, long *iend)
{
  struct gomp_thread *thr = gomp_thread ();
  bool ret;

  if (gomp_work_share_start (true))
    {
      gomp_loop_init (thr->ts.work_share, start, end, incr,
                      GFS_DYNAMIC, chunk_size);
      gomp_mutex_lock (&thr->ts.work_share->lock);
      gomp_work_share_init_done ();
    }
  else
    gomp_mutex_lock (&thr->ts.work_share->lock);

  ret = gomp_iter_dynamic_next_locked (istart, iend);
  if (ret)
    gomp_ordered_first ();
  gomp_mutex_unlock (&thr->ts.work_share->lock);

  return ret;
}

static bool
gomp_loop_ordered_guided_start (long start, long end, long incr,
                                long chunk_size, long *istart, long *iend)
{
  struct gomp_thread *thr = gomp_thread ();
  bool ret;

  if (gomp_work_share_start (true))
    {
      gomp_loop_init (thr->ts.work_share, start, end, incr,
                      GFS_GUIDED, chunk_size);
      gomp_mutex_lock (&thr->ts.work_share->lock);
      gomp_work_share_init_done ();
    }
  else
    gomp_mutex_lock (&thr->ts.work_share->lock);

  ret = gomp_iter_guided_next_locked (istart, iend);
  if (ret)
    gomp_ordered_first ();
  gomp_mutex_unlock (&thr->ts.work_share->lock);

  return ret;
}

bool
GOMP_loop_ordered_runtime_start (long start, long end, long incr,
                                 long *istart, long *iend)
{
  struct gomp_task_icv *icv = gomp_icv (false);
  switch (icv->run_sched_var)
    {
    case GFS_STATIC:
      return gomp_loop_ordered_static_start (start, end, incr,
                                             icv->run_sched_chunk_size,
                                             istart, iend);
    case GFS_DYNAMIC:
      return gomp_loop_ordered_dynamic_start (start, end, incr,
                                              icv->run_sched_chunk_size,
                                              istart, iend);
    case GFS_GUIDED:
      return gomp_loop_ordered_guided_start (start, end, incr,
                                             icv->run_sched_chunk_size,
                                             istart, iend);
    case GFS_AUTO:
      /* For now map to schedule(static), later on we could play with feedback
         driven choice.  */
      return gomp_loop_ordered_static_start (start, end, incr,
                                             0, istart, iend);
    default:
      abort ();
    }
}
/* The *_doacross_*_start routines are similar.  The only difference is that
   this work-share construct is initialized to expect an ORDERED(N) - DOACROSS
   section, and the worksharing loop always iterates from 0 to COUNTS[0] - 1,
   while the remaining COUNTS array elements tell the library the number of
   iterations of the ordered inner loops.  */
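#if 0
/* An illustrative sketch (our addition, not upstream): the user-level form
   of a doacross loop nest that reaches these entry points.  With ordered(2)
   the compiler passes ncounts == 2 and counts[] = { n, m }; the worksharing
   iterator then runs over 0 .. counts[0] - 1, and the depend clauses below
   are lowered to cross-iteration wait/post calls (GOMP_doacross_wait /
   GOMP_doacross_post) inside the body.  n, m and use () stand in for user
   code.  */
#pragma omp for ordered(2)
for (long i = 0; i < n; i++)
  for (long j = 0; j < m; j++)
    {
#pragma omp ordered depend (sink: i - 1, j)
      use (i, j);               /* may read results of iteration (i-1, j) */
#pragma omp ordered depend (source)
    }
#endif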
static bool
gomp_loop_doacross_static_start (unsigned ncounts, long *counts,
                                 long chunk_size, long *istart, long *iend)
{
  struct gomp_thread *thr = gomp_thread ();

  thr->ts.static_trip = 0;
  if (gomp_work_share_start (false))
    {
      gomp_loop_init (thr->ts.work_share, 0, counts[0], 1,
                      GFS_STATIC, chunk_size);
      gomp_doacross_init (ncounts, counts, chunk_size);
      gomp_work_share_init_done ();
    }

  return !gomp_iter_static_next (istart, iend);
}

static bool
gomp_loop_doacross_dynamic_start (unsigned ncounts, long *counts,
                                  long chunk_size, long *istart, long *iend)
{
  struct gomp_thread *thr = gomp_thread ();
  bool ret;

  if (gomp_work_share_start (false))
    {
      gomp_loop_init (thr->ts.work_share, 0, counts[0], 1,
                      GFS_DYNAMIC, chunk_size);
      gomp_doacross_init (ncounts, counts, chunk_size);
      gomp_work_share_init_done ();
    }

#ifdef HAVE_SYNC_BUILTINS
  ret = gomp_iter_dynamic_next (istart, iend);
#else
  gomp_mutex_lock (&thr->ts.work_share->lock);
  ret = gomp_iter_dynamic_next_locked (istart, iend);
  gomp_mutex_unlock (&thr->ts.work_share->lock);
#endif

  return ret;
}

static bool
gomp_loop_doacross_guided_start (unsigned ncounts, long *counts,
                                 long chunk_size, long *istart, long *iend)
{
  struct gomp_thread *thr = gomp_thread ();
  bool ret;

  if (gomp_work_share_start (false))
    {
      gomp_loop_init (thr->ts.work_share, 0, counts[0], 1,
                      GFS_GUIDED, chunk_size);
      gomp_doacross_init (ncounts, counts, chunk_size);
      gomp_work_share_init_done ();
    }

#ifdef HAVE_SYNC_BUILTINS
  ret = gomp_iter_guided_next (istart, iend);
#else
  gomp_mutex_lock (&thr->ts.work_share->lock);
  ret = gomp_iter_guided_next_locked (istart, iend);
  gomp_mutex_unlock (&thr->ts.work_share->lock);
#endif

  return ret;
}

bool
GOMP_loop_doacross_runtime_start (unsigned ncounts, long *counts,
                                  long *istart, long *iend)
{
  struct gomp_task_icv *icv = gomp_icv (false);
  switch (icv->run_sched_var)
    {
    case GFS_STATIC:
      return gomp_loop_doacross_static_start (ncounts, counts,
                                              icv->run_sched_chunk_size,
                                              istart, iend);
    case GFS_DYNAMIC:
      return gomp_loop_doacross_dynamic_start (ncounts, counts,
                                               icv->run_sched_chunk_size,
                                               istart, iend);
    case GFS_GUIDED:
      return gomp_loop_doacross_guided_start (ncounts, counts,
                                              icv->run_sched_chunk_size,
                                              istart, iend);
    case GFS_AUTO:
      /* For now map to schedule(static), later on we could play with feedback
         driven choice.  */
      return gomp_loop_doacross_static_start (ncounts, counts,
                                              0, istart, iend);
    default:
      abort ();
    }
}
/* The *_next routines are called when the thread completes processing of
   the iteration block currently assigned to it.  If the work-share
   construct is bound directly to a parallel construct, then the iteration
   bounds may have been set up before the parallel, in which case this may
   be the thread's first request for an iteration block.

   Returns true if there is work remaining to be performed; *ISTART and
   *IEND are filled with a new iteration block.  Returns false if all work
   has been assigned.  */
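/* An illustrative sketch (our addition, not upstream): when the loop is
   bound directly to a parallel construct, gomp_parallel_loop_start below
   has already initialized the work share, so the outlined thread function
   begins with a *_next call rather than a *_start call.  subfunction and
   body () are stand-ins for compiler-generated and user code:  */
#if 0
static void
subfunction (void *data)
{
  long _s, _e;
  while (GOMP_loop_dynamic_next (&_s, &_e))
    {
      for (long i = _s; i < _e; i++)
        body (i);
    }
  /* The barrier at the end of the parallel suffices, so no second
     barrier is needed here.  */
  GOMP_loop_end_nowait ();
}
#endif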
static bool
gomp_loop_static_next (long *istart, long *iend)
{
  return !gomp_iter_static_next (istart, iend);
}

static bool
gomp_loop_dynamic_next (long *istart, long *iend)
{
  bool ret;

#ifdef HAVE_SYNC_BUILTINS
  ret = gomp_iter_dynamic_next (istart, iend);
#else
  struct gomp_thread *thr = gomp_thread ();
  gomp_mutex_lock (&thr->ts.work_share->lock);
  ret = gomp_iter_dynamic_next_locked (istart, iend);
  gomp_mutex_unlock (&thr->ts.work_share->lock);
#endif

  return ret;
}

static bool
gomp_loop_guided_next (long *istart, long *iend)
{
  bool ret;

#ifdef HAVE_SYNC_BUILTINS
  ret = gomp_iter_guided_next (istart, iend);
#else
  struct gomp_thread *thr = gomp_thread ();
  gomp_mutex_lock (&thr->ts.work_share->lock);
  ret = gomp_iter_guided_next_locked (istart, iend);
  gomp_mutex_unlock (&thr->ts.work_share->lock);
#endif

  return ret;
}

bool
GOMP_loop_runtime_next (long *istart, long *iend)
{
  struct gomp_thread *thr = gomp_thread ();

  switch (thr->ts.work_share->sched)
    {
    case GFS_STATIC:
    case GFS_AUTO:
      return gomp_loop_static_next (istart, iend);
    case GFS_DYNAMIC:
      return gomp_loop_dynamic_next (istart, iend);
    case GFS_GUIDED:
      return gomp_loop_guided_next (istart, iend);
    default:
      abort ();
    }
}
/* The *_ordered_*_next routines are called when the thread completes
   processing of the iteration block currently assigned to it.

   Returns true if there is work remaining to be performed; *ISTART and
   *IEND are filled with a new iteration block.  Returns false if all work
   has been assigned.  */

static bool
gomp_loop_ordered_static_next (long *istart, long *iend)
{
  struct gomp_thread *thr = gomp_thread ();
  int test;

  gomp_ordered_sync ();
  gomp_mutex_lock (&thr->ts.work_share->lock);
  test = gomp_iter_static_next (istart, iend);
  if (test >= 0)
    gomp_ordered_static_next ();
  gomp_mutex_unlock (&thr->ts.work_share->lock);

  return test == 0;
}

static bool
gomp_loop_ordered_dynamic_next (long *istart, long *iend)
{
  struct gomp_thread *thr = gomp_thread ();
  bool ret;

  gomp_ordered_sync ();
  gomp_mutex_lock (&thr->ts.work_share->lock);
  ret = gomp_iter_dynamic_next_locked (istart, iend);
  if (ret)
    gomp_ordered_next ();
  else
    gomp_ordered_last ();
  gomp_mutex_unlock (&thr->ts.work_share->lock);

  return ret;
}

static bool
gomp_loop_ordered_guided_next (long *istart, long *iend)
{
  struct gomp_thread *thr = gomp_thread ();
  bool ret;

  gomp_ordered_sync ();
  gomp_mutex_lock (&thr->ts.work_share->lock);
  ret = gomp_iter_guided_next_locked (istart, iend);
  if (ret)
    gomp_ordered_next ();
  else
    gomp_ordered_last ();
  gomp_mutex_unlock (&thr->ts.work_share->lock);

  return ret;
}

bool
GOMP_loop_ordered_runtime_next (long *istart, long *iend)
{
  struct gomp_thread *thr = gomp_thread ();

  switch (thr->ts.work_share->sched)
    {
    case GFS_STATIC:
    case GFS_AUTO:
      return gomp_loop_ordered_static_next (istart, iend);
    case GFS_DYNAMIC:
      return gomp_loop_ordered_dynamic_next (istart, iend);
    case GFS_GUIDED:
      return gomp_loop_ordered_guided_next (istart, iend);
    default:
      abort ();
    }
}
/* The GOMP_parallel_loop_* routines pre-initialize a work-share construct
   to avoid one synchronization once we get into the loop.  */
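/* An illustrative sketch (our addition, not upstream): the host side of
   the combined construct.  For "#pragma omp parallel for schedule(dynamic, 4)"
   the compiler emits a single call that creates the team with the work
   share already initialized, runs the outlined body in every thread (e.g.
   the subfunction sketched earlier), and joins.  subfunction, data and n
   are stand-ins:  */
#if 0
GOMP_parallel_loop_dynamic (subfunction, data, 0 /* num_threads: use ICV */,
                            0, n, 1, 4 /* chunk */, 0 /* flags */);
#endif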
static void
gomp_parallel_loop_start (void (*fn) (void *), void *data,
                          unsigned num_threads, long start, long end,
                          long incr, enum gomp_schedule_type sched,
                          long chunk_size, unsigned int flags)
{
  struct gomp_team *team;

  num_threads = gomp_resolve_num_threads (num_threads, 0);
  team = gomp_new_team (num_threads);
  gomp_loop_init (&team->work_shares[0], start, end, incr, sched, chunk_size);
  gomp_team_start (fn, data, num_threads, flags, team);
}

void
GOMP_parallel_loop_static_start (void (*fn) (void *), void *data,
                                 unsigned num_threads, long start, long end,
                                 long incr, long chunk_size)
{
  gomp_parallel_loop_start (fn, data, num_threads, start, end, incr,
                            GFS_STATIC, chunk_size, 0);
}

void
GOMP_parallel_loop_dynamic_start (void (*fn) (void *), void *data,
                                  unsigned num_threads, long start, long end,
                                  long incr, long chunk_size)
{
  gomp_parallel_loop_start (fn, data, num_threads, start, end, incr,
                            GFS_DYNAMIC, chunk_size, 0);
}

void
GOMP_parallel_loop_guided_start (void (*fn) (void *), void *data,
                                 unsigned num_threads, long start, long end,
                                 long incr, long chunk_size)
{
  gomp_parallel_loop_start (fn, data, num_threads, start, end, incr,
                            GFS_GUIDED, chunk_size, 0);
}

void
GOMP_parallel_loop_runtime_start (void (*fn) (void *), void *data,
                                  unsigned num_threads, long start, long end,
                                  long incr)
{
  struct gomp_task_icv *icv = gomp_icv (false);
  gomp_parallel_loop_start (fn, data, num_threads, start, end, incr,
                            icv->run_sched_var, icv->run_sched_chunk_size, 0);
}
ialias_redirect (GOMP_parallel_end)

void
GOMP_parallel_loop_static (void (*fn) (void *), void *data,
                           unsigned num_threads, long start, long end,
                           long incr, long chunk_size, unsigned flags)
{
  gomp_parallel_loop_start (fn, data, num_threads, start, end, incr,
                            GFS_STATIC, chunk_size, flags);
  fn (data);
  GOMP_parallel_end ();
}

void
GOMP_parallel_loop_dynamic (void (*fn) (void *), void *data,
                            unsigned num_threads, long start, long end,
                            long incr, long chunk_size, unsigned flags)
{
  gomp_parallel_loop_start (fn, data, num_threads, start, end, incr,
                            GFS_DYNAMIC, chunk_size, flags);
  fn (data);
  GOMP_parallel_end ();
}

void
GOMP_parallel_loop_guided (void (*fn) (void *), void *data,
                           unsigned num_threads, long start, long end,
                           long incr, long chunk_size, unsigned flags)
{
  gomp_parallel_loop_start (fn, data, num_threads, start, end, incr,
                            GFS_GUIDED, chunk_size, flags);
  fn (data);
  GOMP_parallel_end ();
}

void
GOMP_parallel_loop_runtime (void (*fn) (void *), void *data,
                            unsigned num_threads, long start, long end,
                            long incr, unsigned flags)
{
  struct gomp_task_icv *icv = gomp_icv (false);
  gomp_parallel_loop_start (fn, data, num_threads, start, end, incr,
                            icv->run_sched_var, icv->run_sched_chunk_size,
                            flags);
  fn (data);
  GOMP_parallel_end ();
}
/* The GOMP_loop_end* routines are called after the thread is told that
   all loop iterations are complete.  The first two versions synchronize
   all threads; the nowait version does not.  */
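/* An illustrative note (our addition, not upstream): a "nowait" clause on
   the loop simply selects GOMP_loop_end_nowait in the generated epilogue,
   skipping the barrier, while a cancellable loop ends with
   GOMP_loop_end_cancel and branches past the remaining work when it
   returns true:  */
#if 0
if (GOMP_loop_end_cancel ())
  goto cancelled;
#endif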
void
GOMP_loop_end (void)
{
  gomp_work_share_end ();
}

bool
GOMP_loop_end_cancel (void)
{
  return gomp_work_share_end_cancel ();
}

void
GOMP_loop_end_nowait (void)
{
  gomp_work_share_end_nowait ();
}


/* We use static functions above so that we're sure that the "runtime"
   function can defer to the proper routine without interposition.  We
   export the static function with a strong alias when possible, or with
   a wrapper function otherwise.  */

#ifdef HAVE_ATTRIBUTE_ALIAS
extern __typeof(gomp_loop_static_start) GOMP_loop_static_start
        __attribute__((alias ("gomp_loop_static_start")));
extern __typeof(gomp_loop_dynamic_start) GOMP_loop_dynamic_start
        __attribute__((alias ("gomp_loop_dynamic_start")));
extern __typeof(gomp_loop_guided_start) GOMP_loop_guided_start
        __attribute__((alias ("gomp_loop_guided_start")));

extern __typeof(gomp_loop_ordered_static_start) GOMP_loop_ordered_static_start
        __attribute__((alias ("gomp_loop_ordered_static_start")));
extern __typeof(gomp_loop_ordered_dynamic_start) GOMP_loop_ordered_dynamic_start
        __attribute__((alias ("gomp_loop_ordered_dynamic_start")));
extern __typeof(gomp_loop_ordered_guided_start) GOMP_loop_ordered_guided_start
        __attribute__((alias ("gomp_loop_ordered_guided_start")));

extern __typeof(gomp_loop_doacross_static_start) GOMP_loop_doacross_static_start
        __attribute__((alias ("gomp_loop_doacross_static_start")));
extern __typeof(gomp_loop_doacross_dynamic_start) GOMP_loop_doacross_dynamic_start
        __attribute__((alias ("gomp_loop_doacross_dynamic_start")));
extern __typeof(gomp_loop_doacross_guided_start) GOMP_loop_doacross_guided_start
        __attribute__((alias ("gomp_loop_doacross_guided_start")));

extern __typeof(gomp_loop_static_next) GOMP_loop_static_next
        __attribute__((alias ("gomp_loop_static_next")));
extern __typeof(gomp_loop_dynamic_next) GOMP_loop_dynamic_next
        __attribute__((alias ("gomp_loop_dynamic_next")));
extern __typeof(gomp_loop_guided_next) GOMP_loop_guided_next
        __attribute__((alias ("gomp_loop_guided_next")));

extern __typeof(gomp_loop_ordered_static_next) GOMP_loop_ordered_static_next
        __attribute__((alias ("gomp_loop_ordered_static_next")));
extern __typeof(gomp_loop_ordered_dynamic_next) GOMP_loop_ordered_dynamic_next
        __attribute__((alias ("gomp_loop_ordered_dynamic_next")));
extern __typeof(gomp_loop_ordered_guided_next) GOMP_loop_ordered_guided_next
        __attribute__((alias ("gomp_loop_ordered_guided_next")));
#else
bool
GOMP_loop_static_start (long start, long end, long incr, long chunk_size,
                        long *istart, long *iend)
{
  return gomp_loop_static_start (start, end, incr, chunk_size, istart, iend);
}

bool
GOMP_loop_dynamic_start (long start, long end, long incr, long chunk_size,
                         long *istart, long *iend)
{
  return gomp_loop_dynamic_start (start, end, incr, chunk_size, istart, iend);
}

bool
GOMP_loop_guided_start (long start, long end, long incr, long chunk_size,
                        long *istart, long *iend)
{
  return gomp_loop_guided_start (start, end, incr, chunk_size, istart, iend);
}

bool
GOMP_loop_ordered_static_start (long start, long end, long incr,
                                long chunk_size, long *istart, long *iend)
{
  return gomp_loop_ordered_static_start (start, end, incr, chunk_size,
                                         istart, iend);
}

bool
GOMP_loop_ordered_dynamic_start (long start, long end, long incr,
                                 long chunk_size, long *istart, long *iend)
{
  return gomp_loop_ordered_dynamic_start (start, end, incr, chunk_size,
                                          istart, iend);
}

bool
GOMP_loop_ordered_guided_start (long start, long end, long incr,
                                long chunk_size, long *istart, long *iend)
{
  return gomp_loop_ordered_guided_start (start, end, incr, chunk_size,
                                         istart, iend);
}

bool
GOMP_loop_doacross_static_start (unsigned ncounts, long *counts,
                                 long chunk_size, long *istart, long *iend)
{
  return gomp_loop_doacross_static_start (ncounts, counts, chunk_size,
                                          istart, iend);
}

bool
GOMP_loop_doacross_dynamic_start (unsigned ncounts, long *counts,
                                  long chunk_size, long *istart, long *iend)
{
  return gomp_loop_doacross_dynamic_start (ncounts, counts, chunk_size,
                                           istart, iend);
}

bool
GOMP_loop_doacross_guided_start (unsigned ncounts, long *counts,
                                 long chunk_size, long *istart, long *iend)
{
  return gomp_loop_doacross_guided_start (ncounts, counts, chunk_size,
                                          istart, iend);
}

bool
GOMP_loop_static_next (long *istart, long *iend)
{
  return gomp_loop_static_next (istart, iend);
}

bool
GOMP_loop_dynamic_next (long *istart, long *iend)
{
  return gomp_loop_dynamic_next (istart, iend);
}

bool
GOMP_loop_guided_next (long *istart, long *iend)
{
  return gomp_loop_guided_next (istart, iend);
}

bool
GOMP_loop_ordered_static_next (long *istart, long *iend)
{
  return gomp_loop_ordered_static_next (istart, iend);
}

bool
GOMP_loop_ordered_dynamic_next (long *istart, long *iend)
{
  return gomp_loop_ordered_dynamic_next (istart, iend);
}

bool
GOMP_loop_ordered_guided_next (long *istart, long *iend)
{
  return gomp_loop_ordered_guided_next (istart, iend);
}
#endif