/* Copyright (C) 2005-2018 Free Software Foundation, Inc.
   Contributed by Richard Henderson <rth@redhat.com>.

   This file is part of the GNU Offloading and Multi Processing Library
   (libgomp).

   Libgomp is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
   FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */

/* This file handles the LOOP (FOR/DO) construct.  */

#include <limits.h>
#include <stdlib.h>
#include "libgomp.h"

typedef unsigned long long gomp_ull;

/* Initialize the given work share construct from the given arguments.  */

static inline void
gomp_loop_ull_init (struct gomp_work_share *ws, bool up, gomp_ull start,
                    gomp_ull end, gomp_ull incr, enum gomp_schedule_type sched,
                    gomp_ull chunk_size)
{
  ws->sched = sched;
  ws->chunk_size_ull = chunk_size;
  /* Canonicalize loops that have zero iterations to ->next == ->end.  */
  ws->end_ull = ((up && start > end) || (!up && start < end))
                ? start : end;
  ws->incr_ull = incr;
  ws->next_ull = start;
  ws->mode = 0;
  if (sched == GFS_DYNAMIC)
    {
      ws->chunk_size_ull *= incr;

#if defined HAVE_SYNC_BUILTINS && defined __LP64__
      {
        /* For dynamic scheduling prepare things to make each iteration
           faster.  */
        struct gomp_thread *thr = gomp_thread ();
        struct gomp_team *team = thr->ts.team;
        long nthreads = team ? team->nthreads : 1;

        if (__builtin_expect (up, 1))
          {
            /* Cheap overflow protection.  */
            if (__builtin_expect ((nthreads | ws->chunk_size_ull)
                                  < 1ULL << (sizeof (gomp_ull)
                                             * __CHAR_BIT__ / 2 - 1), 1))
              ws->mode = ws->end_ull < (__LONG_LONG_MAX__ * 2ULL + 1
                                        - (nthreads + 1)
                                          * ws->chunk_size_ull);
          }
        /* Cheap overflow protection.  */
        else if (__builtin_expect ((nthreads | -ws->chunk_size_ull)
                                   < 1ULL << (sizeof (gomp_ull)
                                              * __CHAR_BIT__ / 2 - 1), 1))
          ws->mode = ws->end_ull > ((nthreads + 1) * -ws->chunk_size_ull
                                    - (__LONG_LONG_MAX__ * 2ULL + 1));
      }
#endif
    }
  if (!up)
    ws->mode |= 2;
}
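
/* A note on the MODE encoding above (an informal reading of the logic).
   Bit 1 (value 2) records a downward loop.  Bit 0 (value 1), set only for
   GFS_DYNAMIC on targets with 64-bit sync builtins, records that the
   iterator may advance NEXT_ULL with a bare atomic fetch-and-add, no
   wraparound check needed (see iter_ull.c).  __LONG_LONG_MAX__ * 2ULL + 1
   is simply ULLONG_MAX spelled portably.  For an upward loop: if NTHREADS
   and the scaled chunk size are both below 2^31 (for a 64-bit gomp_ull),
   the product (NTHREADS + 1) * CHUNK cannot itself overflow, and the fast
   path is safe whenever END_ULL < ULLONG_MAX - (NTHREADS + 1) * CHUNK,
   i.e. even if every thread simultaneously advances NEXT_ULL by one more
   chunk, the counter cannot wrap.  For example, with 8 threads and a
   scaled chunk of 1024 the test is END_ULL < ULLONG_MAX - 9216.  */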

/* The *_start routines are called when first encountering a loop construct
   that is not bound directly to a parallel construct.  The first thread
   that arrives will create the work-share construct; subsequent threads
   will see the construct exists and allocate work from it.

   START, END, INCR are the bounds of the loop; due to the restrictions of
   OpenMP, these values must be the same in every thread.  This is not
   verified (nor is it entirely verifiable, since START is not necessarily
   retained intact in the work-share data structure).  CHUNK_SIZE is the
   scheduling parameter; again this must be identical in all threads.

   Returns true if there's any work for this thread to perform.  If so,
   *ISTART and *IEND are filled with the bounds of the iteration block
   allocated to this thread.  Returns false if all work was assigned to
   other threads prior to this thread's arrival.  */

static bool
gomp_loop_ull_static_start (bool up, gomp_ull start, gomp_ull end,
                            gomp_ull incr, gomp_ull chunk_size,
                            gomp_ull *istart, gomp_ull *iend)
{
  struct gomp_thread *thr = gomp_thread ();

  thr->ts.static_trip = 0;
  if (gomp_work_share_start (false))
    {
      gomp_loop_ull_init (thr->ts.work_share, up, start, end, incr,
                          GFS_STATIC, chunk_size);
      gomp_work_share_init_done ();
    }

  return !gomp_iter_ull_static_next (istart, iend);
}

static bool
gomp_loop_ull_dynamic_start (bool up, gomp_ull start, gomp_ull end,
                             gomp_ull incr, gomp_ull chunk_size,
                             gomp_ull *istart, gomp_ull *iend)
{
  struct gomp_thread *thr = gomp_thread ();
  bool ret;

  if (gomp_work_share_start (false))
    {
      gomp_loop_ull_init (thr->ts.work_share, up, start, end, incr,
                          GFS_DYNAMIC, chunk_size);
      gomp_work_share_init_done ();
    }

#if defined HAVE_SYNC_BUILTINS && defined __LP64__
  ret = gomp_iter_ull_dynamic_next (istart, iend);
#else
  gomp_mutex_lock (&thr->ts.work_share->lock);
  ret = gomp_iter_ull_dynamic_next_locked (istart, iend);
  gomp_mutex_unlock (&thr->ts.work_share->lock);
#endif

  return ret;
}

static bool
gomp_loop_ull_guided_start (bool up, gomp_ull start, gomp_ull end,
                            gomp_ull incr, gomp_ull chunk_size,
                            gomp_ull *istart, gomp_ull *iend)
{
  struct gomp_thread *thr = gomp_thread ();
  bool ret;

  if (gomp_work_share_start (false))
    {
      gomp_loop_ull_init (thr->ts.work_share, up, start, end, incr,
                          GFS_GUIDED, chunk_size);
      gomp_work_share_init_done ();
    }

#if defined HAVE_SYNC_BUILTINS && defined __LP64__
  ret = gomp_iter_ull_guided_next (istart, iend);
#else
  gomp_mutex_lock (&thr->ts.work_share->lock);
  ret = gomp_iter_ull_guided_next_locked (istart, iend);
  gomp_mutex_unlock (&thr->ts.work_share->lock);
#endif

  return ret;
}

bool
GOMP_loop_ull_runtime_start (bool up, gomp_ull start, gomp_ull end,
                             gomp_ull incr, gomp_ull *istart, gomp_ull *iend)
{
  struct gomp_task_icv *icv = gomp_icv (false);
  switch (icv->run_sched_var)
    {
    case GFS_STATIC:
      return gomp_loop_ull_static_start (up, start, end, incr,
                                         icv->run_sched_chunk_size,
                                         istart, iend);
    case GFS_DYNAMIC:
      return gomp_loop_ull_dynamic_start (up, start, end, incr,
                                          icv->run_sched_chunk_size,
                                          istart, iend);
    case GFS_GUIDED:
      return gomp_loop_ull_guided_start (up, start, end, incr,
                                         icv->run_sched_chunk_size,
                                         istart, iend);
    case GFS_AUTO:
      /* For now map to schedule(static); later on we could play with a
         feedback-driven choice.  */
      return gomp_loop_ull_static_start (up, start, end, incr,
                                         0, istart, iend);
    default:
      abort ();
    }
}
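
/* Illustrative sketch only (an assumed, typical compiler expansion; `body'
   is a placeholder): a worksharing loop with an unsigned long long
   iterator, e.g.

     #pragma omp for schedule(runtime)
     for (unsigned long long i = 0; i < n; i += 4)
       body (i);

   is lowered into the start/next protocol along these lines:

     gomp_ull istart, iend;
     if (GOMP_loop_ull_runtime_start (true, 0, n, 4, &istart, &iend))
       do
         {
           for (gomp_ull i = istart; i < iend; i += 4)
             body (i);
         }
       while (GOMP_loop_ull_runtime_next (&istart, &iend));
     GOMP_loop_end ();

   where GOMP_loop_end (defined in loop.c) performs the closing barrier
   and work-share teardown.  */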

/* The *_ordered_*_start routines are similar.  The only difference is that
   this work-share construct is initialized to expect an ORDERED section.  */

static bool
gomp_loop_ull_ordered_static_start (bool up, gomp_ull start, gomp_ull end,
                                    gomp_ull incr, gomp_ull chunk_size,
                                    gomp_ull *istart, gomp_ull *iend)
{
  struct gomp_thread *thr = gomp_thread ();

  thr->ts.static_trip = 0;
  if (gomp_work_share_start (true))
    {
      gomp_loop_ull_init (thr->ts.work_share, up, start, end, incr,
                          GFS_STATIC, chunk_size);
      gomp_ordered_static_init ();
      gomp_work_share_init_done ();
    }

  return !gomp_iter_ull_static_next (istart, iend);
}

static bool
gomp_loop_ull_ordered_dynamic_start (bool up, gomp_ull start, gomp_ull end,
                                     gomp_ull incr, gomp_ull chunk_size,
                                     gomp_ull *istart, gomp_ull *iend)
{
  struct gomp_thread *thr = gomp_thread ();
  bool ret;

  if (gomp_work_share_start (true))
    {
      gomp_loop_ull_init (thr->ts.work_share, up, start, end, incr,
                          GFS_DYNAMIC, chunk_size);
      gomp_mutex_lock (&thr->ts.work_share->lock);
      gomp_work_share_init_done ();
    }
  else
    gomp_mutex_lock (&thr->ts.work_share->lock);

  ret = gomp_iter_ull_dynamic_next_locked (istart, iend);
  if (ret)
    gomp_ordered_first ();
  gomp_mutex_unlock (&thr->ts.work_share->lock);

  return ret;
}

static bool
gomp_loop_ull_ordered_guided_start (bool up, gomp_ull start, gomp_ull end,
                                    gomp_ull incr, gomp_ull chunk_size,
                                    gomp_ull *istart, gomp_ull *iend)
{
  struct gomp_thread *thr = gomp_thread ();
  bool ret;

  if (gomp_work_share_start (true))
    {
      gomp_loop_ull_init (thr->ts.work_share, up, start, end, incr,
                          GFS_GUIDED, chunk_size);
      gomp_mutex_lock (&thr->ts.work_share->lock);
      gomp_work_share_init_done ();
    }
  else
    gomp_mutex_lock (&thr->ts.work_share->lock);

  ret = gomp_iter_ull_guided_next_locked (istart, iend);
  if (ret)
    gomp_ordered_first ();
  gomp_mutex_unlock (&thr->ts.work_share->lock);

  return ret;
}

bool
GOMP_loop_ull_ordered_runtime_start (bool up, gomp_ull start, gomp_ull end,
                                     gomp_ull incr, gomp_ull *istart,
                                     gomp_ull *iend)
{
  struct gomp_task_icv *icv = gomp_icv (false);
  switch (icv->run_sched_var)
    {
    case GFS_STATIC:
      return gomp_loop_ull_ordered_static_start (up, start, end, incr,
                                                 icv->run_sched_chunk_size,
                                                 istart, iend);
    case GFS_DYNAMIC:
      return gomp_loop_ull_ordered_dynamic_start (up, start, end, incr,
                                                  icv->run_sched_chunk_size,
                                                  istart, iend);
    case GFS_GUIDED:
      return gomp_loop_ull_ordered_guided_start (up, start, end, incr,
                                                 icv->run_sched_chunk_size,
                                                 istart, iend);
    case GFS_AUTO:
      /* For now map to schedule(static); later on we could play with a
         feedback-driven choice.  */
      return gomp_loop_ull_ordered_static_start (up, start, end, incr,
                                                 0, istart, iend);
    default:
      abort ();
    }
}
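
/* Illustrative sketch only (an assumed, typical expansion; `ordered_part'
   is a placeholder): with an ORDERED clause the compiler uses the
   *_ordered_* entry points and brackets the ordered region in the loop
   body with GOMP_ordered_start () / GOMP_ordered_end () from ordered.c:

     gomp_ull istart, iend;
     if (GOMP_loop_ull_ordered_runtime_start (true, 0, n, 1,
                                              &istart, &iend))
       do
         for (gomp_ull i = istart; i < iend; i++)
           {
             GOMP_ordered_start ();
             ordered_part (i);
             GOMP_ordered_end ();
           }
       while (GOMP_loop_ull_ordered_runtime_next (&istart, &iend));
     GOMP_loop_end ();  */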

/* The *_doacross_*_start routines are similar.  The only difference is that
   this work-share construct is initialized to expect an ORDERED(N) - DOACROSS
   section, the worksharing loop always iterates from 0 to COUNTS[0] - 1,
   and the remaining COUNTS array elements tell the library the number of
   iterations in the ordered inner loops.  */

static bool
gomp_loop_ull_doacross_static_start (unsigned ncounts, gomp_ull *counts,
                                     gomp_ull chunk_size, gomp_ull *istart,
                                     gomp_ull *iend)
{
  struct gomp_thread *thr = gomp_thread ();

  thr->ts.static_trip = 0;
  if (gomp_work_share_start (false))
    {
      gomp_loop_ull_init (thr->ts.work_share, true, 0, counts[0], 1,
                          GFS_STATIC, chunk_size);
      gomp_doacross_ull_init (ncounts, counts, chunk_size);
      gomp_work_share_init_done ();
    }

  return !gomp_iter_ull_static_next (istart, iend);
}

static bool
gomp_loop_ull_doacross_dynamic_start (unsigned ncounts, gomp_ull *counts,
                                      gomp_ull chunk_size, gomp_ull *istart,
                                      gomp_ull *iend)
{
  struct gomp_thread *thr = gomp_thread ();
  bool ret;

  if (gomp_work_share_start (false))
    {
      gomp_loop_ull_init (thr->ts.work_share, true, 0, counts[0], 1,
                          GFS_DYNAMIC, chunk_size);
      gomp_doacross_ull_init (ncounts, counts, chunk_size);
      gomp_work_share_init_done ();
    }

#if defined HAVE_SYNC_BUILTINS && defined __LP64__
  ret = gomp_iter_ull_dynamic_next (istart, iend);
#else
  gomp_mutex_lock (&thr->ts.work_share->lock);
  ret = gomp_iter_ull_dynamic_next_locked (istart, iend);
  gomp_mutex_unlock (&thr->ts.work_share->lock);
#endif

  return ret;
}

static bool
gomp_loop_ull_doacross_guided_start (unsigned ncounts, gomp_ull *counts,
                                     gomp_ull chunk_size, gomp_ull *istart,
                                     gomp_ull *iend)
{
  struct gomp_thread *thr = gomp_thread ();
  bool ret;

  if (gomp_work_share_start (false))
    {
      gomp_loop_ull_init (thr->ts.work_share, true, 0, counts[0], 1,
                          GFS_GUIDED, chunk_size);
      gomp_doacross_ull_init (ncounts, counts, chunk_size);
      gomp_work_share_init_done ();
    }

#if defined HAVE_SYNC_BUILTINS && defined __LP64__
  ret = gomp_iter_ull_guided_next (istart, iend);
#else
  gomp_mutex_lock (&thr->ts.work_share->lock);
  ret = gomp_iter_ull_guided_next_locked (istart, iend);
  gomp_mutex_unlock (&thr->ts.work_share->lock);
#endif

  return ret;
}

bool
GOMP_loop_ull_doacross_runtime_start (unsigned ncounts, gomp_ull *counts,
                                      gomp_ull *istart, gomp_ull *iend)
{
  struct gomp_task_icv *icv = gomp_icv (false);
  switch (icv->run_sched_var)
    {
    case GFS_STATIC:
      return gomp_loop_ull_doacross_static_start (ncounts, counts,
                                                  icv->run_sched_chunk_size,
                                                  istart, iend);
    case GFS_DYNAMIC:
      return gomp_loop_ull_doacross_dynamic_start (ncounts, counts,
                                                   icv->run_sched_chunk_size,
                                                   istart, iend);
    case GFS_GUIDED:
      return gomp_loop_ull_doacross_guided_start (ncounts, counts,
                                                  icv->run_sched_chunk_size,
                                                  istart, iend);
    case GFS_AUTO:
      /* For now map to schedule(static); later on we could play with a
         feedback-driven choice.  */
      return gomp_loop_ull_doacross_static_start (ncounts, counts,
                                                  0, istart, iend);
    default:
      abort ();
    }
}
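
/* For example (an illustrative reading, with iteration counts as computed
   by the compiler): for

     #pragma omp for ordered(2)
     for (gomp_ull i = 0; i < m; i++)
       for (gomp_ull j = 0; j < n; j++)
         ...

   NCOUNTS is 2 and COUNTS is { m, n }; the workshared loop then runs over
   the logical iterations 0 .. m - 1, while COUNTS[1] lets the library size
   the dependence bookkeeping for the ordered inner loop.  */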

/* The *_next routines are called when the thread completes processing of
   the iteration block currently assigned to it.  If the work-share
   construct is bound directly to a parallel construct, then the iteration
   bounds may have been set up before the parallel, in which case this may
   be the first iteration for the thread.

   Returns true if there is work remaining to be performed; *ISTART and
   *IEND are filled with a new iteration block.  Returns false if all work
   has been assigned.  */

static bool
gomp_loop_ull_static_next (gomp_ull *istart, gomp_ull *iend)
{
  return !gomp_iter_ull_static_next (istart, iend);
}

static bool
gomp_loop_ull_dynamic_next (gomp_ull *istart, gomp_ull *iend)
{
  bool ret;

#if defined HAVE_SYNC_BUILTINS && defined __LP64__
  ret = gomp_iter_ull_dynamic_next (istart, iend);
#else
  struct gomp_thread *thr = gomp_thread ();
  gomp_mutex_lock (&thr->ts.work_share->lock);
  ret = gomp_iter_ull_dynamic_next_locked (istart, iend);
  gomp_mutex_unlock (&thr->ts.work_share->lock);
#endif

  return ret;
}

static bool
gomp_loop_ull_guided_next (gomp_ull *istart, gomp_ull *iend)
{
  bool ret;

#if defined HAVE_SYNC_BUILTINS && defined __LP64__
  ret = gomp_iter_ull_guided_next (istart, iend);
#else
  struct gomp_thread *thr = gomp_thread ();
  gomp_mutex_lock (&thr->ts.work_share->lock);
  ret = gomp_iter_ull_guided_next_locked (istart, iend);
  gomp_mutex_unlock (&thr->ts.work_share->lock);
#endif

  return ret;
}

bool
GOMP_loop_ull_runtime_next (gomp_ull *istart, gomp_ull *iend)
{
  struct gomp_thread *thr = gomp_thread ();

  switch (thr->ts.work_share->sched)
    {
    case GFS_STATIC:
    case GFS_AUTO:
      return gomp_loop_ull_static_next (istart, iend);
    case GFS_DYNAMIC:
      return gomp_loop_ull_dynamic_next (istart, iend);
    case GFS_GUIDED:
      return gomp_loop_ull_guided_next (istart, iend);
    default:
      abort ();
    }
}

/* The *_ordered_*_next routines are called when the thread completes
   processing of the iteration block currently assigned to it.

   Returns true if there is work remaining to be performed; *ISTART and
   *IEND are filled with a new iteration block.  Returns false if all work
   has been assigned.  */

static bool
gomp_loop_ull_ordered_static_next (gomp_ull *istart, gomp_ull *iend)
{
  struct gomp_thread *thr = gomp_thread ();
  int test;

  gomp_ordered_sync ();
  gomp_mutex_lock (&thr->ts.work_share->lock);
  test = gomp_iter_ull_static_next (istart, iend);
  if (test >= 0)
    gomp_ordered_static_next ();
  gomp_mutex_unlock (&thr->ts.work_share->lock);

  return test == 0;
}

static bool
gomp_loop_ull_ordered_dynamic_next (gomp_ull *istart, gomp_ull *iend)
{
  struct gomp_thread *thr = gomp_thread ();
  bool ret;

  gomp_ordered_sync ();
  gomp_mutex_lock (&thr->ts.work_share->lock);
  ret = gomp_iter_ull_dynamic_next_locked (istart, iend);
  if (ret)
    gomp_ordered_next ();
  else
    gomp_ordered_last ();
  gomp_mutex_unlock (&thr->ts.work_share->lock);

  return ret;
}

static bool
gomp_loop_ull_ordered_guided_next (gomp_ull *istart, gomp_ull *iend)
{
  struct gomp_thread *thr = gomp_thread ();
  bool ret;

  gomp_ordered_sync ();
  gomp_mutex_lock (&thr->ts.work_share->lock);
  ret = gomp_iter_ull_guided_next_locked (istart, iend);
  if (ret)
    gomp_ordered_next ();
  else
    gomp_ordered_last ();
  gomp_mutex_unlock (&thr->ts.work_share->lock);

  return ret;
}

bool
GOMP_loop_ull_ordered_runtime_next (gomp_ull *istart, gomp_ull *iend)
{
  struct gomp_thread *thr = gomp_thread ();

  switch (thr->ts.work_share->sched)
    {
    case GFS_STATIC:
    case GFS_AUTO:
      return gomp_loop_ull_ordered_static_next (istart, iend);
    case GFS_DYNAMIC:
      return gomp_loop_ull_ordered_dynamic_next (istart, iend);
    case GFS_GUIDED:
      return gomp_loop_ull_ordered_guided_next (istart, iend);
    default:
      abort ();
    }
}

/* We use static functions above so that we're sure that the "runtime"
   function can defer to the proper routine without interposition.  We
   export the static function with a strong alias when possible, or with
   a wrapper function otherwise.  */

#ifdef HAVE_ATTRIBUTE_ALIAS
extern __typeof(gomp_loop_ull_static_start) GOMP_loop_ull_static_start
        __attribute__((alias ("gomp_loop_ull_static_start")));
extern __typeof(gomp_loop_ull_dynamic_start) GOMP_loop_ull_dynamic_start
        __attribute__((alias ("gomp_loop_ull_dynamic_start")));
extern __typeof(gomp_loop_ull_guided_start) GOMP_loop_ull_guided_start
        __attribute__((alias ("gomp_loop_ull_guided_start")));
extern __typeof(gomp_loop_ull_dynamic_start) GOMP_loop_ull_nonmonotonic_dynamic_start
        __attribute__((alias ("gomp_loop_ull_dynamic_start")));
extern __typeof(gomp_loop_ull_guided_start) GOMP_loop_ull_nonmonotonic_guided_start
        __attribute__((alias ("gomp_loop_ull_guided_start")));

extern __typeof(gomp_loop_ull_ordered_static_start) GOMP_loop_ull_ordered_static_start
        __attribute__((alias ("gomp_loop_ull_ordered_static_start")));
extern __typeof(gomp_loop_ull_ordered_dynamic_start) GOMP_loop_ull_ordered_dynamic_start
        __attribute__((alias ("gomp_loop_ull_ordered_dynamic_start")));
extern __typeof(gomp_loop_ull_ordered_guided_start) GOMP_loop_ull_ordered_guided_start
        __attribute__((alias ("gomp_loop_ull_ordered_guided_start")));

extern __typeof(gomp_loop_ull_doacross_static_start) GOMP_loop_ull_doacross_static_start
        __attribute__((alias ("gomp_loop_ull_doacross_static_start")));
extern __typeof(gomp_loop_ull_doacross_dynamic_start) GOMP_loop_ull_doacross_dynamic_start
        __attribute__((alias ("gomp_loop_ull_doacross_dynamic_start")));
extern __typeof(gomp_loop_ull_doacross_guided_start) GOMP_loop_ull_doacross_guided_start
        __attribute__((alias ("gomp_loop_ull_doacross_guided_start")));

extern __typeof(gomp_loop_ull_static_next) GOMP_loop_ull_static_next
        __attribute__((alias ("gomp_loop_ull_static_next")));
extern __typeof(gomp_loop_ull_dynamic_next) GOMP_loop_ull_dynamic_next
        __attribute__((alias ("gomp_loop_ull_dynamic_next")));
extern __typeof(gomp_loop_ull_guided_next) GOMP_loop_ull_guided_next
        __attribute__((alias ("gomp_loop_ull_guided_next")));
extern __typeof(gomp_loop_ull_dynamic_next) GOMP_loop_ull_nonmonotonic_dynamic_next
        __attribute__((alias ("gomp_loop_ull_dynamic_next")));
extern __typeof(gomp_loop_ull_guided_next) GOMP_loop_ull_nonmonotonic_guided_next
        __attribute__((alias ("gomp_loop_ull_guided_next")));

extern __typeof(gomp_loop_ull_ordered_static_next) GOMP_loop_ull_ordered_static_next
        __attribute__((alias ("gomp_loop_ull_ordered_static_next")));
extern __typeof(gomp_loop_ull_ordered_dynamic_next) GOMP_loop_ull_ordered_dynamic_next
        __attribute__((alias ("gomp_loop_ull_ordered_dynamic_next")));
extern __typeof(gomp_loop_ull_ordered_guided_next) GOMP_loop_ull_ordered_guided_next
        __attribute__((alias ("gomp_loop_ull_ordered_guided_next")));
#else
bool
GOMP_loop_ull_static_start (bool up, gomp_ull start, gomp_ull end,
                            gomp_ull incr, gomp_ull chunk_size,
                            gomp_ull *istart, gomp_ull *iend)
{
  return gomp_loop_ull_static_start (up, start, end, incr, chunk_size, istart,
                                     iend);
}

bool
GOMP_loop_ull_dynamic_start (bool up, gomp_ull start, gomp_ull end,
                             gomp_ull incr, gomp_ull chunk_size,
                             gomp_ull *istart, gomp_ull *iend)
{
  return gomp_loop_ull_dynamic_start (up, start, end, incr, chunk_size, istart,
                                      iend);
}

bool
GOMP_loop_ull_guided_start (bool up, gomp_ull start, gomp_ull end,
                            gomp_ull incr, gomp_ull chunk_size,
                            gomp_ull *istart, gomp_ull *iend)
{
  return gomp_loop_ull_guided_start (up, start, end, incr, chunk_size, istart,
                                     iend);
}

bool
GOMP_loop_ull_nonmonotonic_dynamic_start (bool up, gomp_ull start,
                                          gomp_ull end, gomp_ull incr,
                                          gomp_ull chunk_size,
                                          gomp_ull *istart, gomp_ull *iend)
{
  return gomp_loop_ull_dynamic_start (up, start, end, incr, chunk_size, istart,
                                      iend);
}

bool
GOMP_loop_ull_nonmonotonic_guided_start (bool up, gomp_ull start, gomp_ull end,
                                         gomp_ull incr, gomp_ull chunk_size,
                                         gomp_ull *istart, gomp_ull *iend)
{
  return gomp_loop_ull_guided_start (up, start, end, incr, chunk_size, istart,
                                     iend);
}

bool
GOMP_loop_ull_ordered_static_start (bool up, gomp_ull start, gomp_ull end,
                                    gomp_ull incr, gomp_ull chunk_size,
                                    gomp_ull *istart, gomp_ull *iend)
{
  return gomp_loop_ull_ordered_static_start (up, start, end, incr, chunk_size,
                                             istart, iend);
}

bool
GOMP_loop_ull_ordered_dynamic_start (bool up, gomp_ull start, gomp_ull end,
                                     gomp_ull incr, gomp_ull chunk_size,
                                     gomp_ull *istart, gomp_ull *iend)
{
  return gomp_loop_ull_ordered_dynamic_start (up, start, end, incr, chunk_size,
                                              istart, iend);
}

bool
GOMP_loop_ull_ordered_guided_start (bool up, gomp_ull start, gomp_ull end,
                                    gomp_ull incr, gomp_ull chunk_size,
                                    gomp_ull *istart, gomp_ull *iend)
{
  return gomp_loop_ull_ordered_guided_start (up, start, end, incr, chunk_size,
                                             istart, iend);
}

bool
GOMP_loop_ull_doacross_static_start (unsigned ncounts, gomp_ull *counts,
                                     gomp_ull chunk_size, gomp_ull *istart,
                                     gomp_ull *iend)
{
  return gomp_loop_ull_doacross_static_start (ncounts, counts, chunk_size,
                                              istart, iend);
}

bool
GOMP_loop_ull_doacross_dynamic_start (unsigned ncounts, gomp_ull *counts,
                                      gomp_ull chunk_size, gomp_ull *istart,
                                      gomp_ull *iend)
{
  return gomp_loop_ull_doacross_dynamic_start (ncounts, counts, chunk_size,
                                               istart, iend);
}

bool
GOMP_loop_ull_doacross_guided_start (unsigned ncounts, gomp_ull *counts,
                                     gomp_ull chunk_size, gomp_ull *istart,
                                     gomp_ull *iend)
{
  return gomp_loop_ull_doacross_guided_start (ncounts, counts, chunk_size,
                                              istart, iend);
}

bool
GOMP_loop_ull_static_next (gomp_ull *istart, gomp_ull *iend)
{
  return gomp_loop_ull_static_next (istart, iend);
}

bool
GOMP_loop_ull_dynamic_next (gomp_ull *istart, gomp_ull *iend)
{
  return gomp_loop_ull_dynamic_next (istart, iend);
}

bool
GOMP_loop_ull_guided_next (gomp_ull *istart, gomp_ull *iend)
{
  return gomp_loop_ull_guided_next (istart, iend);
}

bool
GOMP_loop_ull_nonmonotonic_dynamic_next (gomp_ull *istart, gomp_ull *iend)
{
  return gomp_loop_ull_dynamic_next (istart, iend);
}

bool
GOMP_loop_ull_nonmonotonic_guided_next (gomp_ull *istart, gomp_ull *iend)
{
  return gomp_loop_ull_guided_next (istart, iend);
}

bool
GOMP_loop_ull_ordered_static_next (gomp_ull *istart, gomp_ull *iend)
{
  return gomp_loop_ull_ordered_static_next (istart, iend);
}

bool
GOMP_loop_ull_ordered_dynamic_next (gomp_ull *istart, gomp_ull *iend)
{
  return gomp_loop_ull_ordered_dynamic_next (istart, iend);
}

bool
GOMP_loop_ull_ordered_guided_next (gomp_ull *istart, gomp_ull *iend)
{
  return gomp_loop_ull_ordered_guided_next (istart, iend);
}
#endif