/* Copyright (C) 2005-2016 Free Software Foundation, Inc.
   Contributed by Richard Henderson <rth@redhat.com>.

   This file is part of the GNU Offloading and Multi Processing Library
   (libgomp).

   Libgomp is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
   FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */

/* This file handles the LOOP (FOR/DO) construct.  */

#include <limits.h>
#include <stdlib.h>
#include "libgomp.h"

typedef unsigned long long gomp_ull;
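
/* As an illustrative sketch (not part of libgomp itself, and not emitted
   verbatim by any particular GCC version): for a loop such as

     #pragma omp for schedule(runtime)
     for (unsigned long long i = start; i < end; i += incr)
       body (i);

   whose iteration variable is too wide for the plain "long" entry points,
   the compiler emits calls into the _ull family below, roughly of the shape

     gomp_ull istart, iend;
     if (GOMP_loop_ull_runtime_start (true, start, end, incr,
                                      &istart, &iend))
       do
         {
           for (gomp_ull i = istart; i < iend; i += incr)
             body (i);
         }
       while (GOMP_loop_ull_runtime_next (&istart, &iend));
     GOMP_loop_end ();

   BODY, START, END and INCR stand for user code and loop bounds.  */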

/* Initialize the given work share construct from the given arguments.  */

static inline void
gomp_loop_ull_init (struct gomp_work_share *ws, bool up, gomp_ull start,
                    gomp_ull end, gomp_ull incr, enum gomp_schedule_type sched,
                    gomp_ull chunk_size)
{
  ws->sched = sched;
  ws->chunk_size_ull = chunk_size;
  /* Canonicalize loops that have zero iterations to ->next == ->end.  */
  ws->end_ull = ((up && start > end) || (!up && start < end))
                ? start : end;
  ws->incr_ull = incr;
  ws->next_ull = start;
  ws->mode = 0;
  if (sched == GFS_DYNAMIC)
    {
      ws->chunk_size_ull *= incr;

#if defined HAVE_SYNC_BUILTINS && defined __LP64__
      {
        /* For dynamic scheduling prepare things to make each iteration
           faster.  */
        struct gomp_thread *thr = gomp_thread ();
        struct gomp_team *team = thr->ts.team;
        long nthreads = team ? team->nthreads : 1;

        if (__builtin_expect (up, 1))
          {
            /* Cheap overflow protection.  */
            if (__builtin_expect ((nthreads | ws->chunk_size_ull)
                                  < 1ULL << (sizeof (gomp_ull)
                                             * __CHAR_BIT__ / 2 - 1), 1))
              ws->mode = ws->end_ull < (__LONG_LONG_MAX__ * 2ULL + 1
                                        - (nthreads + 1) * ws->chunk_size_ull);
          }
        /* Cheap overflow protection.  */
        else if (__builtin_expect ((nthreads | -ws->chunk_size_ull)
                                   < 1ULL << (sizeof (gomp_ull)
                                              * __CHAR_BIT__ / 2 - 1), 1))
          ws->mode = ws->end_ull > ((nthreads + 1) * -ws->chunk_size_ull
                                    - (__LONG_LONG_MAX__ * 2ULL + 1));
      }
#endif
    }
  if (!up)
    ws->mode |= 2;
}
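
/* A worked example of the "cheap overflow protection" above (an
   explanatory note, assuming a 64-bit gomp_ull): __CHAR_BIT__ is 8, so the
   threshold is 1ULL << 31.  When both NTHREADS and the chunk size are
   below 2^31, the product (nthreads + 1) * ws->chunk_size_ull stays below
   2^62 and cannot itself wrap.  Note that __LONG_LONG_MAX__ * 2ULL + 1 is
   just ~0ULL spelled portably, so the comparison asks whether ->end_ull
   leaves enough headroom below ~0ULL for every thread to grab one more
   chunk; only then is ws->mode set and the lock-free atomic-add iterator
   allowed to run without wrap-around checks.  */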

/* The *_start routines are called when first encountering a loop construct
   that is not bound directly to a parallel construct.  The first thread
   that arrives will create the work-share construct; subsequent threads
   will see the construct exists and allocate work from it.

   START, END, INCR are the bounds of the loop; due to the restrictions of
   OpenMP, these values must be the same in every thread.  This is not
   verified (nor is it entirely verifiable, since START is not necessarily
   retained intact in the work-share data structure).  CHUNK_SIZE is the
   scheduling parameter; again this must be identical in all threads.

   Returns true if there's any work for this thread to perform.  If so,
   *ISTART and *IEND are filled with the bounds of the iteration block
   allocated to this thread.  Returns false if all work was assigned to
   other threads prior to this thread's arrival.  */

static bool
gomp_loop_ull_static_start (bool up, gomp_ull start, gomp_ull end,
                            gomp_ull incr, gomp_ull chunk_size,
                            gomp_ull *istart, gomp_ull *iend)
{
  struct gomp_thread *thr = gomp_thread ();

  thr->ts.static_trip = 0;
  if (gomp_work_share_start (false))
    {
      gomp_loop_ull_init (thr->ts.work_share, up, start, end, incr,
                          GFS_STATIC, chunk_size);
      gomp_work_share_init_done ();
    }

  return !gomp_iter_ull_static_next (istart, iend);
}
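
/* To illustrate the first-arrival protocol described above (a sketch, not
   compiler output): every thread in the team makes the same call, e.g. for
   schedule(static, 4) over 0 .. n-1 with unit stride

     gomp_ull istart, iend;
     bool have_work
       = GOMP_loop_ull_static_start (true, 0, n, 1, 4, &istart, &iend);

   Only the first thread to arrive initializes the work share; the return
   value tells each thread whether any iteration block was assigned to
   it.  */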

static bool
gomp_loop_ull_dynamic_start (bool up, gomp_ull start, gomp_ull end,
                             gomp_ull incr, gomp_ull chunk_size,
                             gomp_ull *istart, gomp_ull *iend)
{
  struct gomp_thread *thr = gomp_thread ();
  bool ret;

  if (gomp_work_share_start (false))
    {
      gomp_loop_ull_init (thr->ts.work_share, up, start, end, incr,
                          GFS_DYNAMIC, chunk_size);
      gomp_work_share_init_done ();
    }

#if defined HAVE_SYNC_BUILTINS && defined __LP64__
  ret = gomp_iter_ull_dynamic_next (istart, iend);
#else
  gomp_mutex_lock (&thr->ts.work_share->lock);
  ret = gomp_iter_ull_dynamic_next_locked (istart, iend);
  gomp_mutex_unlock (&thr->ts.work_share->lock);
#endif

  return ret;
}

static bool
gomp_loop_ull_guided_start (bool up, gomp_ull start, gomp_ull end,
                            gomp_ull incr, gomp_ull chunk_size,
                            gomp_ull *istart, gomp_ull *iend)
{
  struct gomp_thread *thr = gomp_thread ();
  bool ret;

  if (gomp_work_share_start (false))
    {
      gomp_loop_ull_init (thr->ts.work_share, up, start, end, incr,
                          GFS_GUIDED, chunk_size);
      gomp_work_share_init_done ();
    }

#if defined HAVE_SYNC_BUILTINS && defined __LP64__
  ret = gomp_iter_ull_guided_next (istart, iend);
#else
  gomp_mutex_lock (&thr->ts.work_share->lock);
  ret = gomp_iter_ull_guided_next_locked (istart, iend);
  gomp_mutex_unlock (&thr->ts.work_share->lock);
#endif

  return ret;
}

bool
GOMP_loop_ull_runtime_start (bool up, gomp_ull start, gomp_ull end,
                             gomp_ull incr, gomp_ull *istart, gomp_ull *iend)
{
  struct gomp_task_icv *icv = gomp_icv (false);
  switch (icv->run_sched_var)
    {
    case GFS_STATIC:
      return gomp_loop_ull_static_start (up, start, end, incr,
                                         icv->run_sched_chunk_size,
                                         istart, iend);
    case GFS_DYNAMIC:
      return gomp_loop_ull_dynamic_start (up, start, end, incr,
                                          icv->run_sched_chunk_size,
                                          istart, iend);
    case GFS_GUIDED:
      return gomp_loop_ull_guided_start (up, start, end, incr,
                                         icv->run_sched_chunk_size,
                                         istart, iend);
    case GFS_AUTO:
      /* For now map to schedule(static), later on we could play with feedback
         driven choice.  */
      return gomp_loop_ull_static_start (up, start, end, incr,
                                         0, istart, iend);
    default:
      abort ();
    }
}
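
/* As a usage note (the parsing itself lives elsewhere in libgomp): which
   branch the switch above takes is controlled by the run-sched ICV, set
   for instance from the environment before the program starts,

     OMP_SCHEDULE="dynamic,16"

   which selects GFS_DYNAMIC with chunk size 16, or at run time via
   omp_set_schedule (omp_sched_dynamic, 16).  */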

/* The *_ordered_*_start routines are similar.  The only difference is that
   this work-share construct is initialized to expect an ORDERED section.  */

static bool
gomp_loop_ull_ordered_static_start (bool up, gomp_ull start, gomp_ull end,
                                    gomp_ull incr, gomp_ull chunk_size,
                                    gomp_ull *istart, gomp_ull *iend)
{
  struct gomp_thread *thr = gomp_thread ();

  thr->ts.static_trip = 0;
  if (gomp_work_share_start (true))
    {
      gomp_loop_ull_init (thr->ts.work_share, up, start, end, incr,
                          GFS_STATIC, chunk_size);
      gomp_ordered_static_init ();
      gomp_work_share_init_done ();
    }

  return !gomp_iter_ull_static_next (istart, iend);
}

static bool
gomp_loop_ull_ordered_dynamic_start (bool up, gomp_ull start, gomp_ull end,
                                     gomp_ull incr, gomp_ull chunk_size,
                                     gomp_ull *istart, gomp_ull *iend)
{
  struct gomp_thread *thr = gomp_thread ();
  bool ret;

  if (gomp_work_share_start (true))
    {
      gomp_loop_ull_init (thr->ts.work_share, up, start, end, incr,
                          GFS_DYNAMIC, chunk_size);
      gomp_mutex_lock (&thr->ts.work_share->lock);
      gomp_work_share_init_done ();
    }
  else
    gomp_mutex_lock (&thr->ts.work_share->lock);

  ret = gomp_iter_ull_dynamic_next_locked (istart, iend);
  if (ret)
    gomp_ordered_first ();
  gomp_mutex_unlock (&thr->ts.work_share->lock);

  return ret;
}

static bool
gomp_loop_ull_ordered_guided_start (bool up, gomp_ull start, gomp_ull end,
                                    gomp_ull incr, gomp_ull chunk_size,
                                    gomp_ull *istart, gomp_ull *iend)
{
  struct gomp_thread *thr = gomp_thread ();
  bool ret;

  if (gomp_work_share_start (true))
    {
      gomp_loop_ull_init (thr->ts.work_share, up, start, end, incr,
                          GFS_GUIDED, chunk_size);
      gomp_mutex_lock (&thr->ts.work_share->lock);
      gomp_work_share_init_done ();
    }
  else
    gomp_mutex_lock (&thr->ts.work_share->lock);

  ret = gomp_iter_ull_guided_next_locked (istart, iend);
  if (ret)
    gomp_ordered_first ();
  gomp_mutex_unlock (&thr->ts.work_share->lock);

  return ret;
}

bool
GOMP_loop_ull_ordered_runtime_start (bool up, gomp_ull start, gomp_ull end,
                                     gomp_ull incr, gomp_ull *istart,
                                     gomp_ull *iend)
{
  struct gomp_task_icv *icv = gomp_icv (false);
  switch (icv->run_sched_var)
    {
    case GFS_STATIC:
      return gomp_loop_ull_ordered_static_start (up, start, end, incr,
                                                 icv->run_sched_chunk_size,
                                                 istart, iend);
    case GFS_DYNAMIC:
      return gomp_loop_ull_ordered_dynamic_start (up, start, end, incr,
                                                  icv->run_sched_chunk_size,
                                                  istart, iend);
    case GFS_GUIDED:
      return gomp_loop_ull_ordered_guided_start (up, start, end, incr,
                                                 icv->run_sched_chunk_size,
                                                 istart, iend);
    case GFS_AUTO:
      /* For now map to schedule(static), later on we could play with feedback
         driven choice.  */
      return gomp_loop_ull_ordered_static_start (up, start, end, incr,
                                                 0, istart, iend);
    default:
      abort ();
    }
}
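
/* Illustrative sketch of how the ordered entry points combine (again not
   emitted verbatim by any particular compiler version): for

     #pragma omp for ordered schedule(runtime)
     for (unsigned long long i = 0; i < n; i++)
       {
         work (i);
         #pragma omp ordered
         emit (i);
       }

   each thread runs approximately

     if (GOMP_loop_ull_ordered_runtime_start (true, 0, n, 1,
                                              &istart, &iend))
       do
         for (gomp_ull i = istart; i < iend; i++)
           {
             work (i);
             GOMP_ordered_start ();
             emit (i);
             GOMP_ordered_end ();
           }
       while (GOMP_loop_ull_ordered_runtime_next (&istart, &iend));
     GOMP_loop_end ();

   where GOMP_ordered_start blocks until this thread's iteration block is
   next in sequence; WORK and EMIT stand for user code.  */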

/* The *_doacross_*_start routines are similar.  The only difference is that
   this work-share construct is initialized to expect an ORDERED(N) - DOACROSS
   section, and the worksharing loop iterates always from 0 to COUNTS[0] - 1
   and other COUNTS array elements tell the library number of iterations
   in the ordered inner loops.  */

static bool
gomp_loop_ull_doacross_static_start (unsigned ncounts, gomp_ull *counts,
                                     gomp_ull chunk_size, gomp_ull *istart,
                                     gomp_ull *iend)
{
  struct gomp_thread *thr = gomp_thread ();

  thr->ts.static_trip = 0;
  if (gomp_work_share_start (false))
    {
      gomp_loop_ull_init (thr->ts.work_share, true, 0, counts[0], 1,
                          GFS_STATIC, chunk_size);
      gomp_doacross_ull_init (ncounts, counts, chunk_size);
      gomp_work_share_init_done ();
    }

  return !gomp_iter_ull_static_next (istart, iend);
}

static bool
gomp_loop_ull_doacross_dynamic_start (unsigned ncounts, gomp_ull *counts,
                                      gomp_ull chunk_size, gomp_ull *istart,
                                      gomp_ull *iend)
{
  struct gomp_thread *thr = gomp_thread ();
  bool ret;

  if (gomp_work_share_start (false))
    {
      gomp_loop_ull_init (thr->ts.work_share, true, 0, counts[0], 1,
                          GFS_DYNAMIC, chunk_size);
      gomp_doacross_ull_init (ncounts, counts, chunk_size);
      gomp_work_share_init_done ();
    }

#if defined HAVE_SYNC_BUILTINS && defined __LP64__
  ret = gomp_iter_ull_dynamic_next (istart, iend);
#else
  gomp_mutex_lock (&thr->ts.work_share->lock);
  ret = gomp_iter_ull_dynamic_next_locked (istart, iend);
  gomp_mutex_unlock (&thr->ts.work_share->lock);
#endif

  return ret;
}

static bool
gomp_loop_ull_doacross_guided_start (unsigned ncounts, gomp_ull *counts,
                                     gomp_ull chunk_size, gomp_ull *istart,
                                     gomp_ull *iend)
{
  struct gomp_thread *thr = gomp_thread ();
  bool ret;

  if (gomp_work_share_start (false))
    {
      gomp_loop_ull_init (thr->ts.work_share, true, 0, counts[0], 1,
                          GFS_GUIDED, chunk_size);
      gomp_doacross_ull_init (ncounts, counts, chunk_size);
      gomp_work_share_init_done ();
    }

#if defined HAVE_SYNC_BUILTINS && defined __LP64__
  ret = gomp_iter_ull_guided_next (istart, iend);
#else
  gomp_mutex_lock (&thr->ts.work_share->lock);
  ret = gomp_iter_ull_guided_next_locked (istart, iend);
  gomp_mutex_unlock (&thr->ts.work_share->lock);
#endif

  return ret;
}

bool
GOMP_loop_ull_doacross_runtime_start (unsigned ncounts, gomp_ull *counts,
                                      gomp_ull *istart, gomp_ull *iend)
{
  struct gomp_task_icv *icv = gomp_icv (false);
  switch (icv->run_sched_var)
    {
    case GFS_STATIC:
      return gomp_loop_ull_doacross_static_start (ncounts, counts,
                                                  icv->run_sched_chunk_size,
                                                  istart, iend);
    case GFS_DYNAMIC:
      return gomp_loop_ull_doacross_dynamic_start (ncounts, counts,
                                                   icv->run_sched_chunk_size,
                                                   istart, iend);
    case GFS_GUIDED:
      return gomp_loop_ull_doacross_guided_start (ncounts, counts,
                                                  icv->run_sched_chunk_size,
                                                  istart, iend);
    case GFS_AUTO:
      /* For now map to schedule(static), later on we could play with feedback
         driven choice.  */
      return gomp_loop_ull_doacross_static_start (ncounts, counts,
                                                  0, istart, iend);
    default:
      abort ();
    }
}
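
/* Illustrative sketch of the COUNTS encoding (the post/wait entry points
   live in ordered.c): for a doacross nest such as

     #pragma omp for ordered(2)
     for (unsigned long long i = 0; i < m; i++)
       for (unsigned long long j = 0; j < n; j++)
         {
           #pragma omp ordered depend(sink: i - 1, j)
           work (i, j);
           #pragma omp ordered depend(source)
         }

   the compiler passes ncounts == 2 with counts[] == { m, n }: the
   work-share loop runs over 0 .. counts[0] - 1, while counts[1] tells the
   library the extent of the inner ordered loop so that
   GOMP_doacross_ull_post and GOMP_doacross_ull_wait can map (i, j) pairs
   to single iteration numbers.  WORK stands for user code.  */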

/* The *_next routines are called when the thread completes processing of
   the iteration block currently assigned to it.  If the work-share
   construct is bound directly to a parallel construct, then the iteration
   bounds may have been set up before the parallel.  In which case, this
   may be the first iteration for the thread.

   Returns true if there is work remaining to be performed; *ISTART and
   *IEND are filled with a new iteration block.  Returns false if all work
   has been assigned.  */

static bool
gomp_loop_ull_static_next (gomp_ull *istart, gomp_ull *iend)
{
  return !gomp_iter_ull_static_next (istart, iend);
}

static bool
gomp_loop_ull_dynamic_next (gomp_ull *istart, gomp_ull *iend)
{
  bool ret;

#if defined HAVE_SYNC_BUILTINS && defined __LP64__
  ret = gomp_iter_ull_dynamic_next (istart, iend);
#else
  struct gomp_thread *thr = gomp_thread ();
  gomp_mutex_lock (&thr->ts.work_share->lock);
  ret = gomp_iter_ull_dynamic_next_locked (istart, iend);
  gomp_mutex_unlock (&thr->ts.work_share->lock);
#endif

  return ret;
}

static bool
gomp_loop_ull_guided_next (gomp_ull *istart, gomp_ull *iend)
{
  bool ret;

#if defined HAVE_SYNC_BUILTINS && defined __LP64__
  ret = gomp_iter_ull_guided_next (istart, iend);
#else
  struct gomp_thread *thr = gomp_thread ();
  gomp_mutex_lock (&thr->ts.work_share->lock);
  ret = gomp_iter_ull_guided_next_locked (istart, iend);
  gomp_mutex_unlock (&thr->ts.work_share->lock);
#endif

  return ret;
}

bool
GOMP_loop_ull_runtime_next (gomp_ull *istart, gomp_ull *iend)
{
  struct gomp_thread *thr = gomp_thread ();

  switch (thr->ts.work_share->sched)
    {
    case GFS_STATIC:
    case GFS_AUTO:
      return gomp_loop_ull_static_next (istart, iend);
    case GFS_DYNAMIC:
      return gomp_loop_ull_dynamic_next (istart, iend);
    case GFS_GUIDED:
      return gomp_loop_ull_guided_next (istart, iend);
    default:
      abort ();
    }
}
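
/* Usage sketch for the case the comment above describes (illustrative
   only): when the loop is bound directly to a parallel construct and the
   work share has already been set up, each thread may skip the *_start
   call and enter straight at a *_next routine:

     gomp_ull istart, iend;
     while (GOMP_loop_ull_dynamic_next (&istart, &iend))
       for (gomp_ull i = istart; i < iend; i++)
         body (i);
     GOMP_loop_end_nowait ();

   GOMP_loop_end_nowait (or GOMP_loop_end, when the loop has no nowait
   clause) releases the work share afterwards.  */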

/* The *_ordered_*_next routines are called when the thread completes
   processing of the iteration block currently assigned to it.

   Returns true if there is work remaining to be performed; *ISTART and
   *IEND are filled with a new iteration block.  Returns false if all work
   has been assigned.  */

static bool
gomp_loop_ull_ordered_static_next (gomp_ull *istart, gomp_ull *iend)
{
  struct gomp_thread *thr = gomp_thread ();
  int test;

  gomp_ordered_sync ();
  gomp_mutex_lock (&thr->ts.work_share->lock);
  test = gomp_iter_ull_static_next (istart, iend);
  if (test >= 0)
    gomp_ordered_static_next ();
  gomp_mutex_unlock (&thr->ts.work_share->lock);

  return test == 0;
}

static bool
gomp_loop_ull_ordered_dynamic_next (gomp_ull *istart, gomp_ull *iend)
{
  struct gomp_thread *thr = gomp_thread ();
  bool ret;

  gomp_ordered_sync ();
  gomp_mutex_lock (&thr->ts.work_share->lock);
  ret = gomp_iter_ull_dynamic_next_locked (istart, iend);
  if (ret)
    gomp_ordered_next ();
  else
    gomp_ordered_last ();
  gomp_mutex_unlock (&thr->ts.work_share->lock);

  return ret;
}

static bool
gomp_loop_ull_ordered_guided_next (gomp_ull *istart, gomp_ull *iend)
{
  struct gomp_thread *thr = gomp_thread ();
  bool ret;

  gomp_ordered_sync ();
  gomp_mutex_lock (&thr->ts.work_share->lock);
  ret = gomp_iter_ull_guided_next_locked (istart, iend);
  if (ret)
    gomp_ordered_next ();
  else
    gomp_ordered_last ();
  gomp_mutex_unlock (&thr->ts.work_share->lock);

  return ret;
}

bool
GOMP_loop_ull_ordered_runtime_next (gomp_ull *istart, gomp_ull *iend)
{
  struct gomp_thread *thr = gomp_thread ();

  switch (thr->ts.work_share->sched)
    {
    case GFS_STATIC:
    case GFS_AUTO:
      return gomp_loop_ull_ordered_static_next (istart, iend);
    case GFS_DYNAMIC:
      return gomp_loop_ull_ordered_dynamic_next (istart, iend);
    case GFS_GUIDED:
      return gomp_loop_ull_ordered_guided_next (istart, iend);
    default:
      abort ();
    }
}

/* We use static functions above so that we're sure that the "runtime"
   function can defer to the proper routine without interposition.  We
   export the static function with a strong alias when possible, or with
   a wrapper function otherwise.  */
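
/* A minimal sketch of the alias technique (FOO and FOO_IMPL are
   hypothetical names, not part of libgomp):

     static bool foo_impl (int x) { return x > 0; }
     extern __typeof (foo_impl) foo
       __attribute__((alias ("foo_impl")));

   Callers of the exported FOO bind to the very same definition that the
   internal callers of FOO_IMPL use, so interposing on the exported symbol
   cannot change which routine the "runtime" dispatchers above invoke.  */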

#ifdef HAVE_ATTRIBUTE_ALIAS
extern __typeof(gomp_loop_ull_static_start) GOMP_loop_ull_static_start
        __attribute__((alias ("gomp_loop_ull_static_start")));
extern __typeof(gomp_loop_ull_dynamic_start) GOMP_loop_ull_dynamic_start
        __attribute__((alias ("gomp_loop_ull_dynamic_start")));
extern __typeof(gomp_loop_ull_guided_start) GOMP_loop_ull_guided_start
        __attribute__((alias ("gomp_loop_ull_guided_start")));
extern __typeof(gomp_loop_ull_dynamic_start) GOMP_loop_ull_nonmonotonic_dynamic_start
        __attribute__((alias ("gomp_loop_ull_dynamic_start")));
extern __typeof(gomp_loop_ull_guided_start) GOMP_loop_ull_nonmonotonic_guided_start
        __attribute__((alias ("gomp_loop_ull_guided_start")));

extern __typeof(gomp_loop_ull_ordered_static_start) GOMP_loop_ull_ordered_static_start
        __attribute__((alias ("gomp_loop_ull_ordered_static_start")));
extern __typeof(gomp_loop_ull_ordered_dynamic_start) GOMP_loop_ull_ordered_dynamic_start
        __attribute__((alias ("gomp_loop_ull_ordered_dynamic_start")));
extern __typeof(gomp_loop_ull_ordered_guided_start) GOMP_loop_ull_ordered_guided_start
        __attribute__((alias ("gomp_loop_ull_ordered_guided_start")));

extern __typeof(gomp_loop_ull_doacross_static_start) GOMP_loop_ull_doacross_static_start
        __attribute__((alias ("gomp_loop_ull_doacross_static_start")));
extern __typeof(gomp_loop_ull_doacross_dynamic_start) GOMP_loop_ull_doacross_dynamic_start
        __attribute__((alias ("gomp_loop_ull_doacross_dynamic_start")));
extern __typeof(gomp_loop_ull_doacross_guided_start) GOMP_loop_ull_doacross_guided_start
        __attribute__((alias ("gomp_loop_ull_doacross_guided_start")));

extern __typeof(gomp_loop_ull_static_next) GOMP_loop_ull_static_next
        __attribute__((alias ("gomp_loop_ull_static_next")));
extern __typeof(gomp_loop_ull_dynamic_next) GOMP_loop_ull_dynamic_next
        __attribute__((alias ("gomp_loop_ull_dynamic_next")));
extern __typeof(gomp_loop_ull_guided_next) GOMP_loop_ull_guided_next
        __attribute__((alias ("gomp_loop_ull_guided_next")));
extern __typeof(gomp_loop_ull_dynamic_next) GOMP_loop_ull_nonmonotonic_dynamic_next
        __attribute__((alias ("gomp_loop_ull_dynamic_next")));
extern __typeof(gomp_loop_ull_guided_next) GOMP_loop_ull_nonmonotonic_guided_next
        __attribute__((alias ("gomp_loop_ull_guided_next")));

extern __typeof(gomp_loop_ull_ordered_static_next) GOMP_loop_ull_ordered_static_next
        __attribute__((alias ("gomp_loop_ull_ordered_static_next")));
extern __typeof(gomp_loop_ull_ordered_dynamic_next) GOMP_loop_ull_ordered_dynamic_next
        __attribute__((alias ("gomp_loop_ull_ordered_dynamic_next")));
extern __typeof(gomp_loop_ull_ordered_guided_next) GOMP_loop_ull_ordered_guided_next
        __attribute__((alias ("gomp_loop_ull_ordered_guided_next")));
#else
bool
GOMP_loop_ull_static_start (bool up, gomp_ull start, gomp_ull end,
                            gomp_ull incr, gomp_ull chunk_size,
                            gomp_ull *istart, gomp_ull *iend)
{
  return gomp_loop_ull_static_start (up, start, end, incr, chunk_size, istart,
                                     iend);
}

bool
GOMP_loop_ull_dynamic_start (bool up, gomp_ull start, gomp_ull end,
                             gomp_ull incr, gomp_ull chunk_size,
                             gomp_ull *istart, gomp_ull *iend)
{
  return gomp_loop_ull_dynamic_start (up, start, end, incr, chunk_size, istart,
                                      iend);
}

bool
GOMP_loop_ull_guided_start (bool up, gomp_ull start, gomp_ull end,
                            gomp_ull incr, gomp_ull chunk_size,
                            gomp_ull *istart, gomp_ull *iend)
{
  return gomp_loop_ull_guided_start (up, start, end, incr, chunk_size, istart,
                                     iend);
}

bool
GOMP_loop_ull_nonmonotonic_dynamic_start (bool up, gomp_ull start,
                                          gomp_ull end, gomp_ull incr,
                                          gomp_ull chunk_size,
                                          gomp_ull *istart, gomp_ull *iend)
{
  return gomp_loop_ull_dynamic_start (up, start, end, incr, chunk_size, istart,
                                      iend);
}

bool
GOMP_loop_ull_nonmonotonic_guided_start (bool up, gomp_ull start, gomp_ull end,
                                         gomp_ull incr, gomp_ull chunk_size,
                                         gomp_ull *istart, gomp_ull *iend)
{
  return gomp_loop_ull_guided_start (up, start, end, incr, chunk_size, istart,
                                     iend);
}

bool
GOMP_loop_ull_ordered_static_start (bool up, gomp_ull start, gomp_ull end,
                                    gomp_ull incr, gomp_ull chunk_size,
                                    gomp_ull *istart, gomp_ull *iend)
{
  return gomp_loop_ull_ordered_static_start (up, start, end, incr, chunk_size,
                                             istart, iend);
}

bool
GOMP_loop_ull_ordered_dynamic_start (bool up, gomp_ull start, gomp_ull end,
                                     gomp_ull incr, gomp_ull chunk_size,
                                     gomp_ull *istart, gomp_ull *iend)
{
  return gomp_loop_ull_ordered_dynamic_start (up, start, end, incr, chunk_size,
                                              istart, iend);
}

bool
GOMP_loop_ull_ordered_guided_start (bool up, gomp_ull start, gomp_ull end,
                                    gomp_ull incr, gomp_ull chunk_size,
                                    gomp_ull *istart, gomp_ull *iend)
{
  return gomp_loop_ull_ordered_guided_start (up, start, end, incr, chunk_size,
                                             istart, iend);
}

bool
GOMP_loop_ull_doacross_static_start (unsigned ncounts, gomp_ull *counts,
                                     gomp_ull chunk_size, gomp_ull *istart,
                                     gomp_ull *iend)
{
  return gomp_loop_ull_doacross_static_start (ncounts, counts, chunk_size,
                                              istart, iend);
}

bool
GOMP_loop_ull_doacross_dynamic_start (unsigned ncounts, gomp_ull *counts,
                                      gomp_ull chunk_size, gomp_ull *istart,
                                      gomp_ull *iend)
{
  return gomp_loop_ull_doacross_dynamic_start (ncounts, counts, chunk_size,
                                               istart, iend);
}

bool
GOMP_loop_ull_doacross_guided_start (unsigned ncounts, gomp_ull *counts,
                                     gomp_ull chunk_size, gomp_ull *istart,
                                     gomp_ull *iend)
{
  return gomp_loop_ull_doacross_guided_start (ncounts, counts, chunk_size,
                                              istart, iend);
}

bool
GOMP_loop_ull_static_next (gomp_ull *istart, gomp_ull *iend)
{
  return gomp_loop_ull_static_next (istart, iend);
}

bool
GOMP_loop_ull_dynamic_next (gomp_ull *istart, gomp_ull *iend)
{
  return gomp_loop_ull_dynamic_next (istart, iend);
}

bool
GOMP_loop_ull_guided_next (gomp_ull *istart, gomp_ull *iend)
{
  return gomp_loop_ull_guided_next (istart, iend);
}

bool
GOMP_loop_ull_nonmonotonic_dynamic_next (gomp_ull *istart, gomp_ull *iend)
{
  return gomp_loop_ull_dynamic_next (istart, iend);
}

bool
GOMP_loop_ull_nonmonotonic_guided_next (gomp_ull *istart, gomp_ull *iend)
{
  return gomp_loop_ull_guided_next (istart, iend);
}

bool
GOMP_loop_ull_ordered_static_next (gomp_ull *istart, gomp_ull *iend)
{
  return gomp_loop_ull_ordered_static_next (istart, iend);
}

bool
GOMP_loop_ull_ordered_dynamic_next (gomp_ull *istart, gomp_ull *iend)
{
  return gomp_loop_ull_ordered_dynamic_next (istart, iend);
}

bool
GOMP_loop_ull_ordered_guided_next (gomp_ull *istart, gomp_ull *iend)
{
  return gomp_loop_ull_ordered_guided_next (istart, iend);
}
#endif