/* libgomp/loop.c  (from official-gcc.git,
   blob 1cea334bcbfca3a13c7194e5717add2014f93855)  */
/* Copyright (C) 2005, 2008 Free Software Foundation, Inc.
   Contributed by Richard Henderson <rth@redhat.com>.

   This file is part of the GNU OpenMP Library (libgomp).

   Libgomp is free software; you can redistribute it and/or modify it
   under the terms of the GNU Lesser General Public License as published by
   the Free Software Foundation; either version 2.1 of the License, or
   (at your option) any later version.

   Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
   FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public License for
   more details.

   You should have received a copy of the GNU Lesser General Public License
   along with libgomp; see the file COPYING.LIB.  If not, write to the
   Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
   MA 02110-1301, USA.  */

/* As a special exception, if you link this library with other files, some
   of which are compiled with GCC, to produce an executable, this library
   does not by itself cause the resulting executable to be covered by the
   GNU General Public License.  This exception does not however invalidate
   any other reasons why the executable file might be covered by the GNU
   General Public License.  */
/* This file handles the LOOP (FOR/DO) construct.  */
30 #include <limits.h>
31 #include <stdlib.h>
32 #include "libgomp.h"
35 /* Initialize the given work share construct from the given arguments. */
37 static inline void
38 gomp_loop_init (struct gomp_work_share *ws, long start, long end, long incr,
39 enum gomp_schedule_type sched, long chunk_size)
41 ws->sched = sched;
42 ws->chunk_size = chunk_size;
43 /* Canonicalize loops that have zero iterations to ->next == ->end. */
44 ws->end = ((incr > 0 && start > end) || (incr < 0 && start < end))
45 ? start : end;
46 ws->incr = incr;
47 ws->next = start;
48 if (sched == GFS_DYNAMIC)
50 ws->chunk_size *= incr;
52 #ifdef HAVE_SYNC_BUILTINS
54 /* For dynamic scheduling prepare things to make each iteration
55 faster. */
56 struct gomp_thread *thr = gomp_thread ();
57 struct gomp_team *team = thr->ts.team;
58 long nthreads = team ? team->nthreads : 1;
60 if (__builtin_expect (incr > 0, 1))
62 /* Cheap overflow protection. */
63 if (__builtin_expect ((nthreads | ws->chunk_size)
64 >= 1UL << (sizeof (long)
65 * __CHAR_BIT__ / 2 - 1), 0))
66 ws->mode = 0;
67 else
68 ws->mode = ws->end < (LONG_MAX
69 - (nthreads + 1) * ws->chunk_size);
71 /* Cheap overflow protection. */
72 else if (__builtin_expect ((nthreads | -ws->chunk_size)
73 >= 1UL << (sizeof (long)
74 * __CHAR_BIT__ / 2 - 1), 0))
75 ws->mode = 0;
76 else
77 ws->mode = ws->end > (nthreads + 1) * -ws->chunk_size - LONG_MAX;
79 #endif
83 /* The *_start routines are called when first encountering a loop construct
84 that is not bound directly to a parallel construct. The first thread
85 that arrives will create the work-share construct; subsequent threads
86 will see the construct exists and allocate work from it.
88 START, END, INCR are the bounds of the loop; due to the restrictions of
89 OpenMP, these values must be the same in every thread. This is not
90 verified (nor is it entirely verifiable, since START is not necessarily
91 retained intact in the work-share data structure). CHUNK_SIZE is the
92 scheduling parameter; again this must be identical in all threads.
94 Returns true if there's any work for this thread to perform. If so,
95 *ISTART and *IEND are filled with the bounds of the iteration block
96 allocated to this thread. Returns false if all work was assigned to
97 other threads prior to this thread's arrival. */
99 static bool
100 gomp_loop_static_start (long start, long end, long incr, long chunk_size,
101 long *istart, long *iend)
103 struct gomp_thread *thr = gomp_thread ();
105 thr->ts.static_trip = 0;
106 if (gomp_work_share_start (false))
108 gomp_loop_init (thr->ts.work_share, start, end, incr,
109 GFS_STATIC, chunk_size);
110 gomp_work_share_init_done ();
113 return !gomp_iter_static_next (istart, iend);
116 static bool
117 gomp_loop_dynamic_start (long start, long end, long incr, long chunk_size,
118 long *istart, long *iend)
120 struct gomp_thread *thr = gomp_thread ();
121 bool ret;
123 if (gomp_work_share_start (false))
125 gomp_loop_init (thr->ts.work_share, start, end, incr,
126 GFS_DYNAMIC, chunk_size);
127 gomp_work_share_init_done ();
130 #ifdef HAVE_SYNC_BUILTINS
131 ret = gomp_iter_dynamic_next (istart, iend);
132 #else
133 gomp_mutex_lock (&thr->ts.work_share->lock);
134 ret = gomp_iter_dynamic_next_locked (istart, iend);
135 gomp_mutex_unlock (&thr->ts.work_share->lock);
136 #endif
138 return ret;
141 static bool
142 gomp_loop_guided_start (long start, long end, long incr, long chunk_size,
143 long *istart, long *iend)
145 struct gomp_thread *thr = gomp_thread ();
146 bool ret;
148 if (gomp_work_share_start (false))
150 gomp_loop_init (thr->ts.work_share, start, end, incr,
151 GFS_GUIDED, chunk_size);
152 gomp_work_share_init_done ();
155 #ifdef HAVE_SYNC_BUILTINS
156 ret = gomp_iter_guided_next (istart, iend);
157 #else
158 gomp_mutex_lock (&thr->ts.work_share->lock);
159 ret = gomp_iter_guided_next_locked (istart, iend);
160 gomp_mutex_unlock (&thr->ts.work_share->lock);
161 #endif
163 return ret;
166 bool
167 GOMP_loop_runtime_start (long start, long end, long incr,
168 long *istart, long *iend)
170 struct gomp_task_icv *icv = gomp_icv (false);
171 switch (icv->run_sched_var)
173 case GFS_STATIC:
174 return gomp_loop_static_start (start, end, incr, icv->run_sched_modifier,
175 istart, iend);
176 case GFS_DYNAMIC:
177 return gomp_loop_dynamic_start (start, end, incr, icv->run_sched_modifier,
178 istart, iend);
179 case GFS_GUIDED:
180 return gomp_loop_guided_start (start, end, incr, icv->run_sched_modifier,
181 istart, iend);
182 case GFS_AUTO:
183 /* For now map to schedule(static), later on we could play with feedback
184 driven choice. */
185 return gomp_loop_static_start (start, end, incr, 0, istart, iend);
186 default:
187 abort ();
191 /* The *_ordered_*_start routines are similar. The only difference is that
192 this work-share construct is initialized to expect an ORDERED section. */
194 static bool
195 gomp_loop_ordered_static_start (long start, long end, long incr,
196 long chunk_size, long *istart, long *iend)
198 struct gomp_thread *thr = gomp_thread ();
200 thr->ts.static_trip = 0;
201 if (gomp_work_share_start (true))
203 gomp_loop_init (thr->ts.work_share, start, end, incr,
204 GFS_STATIC, chunk_size);
205 gomp_ordered_static_init ();
206 gomp_work_share_init_done ();
209 return !gomp_iter_static_next (istart, iend);
212 static bool
213 gomp_loop_ordered_dynamic_start (long start, long end, long incr,
214 long chunk_size, long *istart, long *iend)
216 struct gomp_thread *thr = gomp_thread ();
217 bool ret;
219 if (gomp_work_share_start (true))
221 gomp_loop_init (thr->ts.work_share, start, end, incr,
222 GFS_DYNAMIC, chunk_size);
223 gomp_mutex_lock (&thr->ts.work_share->lock);
224 gomp_work_share_init_done ();
226 else
227 gomp_mutex_lock (&thr->ts.work_share->lock);
229 ret = gomp_iter_dynamic_next_locked (istart, iend);
230 if (ret)
231 gomp_ordered_first ();
232 gomp_mutex_unlock (&thr->ts.work_share->lock);
234 return ret;
237 static bool
238 gomp_loop_ordered_guided_start (long start, long end, long incr,
239 long chunk_size, long *istart, long *iend)
241 struct gomp_thread *thr = gomp_thread ();
242 bool ret;
244 if (gomp_work_share_start (true))
246 gomp_loop_init (thr->ts.work_share, start, end, incr,
247 GFS_GUIDED, chunk_size);
248 gomp_mutex_lock (&thr->ts.work_share->lock);
249 gomp_work_share_init_done ();
251 else
252 gomp_mutex_lock (&thr->ts.work_share->lock);
254 ret = gomp_iter_guided_next_locked (istart, iend);
255 if (ret)
256 gomp_ordered_first ();
257 gomp_mutex_unlock (&thr->ts.work_share->lock);
259 return ret;
262 bool
263 GOMP_loop_ordered_runtime_start (long start, long end, long incr,
264 long *istart, long *iend)
266 struct gomp_task_icv *icv = gomp_icv (false);
267 switch (icv->run_sched_var)
269 case GFS_STATIC:
270 return gomp_loop_ordered_static_start (start, end, incr,
271 icv->run_sched_modifier,
272 istart, iend);
273 case GFS_DYNAMIC:
274 return gomp_loop_ordered_dynamic_start (start, end, incr,
275 icv->run_sched_modifier,
276 istart, iend);
277 case GFS_GUIDED:
278 return gomp_loop_ordered_guided_start (start, end, incr,
279 icv->run_sched_modifier,
280 istart, iend);
281 case GFS_AUTO:
282 /* For now map to schedule(static), later on we could play with feedback
283 driven choice. */
284 return gomp_loop_ordered_static_start (start, end, incr,
285 0, istart, iend);
286 default:
287 abort ();
291 /* The *_next routines are called when the thread completes processing of
292 the iteration block currently assigned to it. If the work-share
293 construct is bound directly to a parallel construct, then the iteration
294 bounds may have been set up before the parallel. In which case, this
295 may be the first iteration for the thread.
297 Returns true if there is work remaining to be performed; *ISTART and
298 *IEND are filled with a new iteration block. Returns false if all work
299 has been assigned. */
301 static bool
302 gomp_loop_static_next (long *istart, long *iend)
304 return !gomp_iter_static_next (istart, iend);
307 static bool
308 gomp_loop_dynamic_next (long *istart, long *iend)
310 bool ret;
312 #ifdef HAVE_SYNC_BUILTINS
313 ret = gomp_iter_dynamic_next (istart, iend);
314 #else
315 struct gomp_thread *thr = gomp_thread ();
316 gomp_mutex_lock (&thr->ts.work_share->lock);
317 ret = gomp_iter_dynamic_next_locked (istart, iend);
318 gomp_mutex_unlock (&thr->ts.work_share->lock);
319 #endif
321 return ret;
324 static bool
325 gomp_loop_guided_next (long *istart, long *iend)
327 bool ret;
329 #ifdef HAVE_SYNC_BUILTINS
330 ret = gomp_iter_guided_next (istart, iend);
331 #else
332 struct gomp_thread *thr = gomp_thread ();
333 gomp_mutex_lock (&thr->ts.work_share->lock);
334 ret = gomp_iter_guided_next_locked (istart, iend);
335 gomp_mutex_unlock (&thr->ts.work_share->lock);
336 #endif
338 return ret;
341 bool
342 GOMP_loop_runtime_next (long *istart, long *iend)
344 struct gomp_thread *thr = gomp_thread ();
346 switch (thr->ts.work_share->sched)
348 case GFS_STATIC:
349 case GFS_AUTO:
350 return gomp_loop_static_next (istart, iend);
351 case GFS_DYNAMIC:
352 return gomp_loop_dynamic_next (istart, iend);
353 case GFS_GUIDED:
354 return gomp_loop_guided_next (istart, iend);
355 default:
356 abort ();
360 /* The *_ordered_*_next routines are called when the thread completes
361 processing of the iteration block currently assigned to it.
363 Returns true if there is work remaining to be performed; *ISTART and
364 *IEND are filled with a new iteration block. Returns false if all work
365 has been assigned. */
367 static bool
368 gomp_loop_ordered_static_next (long *istart, long *iend)
370 struct gomp_thread *thr = gomp_thread ();
371 int test;
373 gomp_ordered_sync ();
374 gomp_mutex_lock (&thr->ts.work_share->lock);
375 test = gomp_iter_static_next (istart, iend);
376 if (test >= 0)
377 gomp_ordered_static_next ();
378 gomp_mutex_unlock (&thr->ts.work_share->lock);
380 return test == 0;
383 static bool
384 gomp_loop_ordered_dynamic_next (long *istart, long *iend)
386 struct gomp_thread *thr = gomp_thread ();
387 bool ret;
389 gomp_ordered_sync ();
390 gomp_mutex_lock (&thr->ts.work_share->lock);
391 ret = gomp_iter_dynamic_next_locked (istart, iend);
392 if (ret)
393 gomp_ordered_next ();
394 else
395 gomp_ordered_last ();
396 gomp_mutex_unlock (&thr->ts.work_share->lock);
398 return ret;
401 static bool
402 gomp_loop_ordered_guided_next (long *istart, long *iend)
404 struct gomp_thread *thr = gomp_thread ();
405 bool ret;
407 gomp_ordered_sync ();
408 gomp_mutex_lock (&thr->ts.work_share->lock);
409 ret = gomp_iter_guided_next_locked (istart, iend);
410 if (ret)
411 gomp_ordered_next ();
412 else
413 gomp_ordered_last ();
414 gomp_mutex_unlock (&thr->ts.work_share->lock);
416 return ret;
419 bool
420 GOMP_loop_ordered_runtime_next (long *istart, long *iend)
422 struct gomp_thread *thr = gomp_thread ();
424 switch (thr->ts.work_share->sched)
426 case GFS_STATIC:
427 case GFS_AUTO:
428 return gomp_loop_ordered_static_next (istart, iend);
429 case GFS_DYNAMIC:
430 return gomp_loop_ordered_dynamic_next (istart, iend);
431 case GFS_GUIDED:
432 return gomp_loop_ordered_guided_next (istart, iend);
433 default:
434 abort ();
438 /* The GOMP_parallel_loop_* routines pre-initialize a work-share construct
439 to avoid one synchronization once we get into the loop. */
441 static void
442 gomp_parallel_loop_start (void (*fn) (void *), void *data,
443 unsigned num_threads, long start, long end,
444 long incr, enum gomp_schedule_type sched,
445 long chunk_size)
447 struct gomp_team *team;
449 num_threads = gomp_resolve_num_threads (num_threads, 0);
450 team = gomp_new_team (num_threads);
451 gomp_loop_init (&team->work_shares[0], start, end, incr, sched, chunk_size);
452 gomp_team_start (fn, data, num_threads, team);
455 void
456 GOMP_parallel_loop_static_start (void (*fn) (void *), void *data,
457 unsigned num_threads, long start, long end,
458 long incr, long chunk_size)
460 gomp_parallel_loop_start (fn, data, num_threads, start, end, incr,
461 GFS_STATIC, chunk_size);
464 void
465 GOMP_parallel_loop_dynamic_start (void (*fn) (void *), void *data,
466 unsigned num_threads, long start, long end,
467 long incr, long chunk_size)
469 gomp_parallel_loop_start (fn, data, num_threads, start, end, incr,
470 GFS_DYNAMIC, chunk_size);
473 void
474 GOMP_parallel_loop_guided_start (void (*fn) (void *), void *data,
475 unsigned num_threads, long start, long end,
476 long incr, long chunk_size)
478 gomp_parallel_loop_start (fn, data, num_threads, start, end, incr,
479 GFS_GUIDED, chunk_size);
482 void
483 GOMP_parallel_loop_runtime_start (void (*fn) (void *), void *data,
484 unsigned num_threads, long start, long end,
485 long incr)
487 struct gomp_task_icv *icv = gomp_icv (false);
488 gomp_parallel_loop_start (fn, data, num_threads, start, end, incr,
489 icv->run_sched_var, icv->run_sched_modifier);
/* The GOMP_loop_end* routines are called after the thread is told that
   all loop iterations are complete.  This first version synchronizes
   all threads; the nowait version does not.  */

void
GOMP_loop_end (void)
{
  gomp_work_share_end ();
}
/* As GOMP_loop_end, but without the barrier.  */

void
GOMP_loop_end_nowait (void)
{
  gomp_work_share_end_nowait ();
}
509 /* We use static functions above so that we're sure that the "runtime"
510 function can defer to the proper routine without interposition. We
511 export the static function with a strong alias when possible, or with
512 a wrapper function otherwise. */
514 #ifdef HAVE_ATTRIBUTE_ALIAS
515 extern __typeof(gomp_loop_static_start) GOMP_loop_static_start
516 __attribute__((alias ("gomp_loop_static_start")));
517 extern __typeof(gomp_loop_dynamic_start) GOMP_loop_dynamic_start
518 __attribute__((alias ("gomp_loop_dynamic_start")));
519 extern __typeof(gomp_loop_guided_start) GOMP_loop_guided_start
520 __attribute__((alias ("gomp_loop_guided_start")));
522 extern __typeof(gomp_loop_ordered_static_start) GOMP_loop_ordered_static_start
523 __attribute__((alias ("gomp_loop_ordered_static_start")));
524 extern __typeof(gomp_loop_ordered_dynamic_start) GOMP_loop_ordered_dynamic_start
525 __attribute__((alias ("gomp_loop_ordered_dynamic_start")));
526 extern __typeof(gomp_loop_ordered_guided_start) GOMP_loop_ordered_guided_start
527 __attribute__((alias ("gomp_loop_ordered_guided_start")));
529 extern __typeof(gomp_loop_static_next) GOMP_loop_static_next
530 __attribute__((alias ("gomp_loop_static_next")));
531 extern __typeof(gomp_loop_dynamic_next) GOMP_loop_dynamic_next
532 __attribute__((alias ("gomp_loop_dynamic_next")));
533 extern __typeof(gomp_loop_guided_next) GOMP_loop_guided_next
534 __attribute__((alias ("gomp_loop_guided_next")));
536 extern __typeof(gomp_loop_ordered_static_next) GOMP_loop_ordered_static_next
537 __attribute__((alias ("gomp_loop_ordered_static_next")));
538 extern __typeof(gomp_loop_ordered_dynamic_next) GOMP_loop_ordered_dynamic_next
539 __attribute__((alias ("gomp_loop_ordered_dynamic_next")));
540 extern __typeof(gomp_loop_ordered_guided_next) GOMP_loop_ordered_guided_next
541 __attribute__((alias ("gomp_loop_ordered_guided_next")));
542 #else
543 bool
544 GOMP_loop_static_start (long start, long end, long incr, long chunk_size,
545 long *istart, long *iend)
547 return gomp_loop_static_start (start, end, incr, chunk_size, istart, iend);
550 bool
551 GOMP_loop_dynamic_start (long start, long end, long incr, long chunk_size,
552 long *istart, long *iend)
554 return gomp_loop_dynamic_start (start, end, incr, chunk_size, istart, iend);
557 bool
558 GOMP_loop_guided_start (long start, long end, long incr, long chunk_size,
559 long *istart, long *iend)
561 return gomp_loop_guided_start (start, end, incr, chunk_size, istart, iend);
564 bool
565 GOMP_loop_ordered_static_start (long start, long end, long incr,
566 long chunk_size, long *istart, long *iend)
568 return gomp_loop_ordered_static_start (start, end, incr, chunk_size,
569 istart, iend);
572 bool
573 GOMP_loop_ordered_dynamic_start (long start, long end, long incr,
574 long chunk_size, long *istart, long *iend)
576 return gomp_loop_ordered_dynamic_start (start, end, incr, chunk_size,
577 istart, iend);
580 bool
581 GOMP_loop_ordered_guided_start (long start, long end, long incr,
582 long chunk_size, long *istart, long *iend)
584 return gomp_loop_ordered_guided_start (start, end, incr, chunk_size,
585 istart, iend);
588 bool
589 GOMP_loop_static_next (long *istart, long *iend)
591 return gomp_loop_static_next (istart, iend);
594 bool
595 GOMP_loop_dynamic_next (long *istart, long *iend)
597 return gomp_loop_dynamic_next (istart, iend);
600 bool
601 GOMP_loop_guided_next (long *istart, long *iend)
603 return gomp_loop_guided_next (istart, iend);
606 bool
607 GOMP_loop_ordered_static_next (long *istart, long *iend)
609 return gomp_loop_ordered_static_next (istart, iend);
612 bool
613 GOMP_loop_ordered_dynamic_next (long *istart, long *iend)
615 return gomp_loop_ordered_dynamic_next (istart, iend);
618 bool
619 GOMP_loop_ordered_guided_next (long *istart, long *iend)
621 return gomp_loop_ordered_guided_next (istart, iend);
623 #endif