/* libgomp/loop.c — OpenMP loop (FOR/DO) work-sharing construct.  */
/* Copyright (C) 2005-2015 Free Software Foundation, Inc.
   Contributed by Richard Henderson <rth@redhat.com>.

   This file is part of the GNU Offloading and Multi Processing Library
   (libgomp).

   Libgomp is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
   FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */

/* This file handles the LOOP (FOR/DO) construct.  */
#include <limits.h>
#include <stdlib.h>
#include "libgomp.h"
/* Initialize the given work share construct WS from the given arguments.

   START, END, INCR describe the loop's iteration space; SCHED and
   CHUNK_SIZE are the scheduling kind and its parameter.  */

static inline void
gomp_loop_init (struct gomp_work_share *ws, long start, long end, long incr,
		enum gomp_schedule_type sched, long chunk_size)
{
  ws->sched = sched;
  ws->chunk_size = chunk_size;
  /* Canonicalize loops that have zero iterations to ->next == ->end.  */
  ws->end = ((incr > 0 && start > end) || (incr < 0 && start < end))
	    ? start : end;
  ws->incr = incr;
  ws->next = start;
  if (sched == GFS_DYNAMIC)
    {
      /* Pre-scale the chunk by the increment so the dynamic iterator can
	 advance ->next with a single signed addition.  */
      ws->chunk_size *= incr;

#ifdef HAVE_SYNC_BUILTINS
      {
	/* For dynamic scheduling prepare things to make each iteration
	   faster.  */
	struct gomp_thread *thr = gomp_thread ();
	struct gomp_team *team = thr->ts.team;
	long nthreads = team ? team->nthreads : 1;

	if (__builtin_expect (incr > 0, 1))
	  {
	    /* Cheap overflow protection: if either the thread count or the
	       (scaled) chunk size uses the upper half of long's bits,
	       conservatively disable the fast path.  */
	    if (__builtin_expect ((nthreads | ws->chunk_size)
				  >= 1UL << (sizeof (long)
					     * __CHAR_BIT__ / 2 - 1), 0))
	      ws->mode = 0;
	    else
	      /* NOTE(review): nonzero ->mode appears to let the dynamic
		 iterator skip per-step overflow checks — confirm against
		 gomp_iter_dynamic_next (not in this file).  */
	      ws->mode = ws->end < (LONG_MAX
				    - (nthreads + 1) * ws->chunk_size);
	  }
	/* Cheap overflow protection (negative-increment mirror case).  */
	else if (__builtin_expect ((nthreads | -ws->chunk_size)
				   >= 1UL << (sizeof (long)
					      * __CHAR_BIT__ / 2 - 1), 0))
	  ws->mode = 0;
	else
	  ws->mode = ws->end > (nthreads + 1) * -ws->chunk_size - LONG_MAX;
      }
#endif
    }
}
/* The *_start routines are called when first encountering a loop construct
   that is not bound directly to a parallel construct.  The first thread
   that arrives will create the work-share construct; subsequent threads
   will see the construct exists and allocate work from it.

   START, END, INCR are the bounds of the loop; due to the restrictions of
   OpenMP, these values must be the same in every thread.  This is not
   verified (nor is it entirely verifiable, since START is not necessarily
   retained intact in the work-share data structure).  CHUNK_SIZE is the
   scheduling parameter; again this must be identical in all threads.

   Returns true if there's any work for this thread to perform.  If so,
   *ISTART and *IEND are filled with the bounds of the iteration block
   allocated to this thread.  Returns false if all work was assigned to
   other threads prior to this thread's arrival.  */

static bool
gomp_loop_static_start (long start, long end, long incr, long chunk_size,
			long *istart, long *iend)
{
  struct gomp_thread *thr = gomp_thread ();

  /* Static scheduling keeps a per-thread trip count; reset it for the
     new work share.  */
  thr->ts.static_trip = 0;
  if (gomp_work_share_start (false))
    {
      /* This thread won the race to create the work share.  */
      gomp_loop_init (thr->ts.work_share, start, end, incr,
		      GFS_STATIC, chunk_size);
      gomp_work_share_init_done ();
    }

  /* gomp_iter_static_next returns 0 when a block was assigned.  */
  return !gomp_iter_static_next (istart, iend);
}
/* As gomp_loop_static_start, but for dynamic scheduling.  Uses lock-free
   iteration when sync builtins are available, a mutex otherwise.  */

static bool
gomp_loop_dynamic_start (long start, long end, long incr, long chunk_size,
			 long *istart, long *iend)
{
  struct gomp_thread *thr = gomp_thread ();
  bool ret;

  if (gomp_work_share_start (false))
    {
      gomp_loop_init (thr->ts.work_share, start, end, incr,
		      GFS_DYNAMIC, chunk_size);
      gomp_work_share_init_done ();
    }

#ifdef HAVE_SYNC_BUILTINS
  ret = gomp_iter_dynamic_next (istart, iend);
#else
  gomp_mutex_lock (&thr->ts.work_share->lock);
  ret = gomp_iter_dynamic_next_locked (istart, iend);
  gomp_mutex_unlock (&thr->ts.work_share->lock);
#endif

  return ret;
}
/* As gomp_loop_dynamic_start, but for guided scheduling.  */

static bool
gomp_loop_guided_start (long start, long end, long incr, long chunk_size,
			long *istart, long *iend)
{
  struct gomp_thread *thr = gomp_thread ();
  bool ret;

  if (gomp_work_share_start (false))
    {
      gomp_loop_init (thr->ts.work_share, start, end, incr,
		      GFS_GUIDED, chunk_size);
      gomp_work_share_init_done ();
    }

#ifdef HAVE_SYNC_BUILTINS
  ret = gomp_iter_guided_next (istart, iend);
#else
  gomp_mutex_lock (&thr->ts.work_share->lock);
  ret = gomp_iter_guided_next_locked (istart, iend);
  gomp_mutex_unlock (&thr->ts.work_share->lock);
#endif

  return ret;
}
164 bool
165 GOMP_loop_runtime_start (long start, long end, long incr,
166 long *istart, long *iend)
168 struct gomp_task_icv *icv = gomp_icv (false);
169 switch (icv->run_sched_var)
171 case GFS_STATIC:
172 return gomp_loop_static_start (start, end, incr, icv->run_sched_modifier,
173 istart, iend);
174 case GFS_DYNAMIC:
175 return gomp_loop_dynamic_start (start, end, incr, icv->run_sched_modifier,
176 istart, iend);
177 case GFS_GUIDED:
178 return gomp_loop_guided_start (start, end, incr, icv->run_sched_modifier,
179 istart, iend);
180 case GFS_AUTO:
181 /* For now map to schedule(static), later on we could play with feedback
182 driven choice. */
183 return gomp_loop_static_start (start, end, incr, 0, istart, iend);
184 default:
185 abort ();
/* The *_ordered_*_start routines are similar.  The only difference is that
   this work-share construct is initialized to expect an ORDERED section.  */

static bool
gomp_loop_ordered_static_start (long start, long end, long incr,
				long chunk_size, long *istart, long *iend)
{
  struct gomp_thread *thr = gomp_thread ();

  thr->ts.static_trip = 0;
  if (gomp_work_share_start (true))
    {
      gomp_loop_init (thr->ts.work_share, start, end, incr,
		      GFS_STATIC, chunk_size);
      /* Ordered static needs its dispatch order fixed up front.  */
      gomp_ordered_static_init ();
      gomp_work_share_init_done ();
    }

  return !gomp_iter_static_next (istart, iend);
}
/* Ordered dynamic start.  All iteration allocation happens under the
   work-share lock so that gomp_ordered_first sees a consistent queue.  */

static bool
gomp_loop_ordered_dynamic_start (long start, long end, long incr,
				 long chunk_size, long *istart, long *iend)
{
  struct gomp_thread *thr = gomp_thread ();
  bool ret;

  if (gomp_work_share_start (true))
    {
      gomp_loop_init (thr->ts.work_share, start, end, incr,
		      GFS_DYNAMIC, chunk_size);
      /* Take the lock before releasing other threads from init so the
	 creating thread allocates its block first.  */
      gomp_mutex_lock (&thr->ts.work_share->lock);
      gomp_work_share_init_done ();
    }
  else
    gomp_mutex_lock (&thr->ts.work_share->lock);

  ret = gomp_iter_dynamic_next_locked (istart, iend);
  if (ret)
    gomp_ordered_first ();
  gomp_mutex_unlock (&thr->ts.work_share->lock);

  return ret;
}
/* Ordered guided start; identical shape to the ordered dynamic variant
   but using the guided iterator.  */

static bool
gomp_loop_ordered_guided_start (long start, long end, long incr,
				long chunk_size, long *istart, long *iend)
{
  struct gomp_thread *thr = gomp_thread ();
  bool ret;

  if (gomp_work_share_start (true))
    {
      gomp_loop_init (thr->ts.work_share, start, end, incr,
		      GFS_GUIDED, chunk_size);
      /* Lock before init-done so the creator allocates its block first.  */
      gomp_mutex_lock (&thr->ts.work_share->lock);
      gomp_work_share_init_done ();
    }
  else
    gomp_mutex_lock (&thr->ts.work_share->lock);

  ret = gomp_iter_guided_next_locked (istart, iend);
  if (ret)
    gomp_ordered_first ();
  gomp_mutex_unlock (&thr->ts.work_share->lock);

  return ret;
}
260 bool
261 GOMP_loop_ordered_runtime_start (long start, long end, long incr,
262 long *istart, long *iend)
264 struct gomp_task_icv *icv = gomp_icv (false);
265 switch (icv->run_sched_var)
267 case GFS_STATIC:
268 return gomp_loop_ordered_static_start (start, end, incr,
269 icv->run_sched_modifier,
270 istart, iend);
271 case GFS_DYNAMIC:
272 return gomp_loop_ordered_dynamic_start (start, end, incr,
273 icv->run_sched_modifier,
274 istart, iend);
275 case GFS_GUIDED:
276 return gomp_loop_ordered_guided_start (start, end, incr,
277 icv->run_sched_modifier,
278 istart, iend);
279 case GFS_AUTO:
280 /* For now map to schedule(static), later on we could play with feedback
281 driven choice. */
282 return gomp_loop_ordered_static_start (start, end, incr,
283 0, istart, iend);
284 default:
285 abort ();
/* The *_next routines are called when the thread completes processing of
   the iteration block currently assigned to it.  If the work-share
   construct is bound directly to a parallel construct, then the iteration
   bounds may have been set up before the parallel.  In which case, this
   may be the first iteration for the thread.

   Returns true if there is work remaining to be performed; *ISTART and
   *IEND are filled with a new iteration block.  Returns false if all work
   has been assigned.  */

static bool
gomp_loop_static_next (long *istart, long *iend)
{
  /* gomp_iter_static_next returns 0 when a block was assigned.  */
  return !gomp_iter_static_next (istart, iend);
}
/* Grab the next dynamically-scheduled block, lock-free when sync
   builtins are available.  */

static bool
gomp_loop_dynamic_next (long *istart, long *iend)
{
  bool ret;

#ifdef HAVE_SYNC_BUILTINS
  ret = gomp_iter_dynamic_next (istart, iend);
#else
  struct gomp_thread *thr = gomp_thread ();
  gomp_mutex_lock (&thr->ts.work_share->lock);
  ret = gomp_iter_dynamic_next_locked (istart, iend);
  gomp_mutex_unlock (&thr->ts.work_share->lock);
#endif

  return ret;
}
/* Grab the next guided-scheduled block, lock-free when sync builtins
   are available.  */

static bool
gomp_loop_guided_next (long *istart, long *iend)
{
  bool ret;

#ifdef HAVE_SYNC_BUILTINS
  ret = gomp_iter_guided_next (istart, iend);
#else
  struct gomp_thread *thr = gomp_thread ();
  gomp_mutex_lock (&thr->ts.work_share->lock);
  ret = gomp_iter_guided_next_locked (istart, iend);
  gomp_mutex_unlock (&thr->ts.work_share->lock);
#endif

  return ret;
}
/* Runtime-schedule next: dispatch on the schedule recorded in the
   current work share (set by the corresponding *_start).  */

bool
GOMP_loop_runtime_next (long *istart, long *iend)
{
  struct gomp_thread *thr = gomp_thread ();

  switch (thr->ts.work_share->sched)
    {
    case GFS_STATIC:
    case GFS_AUTO:
      return gomp_loop_static_next (istart, iend);
    case GFS_DYNAMIC:
      return gomp_loop_dynamic_next (istart, iend);
    case GFS_GUIDED:
      return gomp_loop_guided_next (istart, iend);
    default:
      abort ();
    }
}
/* The *_ordered_*_next routines are called when the thread completes
   processing of the iteration block currently assigned to it.

   Returns true if there is work remaining to be performed; *ISTART and
   *IEND are filled with a new iteration block.  Returns false if all work
   has been assigned.  */

static bool
gomp_loop_ordered_static_next (long *istart, long *iend)
{
  struct gomp_thread *thr = gomp_thread ();
  int test;

  /* Wait until all prior iterations' ordered sections have completed.  */
  gomp_ordered_sync ();
  gomp_mutex_lock (&thr->ts.work_share->lock);
  test = gomp_iter_static_next (istart, iend);
  /* NOTE(review): test >= 0 (i.e. not the "done for good" result) still
     advances the ordered queue even when no block was assigned — confirm
     against gomp_iter_static_next's return contract.  */
  if (test >= 0)
    gomp_ordered_static_next ();
  gomp_mutex_unlock (&thr->ts.work_share->lock);

  /* 0 means a block was assigned.  */
  return test == 0;
}
/* Ordered dynamic next: wait for our turn, then allocate the next block
   and advance (or terminate) this thread's position in the ordered
   queue, all under the work-share lock.  */

static bool
gomp_loop_ordered_dynamic_next (long *istart, long *iend)
{
  struct gomp_thread *thr = gomp_thread ();
  bool ret;

  gomp_ordered_sync ();
  gomp_mutex_lock (&thr->ts.work_share->lock);
  ret = gomp_iter_dynamic_next_locked (istart, iend);
  if (ret)
    gomp_ordered_next ();
  else
    gomp_ordered_last ();
  gomp_mutex_unlock (&thr->ts.work_share->lock);

  return ret;
}
/* Ordered guided next; identical shape to the ordered dynamic variant
   but using the guided iterator.  */

static bool
gomp_loop_ordered_guided_next (long *istart, long *iend)
{
  struct gomp_thread *thr = gomp_thread ();
  bool ret;

  gomp_ordered_sync ();
  gomp_mutex_lock (&thr->ts.work_share->lock);
  ret = gomp_iter_guided_next_locked (istart, iend);
  if (ret)
    gomp_ordered_next ();
  else
    gomp_ordered_last ();
  gomp_mutex_unlock (&thr->ts.work_share->lock);

  return ret;
}
/* Runtime-schedule ordered next: dispatch on the schedule recorded in
   the current work share.  */

bool
GOMP_loop_ordered_runtime_next (long *istart, long *iend)
{
  struct gomp_thread *thr = gomp_thread ();

  switch (thr->ts.work_share->sched)
    {
    case GFS_STATIC:
    case GFS_AUTO:
      return gomp_loop_ordered_static_next (istart, iend);
    case GFS_DYNAMIC:
      return gomp_loop_ordered_dynamic_next (istart, iend);
    case GFS_GUIDED:
      return gomp_loop_ordered_guided_next (istart, iend);
    default:
      abort ();
    }
}
/* The GOMP_parallel_loop_* routines pre-initialize a work-share construct
   to avoid one synchronization once we get into the loop.  */

static void
gomp_parallel_loop_start (void (*fn) (void *), void *data,
			  unsigned num_threads, long start, long end,
			  long incr, enum gomp_schedule_type sched,
			  long chunk_size, unsigned int flags)
{
  struct gomp_team *team;

  num_threads = gomp_resolve_num_threads (num_threads, 0);
  team = gomp_new_team (num_threads);
  /* Initialize the team's first work share before any thread starts,
     so no start-time synchronization is needed inside the loop.  */
  gomp_loop_init (&team->work_shares[0], start, end, incr, sched, chunk_size);
  gomp_team_start (fn, data, num_threads, flags, team);
}
/* Combined parallel + static loop start (pre-OpenMP-4.0 entry point).  */

void
GOMP_parallel_loop_static_start (void (*fn) (void *), void *data,
				 unsigned num_threads, long start, long end,
				 long incr, long chunk_size)
{
  gomp_parallel_loop_start (fn, data, num_threads, start, end, incr,
			    GFS_STATIC, chunk_size, 0);
}
/* Combined parallel + dynamic loop start (pre-OpenMP-4.0 entry point).  */

void
GOMP_parallel_loop_dynamic_start (void (*fn) (void *), void *data,
				  unsigned num_threads, long start, long end,
				  long incr, long chunk_size)
{
  gomp_parallel_loop_start (fn, data, num_threads, start, end, incr,
			    GFS_DYNAMIC, chunk_size, 0);
}
/* Combined parallel + guided loop start (pre-OpenMP-4.0 entry point).  */

void
GOMP_parallel_loop_guided_start (void (*fn) (void *), void *data,
				 unsigned num_threads, long start, long end,
				 long incr, long chunk_size)
{
  gomp_parallel_loop_start (fn, data, num_threads, start, end, incr,
			    GFS_GUIDED, chunk_size, 0);
}
/* Combined parallel + runtime-schedule loop start: the schedule kind and
   chunk come from the current ICVs.  */

void
GOMP_parallel_loop_runtime_start (void (*fn) (void *), void *data,
				  unsigned num_threads, long start, long end,
				  long incr)
{
  struct gomp_task_icv *icv = gomp_icv (false);
  gomp_parallel_loop_start (fn, data, num_threads, start, end, incr,
			    icv->run_sched_var, icv->run_sched_modifier, 0);
}
/* Redirect so the call below binds to the library-internal alias and
   cannot be interposed.  */
ialias_redirect (GOMP_parallel_end)

/* OpenMP 4.0 combined parallel loop: start the team, run FN in the
   master thread too, then join.  */

void
GOMP_parallel_loop_static (void (*fn) (void *), void *data,
			   unsigned num_threads, long start, long end,
			   long incr, long chunk_size, unsigned flags)
{
  gomp_parallel_loop_start (fn, data, num_threads, start, end, incr,
			    GFS_STATIC, chunk_size, flags);
  fn (data);
  GOMP_parallel_end ();
}
/* OpenMP 4.0 combined parallel + dynamic loop.  */

void
GOMP_parallel_loop_dynamic (void (*fn) (void *), void *data,
			    unsigned num_threads, long start, long end,
			    long incr, long chunk_size, unsigned flags)
{
  gomp_parallel_loop_start (fn, data, num_threads, start, end, incr,
			    GFS_DYNAMIC, chunk_size, flags);
  fn (data);
  GOMP_parallel_end ();
}
/* OpenMP 4.0 combined parallel + guided loop.  */

void
GOMP_parallel_loop_guided (void (*fn) (void *), void *data,
			   unsigned num_threads, long start, long end,
			   long incr, long chunk_size, unsigned flags)
{
  gomp_parallel_loop_start (fn, data, num_threads, start, end, incr,
			    GFS_GUIDED, chunk_size, flags);
  fn (data);
  GOMP_parallel_end ();
}
/* OpenMP 4.0 combined parallel + runtime-schedule loop.  */

void
GOMP_parallel_loop_runtime (void (*fn) (void *), void *data,
			    unsigned num_threads, long start, long end,
			    long incr, unsigned flags)
{
  struct gomp_task_icv *icv = gomp_icv (false);
  gomp_parallel_loop_start (fn, data, num_threads, start, end, incr,
			    icv->run_sched_var, icv->run_sched_modifier,
			    flags);
  fn (data);
  GOMP_parallel_end ();
}
/* The GOMP_loop_end* routines are called after the thread is told that
   all loop iterations are complete.  The first two versions synchronize
   all threads; the nowait version does not.  */

void
GOMP_loop_end (void)
{
  gomp_work_share_end ();
}
/* Cancellable loop end; returns true if the construct was cancelled.  */

bool
GOMP_loop_end_cancel (void)
{
  return gomp_work_share_end_cancel ();
}
/* Loop end without the implicit barrier (nowait clause).  */

void
GOMP_loop_end_nowait (void)
{
  gomp_work_share_end_nowait ();
}
/* We use static functions above so that we're sure that the "runtime"
   function can defer to the proper routine without interposition.  We
   export the static function with a strong alias when possible, or with
   a wrapper function otherwise.  */

#ifdef HAVE_ATTRIBUTE_ALIAS
extern __typeof(gomp_loop_static_start) GOMP_loop_static_start
	__attribute__((alias ("gomp_loop_static_start")));
extern __typeof(gomp_loop_dynamic_start) GOMP_loop_dynamic_start
	__attribute__((alias ("gomp_loop_dynamic_start")));
extern __typeof(gomp_loop_guided_start) GOMP_loop_guided_start
	__attribute__((alias ("gomp_loop_guided_start")));

extern __typeof(gomp_loop_ordered_static_start) GOMP_loop_ordered_static_start
	__attribute__((alias ("gomp_loop_ordered_static_start")));
extern __typeof(gomp_loop_ordered_dynamic_start) GOMP_loop_ordered_dynamic_start
	__attribute__((alias ("gomp_loop_ordered_dynamic_start")));
extern __typeof(gomp_loop_ordered_guided_start) GOMP_loop_ordered_guided_start
	__attribute__((alias ("gomp_loop_ordered_guided_start")));

extern __typeof(gomp_loop_static_next) GOMP_loop_static_next
	__attribute__((alias ("gomp_loop_static_next")));
extern __typeof(gomp_loop_dynamic_next) GOMP_loop_dynamic_next
	__attribute__((alias ("gomp_loop_dynamic_next")));
extern __typeof(gomp_loop_guided_next) GOMP_loop_guided_next
	__attribute__((alias ("gomp_loop_guided_next")));

extern __typeof(gomp_loop_ordered_static_next) GOMP_loop_ordered_static_next
	__attribute__((alias ("gomp_loop_ordered_static_next")));
extern __typeof(gomp_loop_ordered_dynamic_next) GOMP_loop_ordered_dynamic_next
	__attribute__((alias ("gomp_loop_ordered_dynamic_next")));
extern __typeof(gomp_loop_ordered_guided_next) GOMP_loop_ordered_guided_next
	__attribute__((alias ("gomp_loop_ordered_guided_next")));
#else
/* No alias attribute: export thin wrappers with identical signatures.  */
bool
GOMP_loop_static_start (long start, long end, long incr, long chunk_size,
			long *istart, long *iend)
{
  return gomp_loop_static_start (start, end, incr, chunk_size, istart, iend);
}

bool
GOMP_loop_dynamic_start (long start, long end, long incr, long chunk_size,
			 long *istart, long *iend)
{
  return gomp_loop_dynamic_start (start, end, incr, chunk_size, istart, iend);
}

bool
GOMP_loop_guided_start (long start, long end, long incr, long chunk_size,
			long *istart, long *iend)
{
  return gomp_loop_guided_start (start, end, incr, chunk_size, istart, iend);
}

bool
GOMP_loop_ordered_static_start (long start, long end, long incr,
				long chunk_size, long *istart, long *iend)
{
  return gomp_loop_ordered_static_start (start, end, incr, chunk_size,
					 istart, iend);
}

bool
GOMP_loop_ordered_dynamic_start (long start, long end, long incr,
				 long chunk_size, long *istart, long *iend)
{
  return gomp_loop_ordered_dynamic_start (start, end, incr, chunk_size,
					  istart, iend);
}

bool
GOMP_loop_ordered_guided_start (long start, long end, long incr,
				long chunk_size, long *istart, long *iend)
{
  return gomp_loop_ordered_guided_start (start, end, incr, chunk_size,
					 istart, iend);
}

bool
GOMP_loop_static_next (long *istart, long *iend)
{
  return gomp_loop_static_next (istart, iend);
}

bool
GOMP_loop_dynamic_next (long *istart, long *iend)
{
  return gomp_loop_dynamic_next (istart, iend);
}

bool
GOMP_loop_guided_next (long *istart, long *iend)
{
  return gomp_loop_guided_next (istart, iend);
}

bool
GOMP_loop_ordered_static_next (long *istart, long *iend)
{
  return gomp_loop_ordered_static_next (istart, iend);
}

bool
GOMP_loop_ordered_dynamic_next (long *istart, long *iend)
{
  return gomp_loop_ordered_dynamic_next (istart, iend);
}

bool
GOMP_loop_ordered_guided_next (long *istart, long *iend)
{
  return gomp_loop_ordered_guided_next (istart, iend);
}
#endif