/* Copyright (C) 2005 Free Software Foundation, Inc.
   Contributed by Richard Henderson <rth@redhat.com>.

   This file is part of the GNU OpenMP Library (libgomp).

   Libgomp is free software; you can redistribute it and/or modify it
   under the terms of the GNU Lesser General Public License as published by
   the Free Software Foundation; either version 2.1 of the License, or
   (at your option) any later version.

   Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
   FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public License for
   more details.

   You should have received a copy of the GNU Lesser General Public License
   along with libgomp; see the file COPYING.LIB.  If not, write to the
   Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
   MA 02110-1301, USA.  */

/* As a special exception, if you link this library with other files, some
   of which are compiled with GCC, to produce an executable, this library
   does not by itself cause the resulting executable to be covered by the
   GNU General Public License.  This exception does not however invalidate
   any other reasons why the executable file might be covered by the GNU
   General Public License.  */

/* This file handles the LOOP (FOR/DO) construct.  */

#include "libgomp.h"
#include <stdlib.h>
/* Initialize the given work share construct from the given arguments.  */

static inline void
gomp_loop_init (struct gomp_work_share *ws, unsigned long start,
                unsigned long end, unsigned long incr,
                enum gomp_schedule_type sched, unsigned long chunk_size)
{
  ws->sched = sched;
  ws->chunk_size = chunk_size;
  ws->end = end;
  ws->incr = incr;
  ws->next = start;
}
/* The *_start routines are called when first encountering a loop construct
   that is not bound directly to a parallel construct.  The first thread
   that arrives will create the work-share construct; subsequent threads
   will see the construct exists and allocate work from it.

   START, END, INCR are the bounds of the loop; due to the restrictions of
   OpenMP, these values must be the same in every thread.  This is not
   verified (nor is it entirely verifiable, since START is not necessarily
   retained intact in the work-share data structure).  CHUNK_SIZE is the
   scheduling parameter; again this must be identical in all threads.

   Returns true if there's any work for this thread to perform.  If so,
   *ISTART and *IEND are filled with the bounds of the iteration block
   allocated to this thread.  Returns false if all work was assigned to
   other threads prior to this thread's arrival.  */
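
/* Illustrative sketch, not part of the library: roughly the call sequence
   a compiler might emit for an orphaned

       #pragma omp for schedule(dynamic, 4)
       for (i = 0; i < n; i++)
         body (i);

   using these entry points.  The helper body() and the exact lowering
   shown here are assumptions for illustration only:

       long istart, iend, i;
       if (GOMP_loop_dynamic_start (0, n, 1, 4, &istart, &iend))
         do
           for (i = istart; i < iend; i++)
             body (i);
         while (GOMP_loop_dynamic_next (&istart, &iend));
       GOMP_loop_end ();

   Every thread calls GOMP_loop_end (or GOMP_loop_end_nowait), even if the
   start routine returned false.  */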
static bool
gomp_loop_static_start (long start, long end, long incr, long chunk_size,
                        long *istart, long *iend)
{
  struct gomp_thread *thr = gomp_thread ();

  if (gomp_work_share_start (false))
    gomp_loop_init (thr->ts.work_share, start, end, incr,
                    GFS_STATIC, chunk_size);
  gomp_mutex_unlock (&thr->ts.work_share->lock);

  return !gomp_iter_static_next (istart, iend);
}

static bool
gomp_loop_dynamic_start (long start, long end, long incr, long chunk_size,
                         long *istart, long *iend)
{
  struct gomp_thread *thr = gomp_thread ();
  bool ret;

  if (gomp_work_share_start (false))
    gomp_loop_init (thr->ts.work_share, start, end, incr,
                    GFS_DYNAMIC, chunk_size);

#ifdef HAVE_SYNC_BUILTINS
  gomp_mutex_unlock (&thr->ts.work_share->lock);
  ret = gomp_iter_dynamic_next (istart, iend);
#else
  ret = gomp_iter_dynamic_next_locked (istart, iend);
  gomp_mutex_unlock (&thr->ts.work_share->lock);
#endif

  return ret;
}

static bool
gomp_loop_guided_start (long start, long end, long incr, long chunk_size,
                        long *istart, long *iend)
{
  struct gomp_thread *thr = gomp_thread ();
  bool ret;

  if (gomp_work_share_start (false))
    gomp_loop_init (thr->ts.work_share, start, end, incr,
                    GFS_GUIDED, chunk_size);

#ifdef HAVE_SYNC_BUILTINS
  gomp_mutex_unlock (&thr->ts.work_share->lock);
  ret = gomp_iter_guided_next (istart, iend);
#else
  ret = gomp_iter_guided_next_locked (istart, iend);
  gomp_mutex_unlock (&thr->ts.work_share->lock);
#endif

  return ret;
}

bool
GOMP_loop_runtime_start (long start, long end, long incr,
                         long *istart, long *iend)
{
  switch (gomp_run_sched_var)
    {
    case GFS_STATIC:
      return gomp_loop_static_start (start, end, incr, gomp_run_sched_chunk,
                                     istart, iend);
    case GFS_DYNAMIC:
      return gomp_loop_dynamic_start (start, end, incr, gomp_run_sched_chunk,
                                      istart, iend);
    case GFS_GUIDED:
      return gomp_loop_guided_start (start, end, incr, gomp_run_sched_chunk,
                                     istart, iend);
    default:
      abort ();
    }
}
/* The *_ordered_*_start routines are similar.  The only difference is that
   this work-share construct is initialized to expect an ORDERED section.  */
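
/* Illustrative sketch, not part of the library: for a loop with an ORDERED
   clause, e.g.

       #pragma omp for ordered schedule(static, 1)

   a compiler might start the loop with GOMP_loop_ordered_static_start
   instead of GOMP_loop_static_start, and bracket each ordered region in
   the loop body with GOMP_ordered_start () and GOMP_ordered_end () so
   that iterations are released in sequence.  The exact lowering is an
   assumption for illustration only:

       long istart, iend, i;
       if (GOMP_loop_ordered_static_start (0, n, 1, 1, &istart, &iend))
         do
           for (i = istart; i < iend; i++)
             {
               ... work outside the ordered region ...
               GOMP_ordered_start ();
               ... ordered region body ...
               GOMP_ordered_end ();
             }
         while (GOMP_loop_ordered_static_next (&istart, &iend));
       GOMP_loop_end ();  */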
static bool
gomp_loop_ordered_static_start (long start, long end, long incr,
                                long chunk_size, long *istart, long *iend)
{
  struct gomp_thread *thr = gomp_thread ();

  if (start == end)
    return false;

  if (gomp_work_share_start (true))
    {
      gomp_loop_init (thr->ts.work_share, start, end, incr,
                      GFS_STATIC, chunk_size);
      gomp_ordered_static_init ();
    }
  gomp_mutex_unlock (&thr->ts.work_share->lock);

  return !gomp_iter_static_next (istart, iend);
}

static bool
gomp_loop_ordered_dynamic_start (long start, long end, long incr,
                                 long chunk_size, long *istart, long *iend)
{
  struct gomp_thread *thr = gomp_thread ();
  bool ret;

  if (gomp_work_share_start (true))
    gomp_loop_init (thr->ts.work_share, start, end, incr,
                    GFS_DYNAMIC, chunk_size);

  ret = gomp_iter_dynamic_next_locked (istart, iend);
  if (ret)
    gomp_ordered_first ();
  gomp_mutex_unlock (&thr->ts.work_share->lock);

  return ret;
}

static bool
gomp_loop_ordered_guided_start (long start, long end, long incr,
                                long chunk_size, long *istart, long *iend)
{
  struct gomp_thread *thr = gomp_thread ();
  bool ret;

  if (gomp_work_share_start (true))
    gomp_loop_init (thr->ts.work_share, start, end, incr,
                    GFS_GUIDED, chunk_size);

  ret = gomp_iter_guided_next_locked (istart, iend);
  if (ret)
    gomp_ordered_first ();
  gomp_mutex_unlock (&thr->ts.work_share->lock);

  return ret;
}

bool
GOMP_loop_ordered_runtime_start (long start, long end, long incr,
                                 long *istart, long *iend)
{
  switch (gomp_run_sched_var)
    {
    case GFS_STATIC:
      return gomp_loop_ordered_static_start (start, end, incr,
                                             gomp_run_sched_chunk,
                                             istart, iend);
    case GFS_DYNAMIC:
      return gomp_loop_ordered_dynamic_start (start, end, incr,
                                              gomp_run_sched_chunk,
                                              istart, iend);
    case GFS_GUIDED:
      return gomp_loop_ordered_guided_start (start, end, incr,
                                             gomp_run_sched_chunk,
                                             istart, iend);
    default:
      abort ();
    }
}
/* The *_next routines are called when the thread completes processing of
   the iteration block currently assigned to it.  If the work-share
   construct is bound directly to a parallel construct, then the iteration
   bounds may have been set up before the parallel, in which case this may
   be the first iteration for the thread.

   Returns true if there is work remaining to be performed; *ISTART and
   *IEND are filled with a new iteration block.  Returns false if all work
   has been assigned.  */
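
/* Illustrative sketch, not part of the library: when the loop is combined
   with its enclosing parallel (see the GOMP_parallel_loop_*_start routines
   below), the work share is already initialized, so the outlined function
   a compiler generates may begin directly with a *_next call.  The names
   subfn, data and body are placeholders:

       static void
       subfn (void *data)
       {
         long istart, iend, i;
         while (GOMP_loop_dynamic_next (&istart, &iend))
           for (i = istart; i < iend; i++)
             body (i);
         GOMP_loop_end_nowait ();
       }
   */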
static bool
gomp_loop_static_next (long *istart, long *iend)
{
  return !gomp_iter_static_next (istart, iend);
}

static bool
gomp_loop_dynamic_next (long *istart, long *iend)
{
  bool ret;

#ifdef HAVE_SYNC_BUILTINS
  ret = gomp_iter_dynamic_next (istart, iend);
#else
  struct gomp_thread *thr = gomp_thread ();
  gomp_mutex_lock (&thr->ts.work_share->lock);
  ret = gomp_iter_dynamic_next_locked (istart, iend);
  gomp_mutex_unlock (&thr->ts.work_share->lock);
#endif

  return ret;
}

static bool
gomp_loop_guided_next (long *istart, long *iend)
{
  bool ret;

#ifdef HAVE_SYNC_BUILTINS
  ret = gomp_iter_guided_next (istart, iend);
#else
  struct gomp_thread *thr = gomp_thread ();
  gomp_mutex_lock (&thr->ts.work_share->lock);
  ret = gomp_iter_guided_next_locked (istart, iend);
  gomp_mutex_unlock (&thr->ts.work_share->lock);
#endif

  return ret;
}

bool
GOMP_loop_runtime_next (long *istart, long *iend)
{
  struct gomp_thread *thr = gomp_thread ();

  switch (thr->ts.work_share->sched)
    {
    case GFS_STATIC:
      return gomp_loop_static_next (istart, iend);
    case GFS_DYNAMIC:
      return gomp_loop_dynamic_next (istart, iend);
    case GFS_GUIDED:
      return gomp_loop_guided_next (istart, iend);
    default:
      abort ();
    }
}
/* The *_ordered_*_next routines are called when the thread completes
   processing of the iteration block currently assigned to it.

   Returns true if there is work remaining to be performed; *ISTART and
   *IEND are filled with a new iteration block.  Returns false if all work
   has been assigned.  */
static bool
gomp_loop_ordered_static_next (long *istart, long *iend)
{
  struct gomp_thread *thr = gomp_thread ();
  int test;

  gomp_ordered_sync ();
  gomp_mutex_lock (&thr->ts.work_share->lock);
  test = gomp_iter_static_next (istart, iend);
  if (test >= 0)
    gomp_ordered_static_next ();
  gomp_mutex_unlock (&thr->ts.work_share->lock);

  return test == 0;
}

static bool
gomp_loop_ordered_dynamic_next (long *istart, long *iend)
{
  struct gomp_thread *thr = gomp_thread ();
  bool ret;

  gomp_ordered_sync ();
  gomp_mutex_lock (&thr->ts.work_share->lock);
  ret = gomp_iter_dynamic_next_locked (istart, iend);
  if (ret)
    gomp_ordered_next ();
  else
    gomp_ordered_last ();
  gomp_mutex_unlock (&thr->ts.work_share->lock);

  return ret;
}

static bool
gomp_loop_ordered_guided_next (long *istart, long *iend)
{
  struct gomp_thread *thr = gomp_thread ();
  bool ret;

  gomp_ordered_sync ();
  gomp_mutex_lock (&thr->ts.work_share->lock);
  ret = gomp_iter_guided_next_locked (istart, iend);
  if (ret)
    gomp_ordered_next ();
  else
    gomp_ordered_last ();
  gomp_mutex_unlock (&thr->ts.work_share->lock);

  return ret;
}

bool
GOMP_loop_ordered_runtime_next (long *istart, long *iend)
{
  struct gomp_thread *thr = gomp_thread ();

  switch (thr->ts.work_share->sched)
    {
    case GFS_STATIC:
      return gomp_loop_ordered_static_next (istart, iend);
    case GFS_DYNAMIC:
      return gomp_loop_ordered_dynamic_next (istart, iend);
    case GFS_GUIDED:
      return gomp_loop_ordered_guided_next (istart, iend);
    default:
      abort ();
    }
}
/* The GOMP_parallel_loop_* routines pre-initialize a work-share construct
   to avoid one synchronization once we get into the loop.  */
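
/* Illustrative sketch, not part of the library: for a combined construct
   such as

       #pragma omp parallel for schedule(runtime)
       for (i = 0; i < n; i++)
         body (i);

   a compiler might outline the loop into a subfunction and emit roughly

       GOMP_parallel_loop_runtime_start (subfn, &data, 0, 0, n, 1);
       subfn (&data);              (the master thread runs its share too)
       GOMP_parallel_end ();

   where subfn iterates with GOMP_loop_runtime_next and finishes with
   GOMP_loop_end_nowait, as in the sketch before the *_next routines above.
   The names subfn, data and body are placeholders; the exact lowering is
   compiler-dependent.  */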
static void
gomp_parallel_loop_start (void (*fn) (void *), void *data,
                          unsigned num_threads, long start, long end,
                          long incr, enum gomp_schedule_type sched,
                          long chunk_size)
{
  struct gomp_work_share *ws;

  num_threads = gomp_resolve_num_threads (num_threads);
  ws = gomp_new_work_share (false, num_threads);
  gomp_loop_init (ws, start, end, incr, sched, chunk_size);
  gomp_team_start (fn, data, num_threads, ws);
}

void
GOMP_parallel_loop_static_start (void (*fn) (void *), void *data,
                                 unsigned num_threads, long start, long end,
                                 long incr, long chunk_size)
{
  gomp_parallel_loop_start (fn, data, num_threads, start, end, incr,
                            GFS_STATIC, chunk_size);
}

void
GOMP_parallel_loop_dynamic_start (void (*fn) (void *), void *data,
                                  unsigned num_threads, long start, long end,
                                  long incr, long chunk_size)
{
  gomp_parallel_loop_start (fn, data, num_threads, start, end, incr,
                            GFS_DYNAMIC, chunk_size);
}

void
GOMP_parallel_loop_guided_start (void (*fn) (void *), void *data,
                                 unsigned num_threads, long start, long end,
                                 long incr, long chunk_size)
{
  gomp_parallel_loop_start (fn, data, num_threads, start, end, incr,
                            GFS_GUIDED, chunk_size);
}

void
GOMP_parallel_loop_runtime_start (void (*fn) (void *), void *data,
                                  unsigned num_threads, long start, long end,
                                  long incr)
{
  gomp_parallel_loop_start (fn, data, num_threads, start, end, incr,
                            gomp_run_sched_var, gomp_run_sched_chunk);
}
/* The GOMP_loop_end* routines are called after the thread is told that
   all loop iterations are complete.  This first version synchronizes
   all threads; the nowait version does not.  */
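
/* Illustrative note, an assumption about typical lowering: a loop without
   a NOWAIT clause ends with GOMP_loop_end (), which includes the implicit
   barrier, whereas "#pragma omp for nowait" ends with
   GOMP_loop_end_nowait (), letting threads continue immediately.  */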
void
GOMP_loop_end (void)
{
  gomp_work_share_end ();
}

void
GOMP_loop_end_nowait (void)
{
  gomp_work_share_end_nowait ();
}
/* We use static functions above so that we're sure that the "runtime"
   function can defer to the proper routine without interposition.  We
   export the static function with a strong alias when possible, or with
   a wrapper function otherwise.  */

#ifdef HAVE_ATTRIBUTE_ALIAS
extern __typeof(gomp_loop_static_start) GOMP_loop_static_start
	__attribute__((alias ("gomp_loop_static_start")));
extern __typeof(gomp_loop_dynamic_start) GOMP_loop_dynamic_start
	__attribute__((alias ("gomp_loop_dynamic_start")));
extern __typeof(gomp_loop_guided_start) GOMP_loop_guided_start
	__attribute__((alias ("gomp_loop_guided_start")));

extern __typeof(gomp_loop_ordered_static_start) GOMP_loop_ordered_static_start
	__attribute__((alias ("gomp_loop_ordered_static_start")));
extern __typeof(gomp_loop_ordered_dynamic_start) GOMP_loop_ordered_dynamic_start
	__attribute__((alias ("gomp_loop_ordered_dynamic_start")));
extern __typeof(gomp_loop_ordered_guided_start) GOMP_loop_ordered_guided_start
	__attribute__((alias ("gomp_loop_ordered_guided_start")));

extern __typeof(gomp_loop_static_next) GOMP_loop_static_next
	__attribute__((alias ("gomp_loop_static_next")));
extern __typeof(gomp_loop_dynamic_next) GOMP_loop_dynamic_next
	__attribute__((alias ("gomp_loop_dynamic_next")));
extern __typeof(gomp_loop_guided_next) GOMP_loop_guided_next
	__attribute__((alias ("gomp_loop_guided_next")));

extern __typeof(gomp_loop_ordered_static_next) GOMP_loop_ordered_static_next
	__attribute__((alias ("gomp_loop_ordered_static_next")));
extern __typeof(gomp_loop_ordered_dynamic_next) GOMP_loop_ordered_dynamic_next
	__attribute__((alias ("gomp_loop_ordered_dynamic_next")));
extern __typeof(gomp_loop_ordered_guided_next) GOMP_loop_ordered_guided_next
	__attribute__((alias ("gomp_loop_ordered_guided_next")));
#else
bool
GOMP_loop_static_start (long start, long end, long incr, long chunk_size,
                        long *istart, long *iend)
{
  return gomp_loop_static_start (start, end, incr, chunk_size, istart, iend);
}

bool
GOMP_loop_dynamic_start (long start, long end, long incr, long chunk_size,
                         long *istart, long *iend)
{
  return gomp_loop_dynamic_start (start, end, incr, chunk_size, istart, iend);
}

bool
GOMP_loop_guided_start (long start, long end, long incr, long chunk_size,
                        long *istart, long *iend)
{
  return gomp_loop_guided_start (start, end, incr, chunk_size, istart, iend);
}

bool
GOMP_loop_ordered_static_start (long start, long end, long incr,
                                long chunk_size, long *istart, long *iend)
{
  return gomp_loop_ordered_static_start (start, end, incr, chunk_size,
                                         istart, iend);
}

bool
GOMP_loop_ordered_dynamic_start (long start, long end, long incr,
                                 long chunk_size, long *istart, long *iend)
{
  return gomp_loop_ordered_dynamic_start (start, end, incr, chunk_size,
                                          istart, iend);
}

bool
GOMP_loop_ordered_guided_start (long start, long end, long incr,
                                long chunk_size, long *istart, long *iend)
{
  return gomp_loop_ordered_guided_start (start, end, incr, chunk_size,
                                         istart, iend);
}

bool
GOMP_loop_static_next (long *istart, long *iend)
{
  return gomp_loop_static_next (istart, iend);
}

bool
GOMP_loop_dynamic_next (long *istart, long *iend)
{
  return gomp_loop_dynamic_next (istart, iend);
}

bool
GOMP_loop_guided_next (long *istart, long *iend)
{
  return gomp_loop_guided_next (istart, iend);
}

bool
GOMP_loop_ordered_static_next (long *istart, long *iend)
{
  return gomp_loop_ordered_static_next (istart, iend);
}

bool
GOMP_loop_ordered_dynamic_next (long *istart, long *iend)
{
  return gomp_loop_ordered_dynamic_next (istart, iend);
}

bool
GOMP_loop_ordered_guided_next (long *istart, long *iend)
{
  return gomp_loop_ordered_guided_next (istart, iend);
}
#endif