/* Copyright (C) 2005-2014 Free Software Foundation, Inc.
   Contributed by Richard Henderson <rth@redhat.com>.

   This file is part of the GNU OpenMP Library (libgomp).

   Libgomp is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
   FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */
/* This file contains routines for managing work-share iteration, both
   for loops and sections.  */

#include "libgomp.h"
#include <stdlib.h>
/* This function implements the STATIC scheduling method.  The caller should
   iterate *pstart <= x < *pend.  Return zero if there are more iterations
   to perform; nonzero if not.  Return less than 0 if this thread had
   received the absolutely last iteration.  */
int
gomp_iter_static_next (long *pstart, long *pend)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_team *team = thr->ts.team;
  struct gomp_work_share *ws = thr->ts.work_share;
  unsigned long nthreads = team ? team->nthreads : 1;

  if (thr->ts.static_trip == -1)
    return -1;

  /* Quick test for degenerate teams and orphaned constructs.  */
  if (nthreads == 1)
    {
      *pstart = ws->next;
      *pend = ws->end;
      thr->ts.static_trip = -1;
      return ws->next == ws->end;
    }

  /* We interpret chunk_size zero as "unspecified", which means that we
     should break up the iterations such that each thread makes only one
     trip through the outer loop.  */
  if (ws->chunk_size == 0)
    {
      unsigned long n, q, i, t;
      unsigned long s0, e0;
      long s, e;

      if (thr->ts.static_trip > 0)
        return 1;

      /* Compute the total number of iterations.  */
      s = ws->incr + (ws->incr > 0 ? -1 : 1);
      n = (ws->end - ws->next + s) / ws->incr;
      i = thr->ts.team_id;

      /* Compute the "zero-based" start and end points.  That is, as
         if the loop began at zero and incremented by one.  */
      q = n / nthreads;
      t = n % nthreads;
      if (i < t)
        {
          t = 0;
          q++;
        }
      s0 = q * i + t;
      e0 = s0 + q;

      /* Notice when no iterations allocated for this thread.  */
      if (s0 >= e0)
        {
          thr->ts.static_trip = 1;
          return 1;
        }

      /* Transform these to the actual start and end numbers.  */
      s = (long)s0 * ws->incr + ws->next;
      e = (long)e0 * ws->incr + ws->next;

      *pstart = s;
      *pend = e;
      thr->ts.static_trip = (e0 == n ? -1 : 1);
      return 0;
    }
  else
    {
      unsigned long n, s0, e0, i, c;
      long s, e;

      /* Otherwise, each thread gets exactly chunk_size iterations
         (if available) each time through the loop.  */

      s = ws->incr + (ws->incr > 0 ? -1 : 1);
      n = (ws->end - ws->next + s) / ws->incr;
      i = thr->ts.team_id;
      c = ws->chunk_size;

      /* Initial guess is a C sized chunk positioned nthreads iterations
         in, offset by our thread number.  */
      s0 = (thr->ts.static_trip * nthreads + i) * c;
      e0 = s0 + c;

      /* Detect overflow.  */
      if (s0 >= n)
        return 1;
      if (e0 > n)
        e0 = n;

      /* Transform these to the actual start and end numbers.  */
      s = (long)s0 * ws->incr + ws->next;
      e = (long)e0 * ws->incr + ws->next;

      *pstart = s;
      *pend = e;

      if (e0 == n)
        thr->ts.static_trip = -1;
      else
        thr->ts.static_trip++;
      return 0;
    }
}
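/* Worked example (illustrative, not part of the library): with
   chunk_size == 0, n == 10 iterations and nthreads == 4, the block
   above computes q == 2 and t == 2, so threads 0 and 1 each take
   q + 1 == 3 zero-based iterations ([0,3) and [3,6)) while threads
   2 and 3 take 2 each ([6,8) and [8,10)).  */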
/* This function implements the DYNAMIC scheduling method.  Arguments are
   as for gomp_iter_static_next.  This function must be called with ws->lock
   held.  */

bool
gomp_iter_dynamic_next_locked (long *pstart, long *pend)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_work_share *ws = thr->ts.work_share;
  long start, end, chunk, left;

  start = ws->next;
  if (start == ws->end)
    return false;

  chunk = ws->chunk_size;
  left = ws->end - start;
  if (ws->incr < 0)
    {
      if (chunk < left)
        chunk = left;
    }
  else
    {
      if (chunk > left)
        chunk = left;
    }
  end = start + chunk;

  ws->next = end;
  *pstart = start;
  *pend = end;
  return true;
}
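/* Usage sketch (illustrative; the real callers live in loop.c): the
   caller takes ws->lock around the call, then runs the chunk it was
   handed, e.g.

     gomp_mutex_lock (&thr->ts.work_share->lock);
     ret = gomp_iter_dynamic_next_locked (&start, &end);
     gomp_mutex_unlock (&thr->ts.work_share->lock);
*/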
#ifdef HAVE_SYNC_BUILTINS
/* Similar, but doesn't require the lock held, and uses compare-and-swap
   instead.  Note that the only memory value that changes is ws->next.  */

bool
gomp_iter_dynamic_next (long *pstart, long *pend)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_work_share *ws = thr->ts.work_share;
  long start, end, nend, chunk, incr;

  end = ws->end;
  incr = ws->incr;
  chunk = ws->chunk_size;

  if (__builtin_expect (ws->mode, 1))
    {
      long tmp = __sync_fetch_and_add (&ws->next, chunk);
      if (incr > 0)
        {
          if (tmp >= end)
            return false;
          nend = tmp + chunk;
          if (nend > end)
            nend = end;
          *pstart = tmp;
          *pend = nend;
          return true;
        }
      else
        {
          if (tmp <= end)
            return false;
          nend = tmp + chunk;
          if (nend < end)
            nend = end;
          *pstart = tmp;
          *pend = nend;
          return true;
        }
    }

  start = ws->next;
  while (1)
    {
      long left = end - start;
      long tmp;

      if (start == end)
        return false;

      if (incr < 0)
        {
          if (chunk < left)
            chunk = left;
        }
      else
        {
          if (chunk > left)
            chunk = left;
        }
      nend = start + chunk;

      tmp = __sync_val_compare_and_swap (&ws->next, start, nend);
      if (__builtin_expect (tmp == start, 1))
        break;

      start = tmp;
    }

  *pstart = start;
  *pend = nend;
  return true;
}
#endif /* HAVE_SYNC_BUILTINS */
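/* Example of the CAS retry above (illustrative): suppose two threads
   both read ws->next == 0 with chunk == 4.  The winner's
   __sync_val_compare_and_swap installs 4 and returns 0, so it claims
   [0,4).  The loser's CAS returns 4 != 0; it restarts from
   start = tmp = 4 without rereading ws->next and claims [4,8).  */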
/* This function implements the GUIDED scheduling method.  Arguments are
   as for gomp_iter_static_next.  This function must be called with the
   work share lock held.  */

bool
gomp_iter_guided_next_locked (long *pstart, long *pend)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_work_share *ws = thr->ts.work_share;
  struct gomp_team *team = thr->ts.team;
  unsigned long nthreads = team ? team->nthreads : 1;
  unsigned long n, q;
  long start, end;

  if (ws->next == ws->end)
    return false;

  start = ws->next;
  n = (ws->end - start) / ws->incr;
  q = (n + nthreads - 1) / nthreads;

  if (q < ws->chunk_size)
    q = ws->chunk_size;
  if (q <= n)
    end = start + q * ws->incr;
  else
    end = ws->end;

  ws->next = end;
  *pstart = start;
  *pend = end;
  return true;
}
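/* Worked example (illustrative): 100 iterations with incr == 1,
   nthreads == 4 and chunk_size == 7.  Successive calls hand out
   chunks of 25, 19, 14, 11 and 8 iterations; once the ceiling
   q == (n + nthreads - 1) / nthreads drops below chunk_size, every
   chunk is clamped to 7 until fewer than 7 iterations remain.  */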
#ifdef HAVE_SYNC_BUILTINS
/* Similar, but doesn't require the lock held, and uses compare-and-swap
   instead.  Note that the only memory value that changes is ws->next.  */

bool
gomp_iter_guided_next (long *pstart, long *pend)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_work_share *ws = thr->ts.work_share;
  struct gomp_team *team = thr->ts.team;
  unsigned long nthreads = team ? team->nthreads : 1;
  long start, end, nend, incr;
  unsigned long chunk_size;

  start = ws->next;
  end = ws->end;
  incr = ws->incr;
  chunk_size = ws->chunk_size;

  while (1)
    {
      unsigned long n, q;
      long tmp;

      if (start == end)
        return false;

      n = (end - start) / incr;
      q = (n + nthreads - 1) / nthreads;

      if (q < chunk_size)
        q = chunk_size;
      if (__builtin_expect (q <= n, 1))
        nend = start + q * incr;
      else
        nend = end;

      tmp = __sync_val_compare_and_swap (&ws->next, start, nend);
      if (__builtin_expect (tmp == start, 1))
        break;

      start = tmp;
    }

  *pstart = start;
  *pend = nend;
  return true;
}
#endif /* HAVE_SYNC_BUILTINS */
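/* Usage sketch (illustrative; mirrors the callers in loop.c): the
   compiler-generated code pulls chunks until the function returns
   false, e.g.

     long istart, iend;
     while (gomp_iter_guided_next (&istart, &iend))
       for (long i = istart; i != iend; i += incr)
         body (i);

   where incr matches ws->incr and body () stands in for the user's
   loop body.  */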