/* Copyright (C) 2005, 2008 Free Software Foundation, Inc.
   Contributed by Richard Henderson <rth@redhat.com>.

   This file is part of the GNU OpenMP Library (libgomp).

   Libgomp is free software; you can redistribute it and/or modify it
   under the terms of the GNU Lesser General Public License as published by
   the Free Software Foundation; either version 2.1 of the License, or
   (at your option) any later version.

   Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
   FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public License for
   more details.

   You should have received a copy of the GNU Lesser General Public License
   along with libgomp; see the file COPYING.LIB.  If not, write to the
   Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
   MA 02110-1301, USA.  */

/* As a special exception, if you link this library with other files, some
   of which are compiled with GCC, to produce an executable, this library
   does not by itself cause the resulting executable to be covered by the
   GNU General Public License.  This exception does not however invalidate
   any other reasons why the executable file might be covered by the GNU
   General Public License.  */

/* This file contains routines for managing work-share iteration, both
   for loops and sections.  */

#include "libgomp.h"
#include <stdlib.h>

typedef unsigned long long gomp_ull;

/* This function implements the STATIC scheduling method.  The caller should
   iterate *pstart <= x < *pend.  Return zero if there are more iterations
   to perform; nonzero if not.  Return less than 0 if this thread had
   received the absolutely last iteration.  */
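
/* A minimal sketch of how a caller might drive this iterator for a
   unit-increment loop (added commentary, not part of the original source;
   body is a hypothetical per-iteration function):

     gomp_ull start, end;
     while (gomp_iter_ull_static_next (&start, &end) == 0)
       for (gomp_ull x = start; x < end; x++)
         body (x);
*/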

int
gomp_iter_ull_static_next (gomp_ull *pstart, gomp_ull *pend)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_team *team = thr->ts.team;
  struct gomp_work_share *ws = thr->ts.work_share;
  unsigned long nthreads = team ? team->nthreads : 1;

  if (thr->ts.static_trip == -1)
    return -1;

  /* Quick test for degenerate teams and orphaned constructs.  */
  if (nthreads == 1)
    {
      *pstart = ws->next_ull;
      *pend = ws->end_ull;
      thr->ts.static_trip = -1;
      return ws->next_ull == ws->end_ull;
    }

  /* We interpret chunk_size zero as "unspecified", which means that we
     should break up the iterations such that each thread makes only one
     trip through the outer loop.  */
  if (ws->chunk_size_ull == 0)
    {
      gomp_ull n, q, i, s0, e0, s, e;

      if (thr->ts.static_trip > 0)
        return 1;

      /* Compute the total number of iterations.  */
      if (__builtin_expect (ws->mode, 0) == 0)
        n = (ws->end_ull - ws->next_ull + ws->incr_ull - 1) / ws->incr_ull;
      else
        n = (ws->next_ull - ws->end_ull - ws->incr_ull - 1) / -ws->incr_ull;
      i = thr->ts.team_id;

      /* Compute the "zero-based" start and end points.  That is, as
         if the loop began at zero and incremented by one.  */
      q = n / nthreads;
      q += (q * nthreads != n);
      s0 = q * i;
      e0 = s0 + q;
      if (e0 > n)
        e0 = n;
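
      /* Worked example (added commentary, not in the original source):
         with n == 10 iterations and nthreads == 4, q rounds up to 3, so
         thread 0 computes [0, 3), thread 1 [3, 6), thread 2 [6, 9), and
         thread 3 the clamped tail [9, 10).  */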

      /* Notice when no iterations allocated for this thread.  */
      if (s0 >= e0)
        {
          thr->ts.static_trip = 1;
          return 1;
        }

      /* Transform these to the actual start and end numbers.  */
      s = s0 * ws->incr_ull + ws->next_ull;
      e = e0 * ws->incr_ull + ws->next_ull;

      *pstart = s;
      *pend = e;
      thr->ts.static_trip = (e0 == n ? -1 : 1);
      return 0;
    }
  else
    {
      gomp_ull n, s0, e0, i, c, s, e;

      /* Otherwise, each thread gets exactly chunk_size iterations
         (if available) each time through the loop.  */

      if (__builtin_expect (ws->mode, 0) == 0)
        n = (ws->end_ull - ws->next_ull + ws->incr_ull - 1) / ws->incr_ull;
      else
        n = (ws->next_ull - ws->end_ull - ws->incr_ull - 1) / -ws->incr_ull;
      i = thr->ts.team_id;
      c = ws->chunk_size_ull;

      /* Initial guess is a C sized chunk positioned nthreads iterations
         in, offset by our thread number.  */
      s0 = (thr->ts.static_trip * (gomp_ull) nthreads + i) * c;
      e0 = s0 + c;
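
      /* Worked example (added commentary, not in the original source):
         with nthreads == 4, c == 5 and team_id i == 2, this thread
         claims [10, 15) on trip 0, [30, 35) on trip 1, and so on;
         chunks are dealt round-robin across the team.  */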

      /* Detect overflow.  */
      if (s0 >= n)
        return 1;
      if (e0 > n)
        e0 = n;

      /* Transform these to the actual start and end numbers.  */
      s = s0 * ws->incr_ull + ws->next_ull;
      e = e0 * ws->incr_ull + ws->next_ull;

      *pstart = s;
      *pend = e;

      if (e0 == n)
        thr->ts.static_trip = -1;
      else
        thr->ts.static_trip++;
      return 0;
    }
}

/* This function implements the DYNAMIC scheduling method.  Arguments are
   as for gomp_iter_ull_static_next.  This function must be called with
   ws->lock held.  */

bool
gomp_iter_ull_dynamic_next_locked (gomp_ull *pstart, gomp_ull *pend)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_work_share *ws = thr->ts.work_share;
  gomp_ull start, end, chunk, left;

  start = ws->next_ull;
  if (start == ws->end_ull)
    return false;

  chunk = ws->chunk_size_ull;
  left = ws->end_ull - start;
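
  /* Added commentary: ws->mode & 2 appears to mark a loop with a negative
     increment; chunk and left are then two's-complement "negative" values,
     so the unsigned comparisons below clamp the chunk's magnitude to the
     iterations that remain.  */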
  if (__builtin_expect (ws->mode & 2, 0))
    {
      if (chunk < left)
        chunk = left;
    }
  else
    {
      if (chunk > left)
        chunk = left;
    }
  end = start + chunk;

  ws->next_ull = end;
  *pstart = start;
  *pend = end;
  return true;
}
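
/* Worked example (added commentary, not in the original source): for a
   unit-increment loop with next_ull == 0, end_ull == 10 and a chunk of 4,
   successive calls return [0, 4), [4, 8), then the clamped tail [8, 10),
   after which the function returns false.  */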

#if defined HAVE_SYNC_BUILTINS && defined __LP64__
/* Similar, but doesn't require the lock held, and uses compare-and-swap
   instead.  Note that the only memory value that changes is ws->next_ull.  */

bool
gomp_iter_ull_dynamic_next (gomp_ull *pstart, gomp_ull *pend)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_work_share *ws = thr->ts.work_share;
  gomp_ull start, end, nend, chunk;

  end = ws->end_ull;
  chunk = ws->chunk_size_ull;
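
  /* Added commentary: bit 0 of ws->mode is set at loop initialization
     when ws->next_ull cannot overflow even if every thread over-fetches
     one more chunk, so a plain fetch-and-add is safe here and cheaper
     than the compare-and-swap loop below.  */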
  if (__builtin_expect (ws->mode & 1, 1))
    {
      gomp_ull tmp = __sync_fetch_and_add (&ws->next_ull, chunk);
      if (__builtin_expect (ws->mode & 2, 0) == 0)
        {
          if (tmp >= end)
            return false;
          nend = tmp + chunk;
          if (nend > end)
            nend = end;
          *pstart = tmp;
          *pend = nend;
          return true;
        }
      else
        {
          if (tmp <= end)
            return false;
          nend = tmp + chunk;
          if (nend < end)
            nend = end;
          *pstart = tmp;
          *pend = nend;
          return true;
        }
    }
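
  /* Worked example (added commentary, not in the original source): with
     end == 10, chunk == 4 and next_ull == 8, the first thread's
     fetch-and-add returns 8 and hands back the clamped tail [8, 10);
     a later thread then sees tmp == 12 >= end and returns false, which
     is why the no-overflow precondition (mode & 1) matters.  */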

  start = ws->next_ull;
  while (1)
    {
      gomp_ull left = end - start;
      gomp_ull tmp;

      if (start == end)
        return false;

      if (__builtin_expect (ws->mode & 2, 0))
        {
          if (chunk < left)
            chunk = left;
        }
      else
        {
          if (chunk > left)
            chunk = left;
        }
      nend = start + chunk;

      tmp = __sync_val_compare_and_swap (&ws->next_ull, start, nend);
      if (__builtin_expect (tmp == start, 1))
        break;

      start = tmp;
    }

  *pstart = start;
  *pend = nend;
  return true;
}

#endif /* HAVE_SYNC_BUILTINS */

/* This function implements the GUIDED scheduling method.  Arguments are
   as for gomp_iter_ull_static_next.  This function must be called with the
   work share lock held.  */

bool
gomp_iter_ull_guided_next_locked (gomp_ull *pstart, gomp_ull *pend)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_work_share *ws = thr->ts.work_share;
  struct gomp_team *team = thr->ts.team;
  gomp_ull nthreads = team ? team->nthreads : 1;
  gomp_ull n, q;
  gomp_ull start, end;

  if (ws->next_ull == ws->end_ull)
    return false;

  start = ws->next_ull;
  if (__builtin_expect (ws->mode, 0) == 0)
    n = (ws->end_ull - start) / ws->incr_ull;
  else
    n = (start - ws->end_ull) / -ws->incr_ull;
  q = (n + nthreads - 1) / nthreads;

  if (q < ws->chunk_size_ull)
    q = ws->chunk_size_ull;
  if (q <= n)
    end = start + q * ws->incr_ull;
  else
    end = ws->end_ull;

  ws->next_ull = end;
  *pstart = start;
  *pend = end;
  return true;
}
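
/* Worked example (added commentary, not in the original source): with 100
   iterations remaining, nthreads == 4 and chunk_size == 1, successive
   calls hand out ceil(n / nthreads) iterations each: 25, then 19, then
   14, and so on, shrinking until the chunk_size floor or the end of the
   range is reached.  */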

#if defined HAVE_SYNC_BUILTINS && defined __LP64__
/* Similar, but doesn't require the lock held, and uses compare-and-swap
   instead.  Note that the only memory value that changes is ws->next_ull.  */

bool
gomp_iter_ull_guided_next (gomp_ull *pstart, gomp_ull *pend)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_work_share *ws = thr->ts.work_share;
  struct gomp_team *team = thr->ts.team;
  gomp_ull nthreads = team ? team->nthreads : 1;
  gomp_ull start, end, nend, incr;
  gomp_ull chunk_size;

  start = ws->next_ull;
  end = ws->end_ull;
  incr = ws->incr_ull;
  chunk_size = ws->chunk_size_ull;

  while (1)
    {
      gomp_ull n, q;
      gomp_ull tmp;

      if (start == end)
        return false;

      if (__builtin_expect (ws->mode, 0) == 0)
        n = (end - start) / incr;
      else
        n = (start - end) / -incr;
      q = (n + nthreads - 1) / nthreads;

      if (q < chunk_size)
        q = chunk_size;
      if (__builtin_expect (q <= n, 1))
        nend = start + q * incr;
      else
        nend = end;

      tmp = __sync_val_compare_and_swap (&ws->next_ull, start, nend);
      if (__builtin_expect (tmp == start, 1))
        break;

      start = tmp;
    }

  *pstart = start;
  *pend = nend;
  return true;
}

#endif /* HAVE_SYNC_BUILTINS */