/* libgomp/ordered.c */

/* Copyright (C) 2005 Free Software Foundation, Inc.
   Contributed by Richard Henderson <rth@redhat.com>.

   This file is part of the GNU OpenMP Library (libgomp).

   Libgomp is free software; you can redistribute it and/or modify it
   under the terms of the GNU Lesser General Public License as published by
   the Free Software Foundation; either version 2.1 of the License, or
   (at your option) any later version.

   Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
   FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public License for
   more details.

   You should have received a copy of the GNU Lesser General Public License
   along with libgomp; see the file COPYING.LIB.  If not, write to the
   Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
   MA 02110-1301, USA.  */

/* As a special exception, if you link this library with other files, some
   of which are compiled with GCC, to produce an executable, this library
   does not by itself cause the resulting executable to be covered by the
   GNU General Public License.  This exception does not however invalidate
   any other reasons why the executable file might be covered by the GNU
   General Public License.  */

/* This file handles the ORDERED construct.  */

#include "libgomp.h"

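/* Overview (a hedged reading of the code below, not normative): each
   work share keeps a circular queue of team ids (ws->ordered_team_ids,
   with head ws->ordered_cur and ws->ordered_num_used occupied slots)
   recording the order in which threads may enter their ordered
   sections, plus one release semaphore per thread in
   team->ordered_release[].  A thread enters its ordered section by
   waiting on its own semaphore, and hands the section off by posting
   the semaphore of the next queued thread.  */
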
/* This function is called when first allocating an iteration block.  That
   is, the thread is not currently on the queue.  The work-share lock must
   be held on entry.  */

void
gomp_ordered_first (void)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_team *team = thr->ts.team;
  struct gomp_work_share *ws = thr->ts.work_share;
  unsigned index;

  /* Work share constructs can be orphaned.  */
  if (team == NULL || team->nthreads == 1)
    return;

  index = ws->ordered_cur + ws->ordered_num_used;
  if (index >= team->nthreads)
    index -= team->nthreads;
  ws->ordered_team_ids[index] = thr->ts.team_id;

  /* If this is the first and only thread in the queue, then there is
     no one to release us when we get to our ordered section.  Post to
     our own release queue now so that we won't block later.  */
  if (ws->ordered_num_used++ == 0)
    gomp_sem_post (team->ordered_release[thr->ts.team_id]);
}

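/* A worked example of the wraparound arithmetic above, with assumed
   values: if team->nthreads == 4, ws->ordered_cur == 3 and
   ws->ordered_num_used == 2, the tail slot is 3 + 2 == 5, which wraps
   to 5 - 4 == 1.  The queue then occupies slots 3, 0 and 1 of
   ordered_team_ids.  */
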
/* This function is called when completing the last iteration block.  That
   is, there are no more iterations to perform and so the thread should be
   removed from the queue entirely.  Because of the way ORDERED blocks are
   managed, it follows that we currently own access to the ORDERED block,
   and should now pass it on to the next thread.  The work-share lock must
   be held on entry.  */

void
gomp_ordered_last (void)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_team *team = thr->ts.team;
  struct gomp_work_share *ws = thr->ts.work_share;
  unsigned next_id;

  /* Work share constructs can be orphaned.  */
  if (team == NULL || team->nthreads == 1)
    return;

  /* We're no longer the owner.  */
  ws->ordered_owner = -1;

  /* If we're not the last thread in the queue, then wake the next.  */
  if (--ws->ordered_num_used > 0)
    {
      unsigned next = ws->ordered_cur + 1;
      if (next == team->nthreads)
	next = 0;
      ws->ordered_cur = next;

      next_id = ws->ordered_team_ids[next];
      gomp_sem_post (team->ordered_release[next_id]);
    }
}

/* This function is called when allocating a subsequent allocation block.
   That is, we're done with the current iteration block and we're allocating
   another.  This is the logical combination of a call to gomp_ordered_last
   followed by a call to gomp_ordered_first.  The work-share lock must be
   held on entry.  */

void
gomp_ordered_next (void)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_team *team = thr->ts.team;
  struct gomp_work_share *ws = thr->ts.work_share;
  unsigned index, next_id;

  /* Work share constructs can be orphaned.  */
  if (team == NULL || team->nthreads == 1)
    return;

  /* We're no longer the owner.  */
  ws->ordered_owner = -1;

  /* If there's only one thread in the queue, that must be us.  */
  if (ws->ordered_num_used == 1)
    {
      /* We have a similar situation as in gomp_ordered_first
	 where we need to post to our own release semaphore.  */
      gomp_sem_post (team->ordered_release[thr->ts.team_id]);
      return;
    }

  /* If the queue is entirely full, then we move ourself to the end of
     the queue merely by incrementing ordered_cur.  Only if it's not
     full do we have to write our id.  */
  if (ws->ordered_num_used < team->nthreads)
    {
      index = ws->ordered_cur + ws->ordered_num_used;
      if (index >= team->nthreads)
	index -= team->nthreads;
      ws->ordered_team_ids[index] = thr->ts.team_id;
    }

  index = ws->ordered_cur + 1;
  if (index == team->nthreads)
    index = 0;
  ws->ordered_cur = index;

  next_id = ws->ordered_team_ids[index];
  gomp_sem_post (team->ordered_release[next_id]);
}

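/* Why the full-queue shortcut above is sound, traced with assumed
   values: let team->nthreads == 3, ordered_cur == 0, and
   ordered_team_ids == {2, 0, 1}, so thread 2 owns the section.  When
   the queue is full, the tail index ordered_cur + nthreads wraps back
   around to ordered_cur itself, where the caller's id already sits.
   Bumping ordered_cur to 1 therefore wakes thread 0 and simultaneously
   leaves thread 2 queued at the tail (slot 0) without any store.  */
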
/* This function is called when a statically scheduled loop is first
   being created.  */

void
gomp_ordered_static_init (void)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_team *team = thr->ts.team;

  if (team == NULL || team->nthreads == 1)
    return;

  gomp_sem_post (team->ordered_release[0]);
}

/* This function is called when a statically scheduled loop is moving to
   the next allocation block.  Static schedules are not first come first
   served like the others, so we're to move to the numerically next thread,
   not the next thread on a list.  The work-share lock should *not* be held
   on entry.  */

void
gomp_ordered_static_next (void)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_team *team = thr->ts.team;
  struct gomp_work_share *ws = thr->ts.work_share;
  unsigned id = thr->ts.team_id;

  if (team == NULL || team->nthreads == 1)
    return;

  ws->ordered_owner = -1;

  /* This thread currently owns the lock.  Increment the owner.  */
  if (++id == team->nthreads)
    id = 0;
  ws->ordered_team_ids[0] = id;
  gomp_sem_post (team->ordered_release[id]);
}

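/* Illustration with assumed values: with team->nthreads == 4,
   ownership under a static schedule rotates deterministically
   0 -> 1 -> 2 -> 3 -> 0 and so on, regardless of which thread reaches
   its ordered region first; ordered_team_ids[0] simply records the id
   of the next owner.  */
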
/* This function is called when we need to assert that the thread owns the
   ordered section.  Due to the problem of posted-but-not-waited semaphores,
   this needs to happen before completing a loop iteration.  */

void
gomp_ordered_sync (void)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_team *team = thr->ts.team;
  struct gomp_work_share *ws = thr->ts.work_share;

  /* Work share constructs can be orphaned.  But this clearly means that
     we are the only thread, and so we automatically own the section.  */
  if (team == NULL || team->nthreads == 1)
    return;

  /* ??? I believe it to be safe to access this data without taking the
     ws->lock.  The only presumed race condition is with the previous
     thread on the queue incrementing ordered_cur such that it points
     to us, concurrently with our check below.  But our team_id is
     already present in the queue, and the other thread will always
     post to our release semaphore.  So the two cases are that we will
     either win the race and momentarily block on the semaphore, or lose
     the race and find the semaphore already unlocked and so not block.
     Either way we get correct results.  */

  if (ws->ordered_owner != thr->ts.team_id)
    {
      gomp_sem_wait (team->ordered_release[thr->ts.team_id]);
      ws->ordered_owner = thr->ts.team_id;
    }
}

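/* A concrete interleaving of the race discussed above (hypothetical
   trace): thread B is queued directly behind thread A.  Either A runs
   gomp_sem_post on B's release semaphore before B reaches
   gomp_ordered_sync, so B's gomp_sem_wait returns at once, or B calls
   gomp_sem_wait first and blocks briefly until A posts.  Both
   interleavings end with B owning the section, which is why the
   unlocked read of ordered_owner is safe.  */
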
/* This function is called by user code when encountering the start of an
   ORDERED block.  We must check to see if the current thread is at the
   head of the queue, and if not, block.  */

#ifdef HAVE_ATTRIBUTE_ALIAS
extern void GOMP_ordered_start (void)
	__attribute__((alias ("gomp_ordered_sync")));
#else
void
GOMP_ordered_start (void)
{
  gomp_ordered_sync ();
}
#endif

/* This function is called by user code when encountering the end of an
   ORDERED block.  With the current ORDERED implementation there's nothing
   for us to do.

   However, the current implementation has a flaw in that it does not allow
   the next thread into the ORDERED section immediately after the current
   thread exits the ORDERED section in its last iteration.  The existence
   of this function allows the implementation to change.  */

void
GOMP_ordered_end (void)
{
}
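
/* A hedged end-to-end sketch of how an ordered dynamic loop maps onto
   this file's entry points.  This is an illustrative pseudo-expansion,
   not the literal output of any particular compiler version, and
   part_a / part_b are hypothetical placeholders:

	long s, e, i;
	if (GOMP_loop_ordered_dynamic_start (0, n, 1, chunk, &s, &e))
	  do
	    for (i = s; i < e; i++)
	      {
		part_a (i);
		GOMP_ordered_start ();
		part_b (i);
		GOMP_ordered_end ();
	      }
	  while (GOMP_loop_ordered_dynamic_next (&s, &e));
	GOMP_loop_end ();

   The loop-start and loop-next routines use gomp_ordered_first and
   gomp_ordered_next internally to maintain the queue, while
   GOMP_ordered_start above resolves to gomp_ordered_sync.  */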