libgomp/ordered.c
/* Copyright (C) 2005-2015 Free Software Foundation, Inc.
   Contributed by Richard Henderson <rth@redhat.com>.

   This file is part of the GNU Offloading and Multi Processing Library
   (libgomp).

   Libgomp is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
   FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */
/* This file handles the ORDERED construct.  */
#include "libgomp.h"
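
/* For reference (an illustrative example, not part of the library):
   the ORDERED construct lets a parallel loop execute a marked block in
   the original iteration order, e.g.

     #pragma omp parallel for ordered
     for (i = 0; i < n; i++)
       {
         v = compute (i);
     #pragma omp ordered
         emit (i, v);
       }

   compute () runs concurrently across threads, while emit () executes
   strictly in iteration order; both are hypothetical user functions.  */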
/* This function is called when first allocating an iteration block.  That
   is, the thread is not currently on the queue.  The work-share lock must
   be held on entry.  */
void
gomp_ordered_first (void)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_team *team = thr->ts.team;
  struct gomp_work_share *ws = thr->ts.work_share;
  unsigned index;

  /* Work share constructs can be orphaned.  */
  if (team == NULL || team->nthreads == 1)
    return;

  index = ws->ordered_cur + ws->ordered_num_used;
  if (index >= team->nthreads)
    index -= team->nthreads;
  ws->ordered_team_ids[index] = thr->ts.team_id;

  /* If this is the first and only thread in the queue, then there is
     no one to release us when we get to our ordered section.  Post to
     our own release queue now so that we won't block later.  */
  if (ws->ordered_num_used++ == 0)
    gomp_sem_post (team->ordered_release[thr->ts.team_id]);
}
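
/* A worked example of the queue arithmetic above (illustrative, not
   part of the library): with team->nthreads == 4, ordered_cur == 3 and
   ordered_num_used == 2, the new tail index is 3 + 2 == 5, wrapping to
   5 - 4 == 1.  ordered_team_ids thus behaves as a circular buffer of
   nthreads slots, with ordered_cur marking the head (the current or
   next owner) and ordered_num_used counting the queued threads.  */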
/* This function is called when completing the last iteration block.  That
   is, there are no more iterations to perform and so the thread should be
   removed from the queue entirely.  Because of the way ORDERED blocks are
   managed, it follows that we currently own access to the ORDERED block,
   and should now pass it on to the next thread.  The work-share lock must
   be held on entry.  */
void
gomp_ordered_last (void)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_team *team = thr->ts.team;
  struct gomp_work_share *ws = thr->ts.work_share;
  unsigned next_id;

  /* Work share constructs can be orphaned.  */
  if (team == NULL || team->nthreads == 1)
    return;

  /* We're no longer the owner.  */
  ws->ordered_owner = -1;

  /* If we're not the last thread in the queue, then wake the next.  */
  if (--ws->ordered_num_used > 0)
    {
      unsigned next = ws->ordered_cur + 1;
      if (next == team->nthreads)
	next = 0;
      ws->ordered_cur = next;

      next_id = ws->ordered_team_ids[next];
      gomp_sem_post (team->ordered_release[next_id]);
    }
}
/* This function is called when allocating a subsequent allocation block.
   That is, we're done with the current iteration block and we're allocating
   another.  This is the logical combination of a call to gomp_ordered_last
   followed by a call to gomp_ordered_first.  The work-share lock must be
   held on entry.  */
void
gomp_ordered_next (void)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_team *team = thr->ts.team;
  struct gomp_work_share *ws = thr->ts.work_share;
  unsigned index, next_id;

  /* Work share constructs can be orphaned.  */
  if (team == NULL || team->nthreads == 1)
    return;

  /* We're no longer the owner.  */
  ws->ordered_owner = -1;

  /* If there's only one thread in the queue, that must be us.  */
  if (ws->ordered_num_used == 1)
    {
      /* We have a similar situation as in gomp_ordered_first
	 where we need to post to our own release semaphore.  */
      gomp_sem_post (team->ordered_release[thr->ts.team_id]);
      return;
    }

  /* If the queue is entirely full, then we move ourselves to the end of
     the queue merely by incrementing ordered_cur.  Only if it's not
     full do we have to write our id.  */
  if (ws->ordered_num_used < team->nthreads)
    {
      index = ws->ordered_cur + ws->ordered_num_used;
      if (index >= team->nthreads)
	index -= team->nthreads;
      ws->ordered_team_ids[index] = thr->ts.team_id;
    }

  index = ws->ordered_cur + 1;
  if (index == team->nthreads)
    index = 0;
  ws->ordered_cur = index;

  next_id = ws->ordered_team_ids[index];
  gomp_sem_post (team->ordered_release[next_id]);
}
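
/* A worked example of the full-queue shortcut above (illustrative, not
   part of the library): if all nthreads threads are queued, every slot
   of ordered_team_ids is already populated, including ours at the old
   head position ordered_cur.  Advancing ordered_cur by one then both
   dequeues us from the front and leaves our old slot as the new tail,
   which already holds our team id, so no store is needed.  */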
/* This function is called when a statically scheduled loop is first
   being created.  */
void
gomp_ordered_static_init (void)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_team *team = thr->ts.team;

  if (team == NULL || team->nthreads == 1)
    return;

  gomp_sem_post (team->ordered_release[0]);
}
/* This function is called when a statically scheduled loop is moving to
   the next allocation block.  Static schedules are not first come first
   served like the others, so we're to move to the numerically next thread,
   not the next thread on a list.  The work-share lock should *not* be held
   on entry.  */
void
gomp_ordered_static_next (void)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_team *team = thr->ts.team;
  struct gomp_work_share *ws = thr->ts.work_share;
  unsigned id = thr->ts.team_id;

  if (team == NULL || team->nthreads == 1)
    return;

  ws->ordered_owner = -1;

  /* This thread currently owns the lock.  Increment the owner.  */
  if (++id == team->nthreads)
    id = 0;
  ws->ordered_team_ids[0] = id;
  gomp_sem_post (team->ordered_release[id]);
}
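
/* Illustrative handoff sequence (not part of the library): with a team
   of 3 threads under a static schedule, gomp_ordered_static_init
   releases thread 0, and each gomp_ordered_static_next call passes
   ownership round-robin, 0 -> 1 -> 2 -> 0 -> ..., each handoff being
   one post to the next thread's release semaphore.  */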
/* This function is called when we need to assert that the thread owns the
   ordered section.  Due to the problem of posted-but-not-waited semaphores,
   this needs to happen before completing a loop iteration.  */
void
gomp_ordered_sync (void)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_team *team = thr->ts.team;
  struct gomp_work_share *ws = thr->ts.work_share;

  /* Work share constructs can be orphaned.  But this clearly means that
     we are the only thread, and so we automatically own the section.  */
  if (team == NULL || team->nthreads == 1)
    return;

  /* ??? I believe it to be safe to access this data without taking the
     ws->lock.  The only presumed race condition is with the previous
     thread on the queue incrementing ordered_cur such that it points
     to us, concurrently with our check below.  But our team_id is
     already present in the queue, and the other thread will always
     post to our release semaphore.  So the two cases are that we will
     either win the race and momentarily block on the semaphore, or lose
     the race and find the semaphore already unlocked and so not block.
     Either way we get correct results.
     However, there is an implicit flush on entry to an ordered region,
     so we do need to have a barrier here.  If we were taking a lock
     this could be MEMMODEL_RELEASE since the acquire would be covered
     by the lock.  */

  __atomic_thread_fence (MEMMODEL_ACQ_REL);
  if (ws->ordered_owner != thr->ts.team_id)
    {
      gomp_sem_wait (team->ordered_release[thr->ts.team_id]);
      ws->ordered_owner = thr->ts.team_id;
    }
}
/* This function is called by user code when encountering the start of an
   ORDERED block.  We must check to see if the current thread is at the
   head of the queue, and if not, block.  */
#ifdef HAVE_ATTRIBUTE_ALIAS
extern void GOMP_ordered_start (void)
	__attribute__((alias ("gomp_ordered_sync")));
#else
void
GOMP_ordered_start (void)
{
  gomp_ordered_sync ();
}
#endif
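
/* Illustrative lowering (a sketch of what a compiler may emit, not code
   in this library): given user code such as

     #pragma omp for ordered
     for (i = 0; i < n; i++)
       {
         work (i);
     #pragma omp ordered
         output (i);
       }

   the body of the ordered region is bracketed roughly as

     GOMP_ordered_start ();
     output (i);
     GOMP_ordered_end ();

   where work () and output () are hypothetical user functions.  */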
/* This function is called by user code when encountering the end of an
   ORDERED block.  With the current ORDERED implementation there's nothing
   for us to do.

   However, the current implementation has a flaw in that it does not allow
   the next thread into the ORDERED section immediately after the current
   thread exits the ORDERED section in its last iteration.  The existence
   of this function allows the implementation to change.  */
void
GOMP_ordered_end (void)
{
}