/* Copyright (C) 2005-2014 Free Software Foundation, Inc.
   Contributed by Richard Henderson <rth@redhat.com>.

   This file is part of the GNU OpenMP Library (libgomp).

   Libgomp is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
   FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */

/* This file handles the ORDERED construct.  */

#include "libgomp.h"

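/* For orientation, a sketch of the user-level construct this file
   implements (illustrative only; compute and emit stand for arbitrary
   user functions):

	#pragma omp parallel for ordered schedule(dynamic)
	for (i = 0; i < n; i++)
	  {
	    compute (i);
	    #pragma omp ordered
	    emit (i);
	  }

   Whatever order the loop iterations execute in, the calls to emit
   are performed in iteration order.  */
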
/* This function is called when first allocating an iteration block.  That
   is, the thread is not currently on the queue.  The work-share lock must
   be held on entry.  */

void
gomp_ordered_first (void)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_team *team = thr->ts.team;
  struct gomp_work_share *ws = thr->ts.work_share;
  unsigned index;

  /* Work share constructs can be orphaned.  */
  if (team == NULL || team->nthreads == 1)
    return;

  index = ws->ordered_cur + ws->ordered_num_used;
  if (index >= team->nthreads)
    index -= team->nthreads;
  ws->ordered_team_ids[index] = thr->ts.team_id;

  /* If this is the first and only thread in the queue, then there is
     no one to release us when we get to our ordered section.  Post to
     our own release queue now so that we won't block later.  */
  if (ws->ordered_num_used++ == 0)
    gomp_sem_post (team->ordered_release[thr->ts.team_id]);
}

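/* Note that ws->ordered_team_ids is treated throughout as a circular
   buffer of team ids: ws->ordered_cur indexes the head (the thread
   next entitled to the ordered section) and ws->ordered_num_used
   counts the occupied slots, with indices wrapping at
   team->nthreads.  */
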
/* This function is called when completing the last iteration block.  That
   is, there are no more iterations to perform and so the thread should be
   removed from the queue entirely.  Because of the way ORDERED blocks are
   managed, it follows that we currently own access to the ORDERED block,
   and should now pass it on to the next thread.  The work-share lock must
   be held on entry.  */

void
gomp_ordered_last (void)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_team *team = thr->ts.team;
  struct gomp_work_share *ws = thr->ts.work_share;
  unsigned next_id;

  /* Work share constructs can be orphaned.  */
  if (team == NULL || team->nthreads == 1)
    return;

  /* We're no longer the owner.  */
  ws->ordered_owner = -1;

  /* If we're not the last thread in the queue, then wake the next.  */
  if (--ws->ordered_num_used > 0)
    {
      unsigned next = ws->ordered_cur + 1;
      if (next == team->nthreads)
        next = 0;
      ws->ordered_cur = next;

      next_id = ws->ordered_team_ids[next];
      gomp_sem_post (team->ordered_release[next_id]);
    }
}

/* This function is called when allocating a subsequent allocation block.
   That is, we're done with the current iteration block and we're allocating
   another.  This is the logical combination of a call to gomp_ordered_last
   followed by a call to gomp_ordered_first.  The work-share lock must be
   held on entry.  */

void
gomp_ordered_next (void)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_team *team = thr->ts.team;
  struct gomp_work_share *ws = thr->ts.work_share;
  unsigned index, next_id;

  /* Work share constructs can be orphaned.  */
  if (team == NULL || team->nthreads == 1)
    return;

  /* We're no longer the owner.  */
  ws->ordered_owner = -1;

  /* If there's only one thread in the queue, that must be us.  */
  if (ws->ordered_num_used == 1)
    {
      /* We have a similar situation as in gomp_ordered_first
         where we need to post to our own release semaphore.  */
      gomp_sem_post (team->ordered_release[thr->ts.team_id]);
      return;
    }

  /* If the queue is entirely full, then we move ourself to the end of
     the queue merely by incrementing ordered_cur.  Only if it's not
     full do we have to write our id.  */
  if (ws->ordered_num_used < team->nthreads)
    {
      index = ws->ordered_cur + ws->ordered_num_used;
      if (index >= team->nthreads)
        index -= team->nthreads;
      ws->ordered_team_ids[index] = thr->ts.team_id;
    }

  index = ws->ordered_cur + 1;
  if (index == team->nthreads)
    index = 0;
  ws->ordered_cur = index;

  next_id = ws->ordered_team_ids[index];
  gomp_sem_post (team->ordered_release[next_id]);
}

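/* A small worked example of the rotation above: with four threads,
   suppose ordered_cur == 1, ordered_num_used == 2, and slots 1 and 2
   hold ids 2 and 0, so thread 2 heads the queue.  When thread 2 calls
   gomp_ordered_next, the queue is not full, so it writes its id into
   slot (1 + 2) % 4 == 3, advances ordered_cur to 2, and posts thread
   0's release semaphore; thread 2 is now at the back of the queue.  */
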
/* This function is called when a statically scheduled loop is first
   being created.  */

void
gomp_ordered_static_init (void)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_team *team = thr->ts.team;

  if (team == NULL || team->nthreads == 1)
    return;

  gomp_sem_post (team->ordered_release[0]);
}

/* This function is called when a statically scheduled loop is moving to
   the next allocation block.  Static schedules are not first come first
   served like the others, so we're to move to the numerically next thread,
   not the next thread on a list.  The work-share lock should *not* be held
   on entry.  */

void
gomp_ordered_static_next (void)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_team *team = thr->ts.team;
  struct gomp_work_share *ws = thr->ts.work_share;
  unsigned id = thr->ts.team_id;

  if (team == NULL || team->nthreads == 1)
    return;

  ws->ordered_owner = -1;

  /* This thread currently owns the lock.  Increment the owner.  */
  if (++id == team->nthreads)
    id = 0;
  ws->ordered_team_ids[0] = id;
  gomp_sem_post (team->ordered_release[id]);
}

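/* For example, with a team of three threads, gomp_ordered_static_init
   releases thread 0, and each gomp_ordered_static_next call then hands
   the section to the numerically next thread, so ownership cycles
   0, 1, 2, 0, 1, 2, ... across successive allocation blocks.  */
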
/* This function is called when we need to assert that the thread owns the
   ordered section.  Due to the problem of posted-but-not-waited semaphores,
   this needs to happen before completing a loop iteration.  */

void
gomp_ordered_sync (void)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_team *team = thr->ts.team;
  struct gomp_work_share *ws = thr->ts.work_share;

  /* Work share constructs can be orphaned.  But this clearly means that
     we are the only thread, and so we automatically own the section.  */
  if (team == NULL || team->nthreads == 1)
    return;

  /* ??? I believe it to be safe to access this data without taking the
     ws->lock.  The only presumed race condition is with the previous
     thread on the queue incrementing ordered_cur such that it points
     to us, concurrently with our check below.  But our team_id is
     already present in the queue, and the other thread will always
     post to our release semaphore.  So the two cases are that we will
     either win the race and momentarily block on the semaphore, or lose
     the race and find the semaphore already unlocked and so not block.
     Either way we get correct results.
     However, there is an implicit flush on entry to an ordered region,
     so we do need to have a barrier here.  If we were taking a lock
     this could be MEMMODEL_RELEASE since the acquire would be covered
     by the lock.  */

  __atomic_thread_fence (MEMMODEL_ACQ_REL);
  if (ws->ordered_owner != thr->ts.team_id)
    {
      gomp_sem_wait (team->ordered_release[thr->ts.team_id]);
      ws->ordered_owner = thr->ts.team_id;
    }
}

/* This function is called by user code when encountering the start of an
   ORDERED block.  We must check to see if the current thread is at the
   head of the queue, and if not, block.  */

#ifdef HAVE_ATTRIBUTE_ALIAS
extern void GOMP_ordered_start (void)
	__attribute__((alias ("gomp_ordered_sync")));
#else
void
GOMP_ordered_start (void)
{
  gomp_ordered_sync ();
}
#endif

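/* In outline, the compiler expands an `ordered' directive into the
   pattern

	GOMP_ordered_start ();
	... body of the ordered block ...
	GOMP_ordered_end ();

   which is why GOMP_ordered_start can simply alias gomp_ordered_sync
   above.  */
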
/* This function is called by user code when encountering the end of an
   ORDERED block.  With the current ORDERED implementation there's nothing
   for us to do.

   However, the current implementation has a flaw in that it does not allow
   the next thread into the ORDERED section immediately after the current
   thread exits the ORDERED section in its last iteration.  The existence
   of this function allows the implementation to change.  */

void
GOMP_ordered_end (void)
{
}