/* Copyright (C) 2005-2017 Free Software Foundation, Inc.
   Contributed by Richard Henderson <rth@redhat.com>.

   This file is part of the GNU Offloading and Multi Processing Library
   (libgomp).

   Libgomp is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
   FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */

/* This is a Linux specific implementation of a barrier synchronization
   mechanism for libgomp.  This type is private to the library.  This
   implementation uses atomic instructions and the futex syscall.  */

#include <limits.h>
#include "wait.h"
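
/* A minimal, self-contained sketch (not libgomp code; all names such as
   mini_barrier are hypothetical) of the generation-count futex idiom the
   functions below build on: each waiter samples the generation word and
   sleeps in FUTEX_WAIT as long as it is unchanged; the last arriver resets
   the arrival count, bumps the generation and wakes all sleepers.  Because
   FUTEX_WAIT returns immediately when the word no longer holds the expected
   value, a wake that races with a waiter about to sleep cannot be lost.  */
#if 0
#include <limits.h>
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

struct mini_barrier { unsigned total, awaited, generation; };

static void
mini_barrier_wait (struct mini_barrier *b)
{
  unsigned gen = __atomic_load_n (&b->generation, __ATOMIC_ACQUIRE);
  if (__atomic_sub_fetch (&b->awaited, 1, __ATOMIC_ACQ_REL) == 0)
    {
      /* Last arriver: rearm the barrier, publish the new generation,
         then wake everyone sleeping on it.  */
      b->awaited = b->total;
      __atomic_store_n (&b->generation, gen + 1, __ATOMIC_RELEASE);
      syscall (SYS_futex, &b->generation, FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
    }
  else
    /* Re-check the generation after every wakeup; FUTEX_WAIT bails out
       at once if the generation has already moved.  */
    while (__atomic_load_n (&b->generation, __ATOMIC_ACQUIRE) == gen)
      syscall (SYS_futex, &b->generation, FUTEX_WAIT, gen, NULL, NULL, 0);
}
#endif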

void
gomp_barrier_wait_end (gomp_barrier_t *bar, gomp_barrier_state_t state)
{
  if (__builtin_expect (state & BAR_WAS_LAST, 0))
    {
      /* Next time we'll be awaiting TOTAL threads again.  */
      bar->awaited = bar->total;
      __atomic_store_n (&bar->generation, bar->generation + BAR_INCR,
                        MEMMODEL_RELEASE);
      futex_wake ((int *) &bar->generation, INT_MAX);
    }
  else
    {
      do
        do_wait ((int *) &bar->generation, state);
      while (__atomic_load_n (&bar->generation, MEMMODEL_ACQUIRE) == state);
    }
}
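
/* A hedged usage sketch: the _start/_end split lets a caller run its own
   code between arriving at the barrier and blocking in it.  The wrapper
   below is exactly this pattern with an empty middle; the team variants
   use the gap to run queued tasks.  */
#if 0
  gomp_barrier_state_t state = gomp_barrier_wait_start (bar);
  /* ... work that must happen after this thread's arrival has been
     counted, but before it blocks waiting for the release ... */
  gomp_barrier_wait_end (bar, state);
#endif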

void
gomp_barrier_wait (gomp_barrier_t *bar)
{
  gomp_barrier_wait_end (bar, gomp_barrier_wait_start (bar));
}

/* Like gomp_barrier_wait, except that if the encountering thread
   is not the last one to hit the barrier, it returns immediately.
   The intended usage is that a thread which intends to gomp_barrier_destroy
   this barrier calls gomp_barrier_wait, while all other threads
   call gomp_barrier_wait_last.  When gomp_barrier_wait returns,
   the barrier can be safely destroyed.  */

void
gomp_barrier_wait_last (gomp_barrier_t *bar)
{
  gomp_barrier_state_t state = gomp_barrier_wait_start (bar);
  if (state & BAR_WAS_LAST)
    gomp_barrier_wait_end (bar, state);
}
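
/* A sketch of the destruction protocol described above (bar is assumed to
   be a gomp_barrier_t shared by all participating threads):  */
#if 0
  /* In the one thread that will destroy the barrier:  */
  gomp_barrier_wait (&bar);      /* Returns only after all have arrived.  */
  gomp_barrier_destroy (&bar);   /* Safe: no thread can still touch it.  */

  /* In every other thread:  */
  gomp_barrier_wait_last (&bar); /* Returns at once unless it was last.  */
#endif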

void
gomp_team_barrier_wake (gomp_barrier_t *bar, int count)
{
  futex_wake ((int *) &bar->generation, count == 0 ? INT_MAX : count);
}

void
gomp_team_barrier_wait_end (gomp_barrier_t *bar, gomp_barrier_state_t state)
{
  unsigned int generation, gen;

  if (__builtin_expect (state & BAR_WAS_LAST, 0))
    {
      /* Next time we'll be awaiting TOTAL threads again.  */
      struct gomp_thread *thr = gomp_thread ();
      struct gomp_team *team = thr->ts.team;

      bar->awaited = bar->total;
      team->work_share_cancelled = 0;
      if (__builtin_expect (team->task_count, 0))
        {
          gomp_barrier_handle_tasks (state);
          state &= ~BAR_WAS_LAST;
        }
      else
        {
          state &= ~BAR_CANCELLED;
          state += BAR_INCR - BAR_WAS_LAST;
          __atomic_store_n (&bar->generation, state, MEMMODEL_RELEASE);
          futex_wake ((int *) &bar->generation, INT_MAX);
          return;
        }
    }

  generation = state;
  state &= ~BAR_CANCELLED;
  do
    {
      do_wait ((int *) &bar->generation, generation);
      gen = __atomic_load_n (&bar->generation, MEMMODEL_ACQUIRE);
      if (__builtin_expect (gen & BAR_TASK_PENDING, 0))
        {
          gomp_barrier_handle_tasks (state);
          gen = __atomic_load_n (&bar->generation, MEMMODEL_ACQUIRE);
        }
      generation |= gen & BAR_WAITING_FOR_TASK;
    }
  while (gen != state + BAR_INCR);
}
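
/* A hedged sketch of the producer side that the wait loop above pairs with:
   when the tasking code (task.c) queues new work during a team barrier, it
   tags the generation word and wakes a sleeper, which then re-enters the
   loop and calls gomp_barrier_handle_tasks instead of sleeping through the
   barrier.  Exact call sites and locking live in task.c; this is only the
   shape of the interaction.  */
#if 0
  /* With team->task_lock held, after enqueueing a task:  */
  gomp_team_barrier_set_task_pending (&team->barrier); /* set BAR_TASK_PENDING */
  gomp_team_barrier_wake (&team->barrier, 1);          /* rouse one waiter */
#endif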

void
gomp_team_barrier_wait (gomp_barrier_t *bar)
{
  gomp_team_barrier_wait_end (bar, gomp_barrier_wait_start (bar));
}

void
gomp_team_barrier_wait_final (gomp_barrier_t *bar)
{
  gomp_barrier_state_t state = gomp_barrier_wait_final_start (bar);
  if (__builtin_expect (state & BAR_WAS_LAST, 0))
    bar->awaited_final = bar->total;
  gomp_team_barrier_wait_end (bar, state);
}

bool
gomp_team_barrier_wait_cancel_end (gomp_barrier_t *bar,
                                   gomp_barrier_state_t state)
{
  unsigned int generation, gen;

  if (__builtin_expect (state & BAR_WAS_LAST, 0))
    {
      /* Next time we'll be awaiting TOTAL threads again.  */
      /* BAR_CANCELLED should never be set in state here, because
         cancellation means that at least one of the threads has been
         cancelled, thus on a cancellable barrier we should never see
         all threads arrive.  */
      struct gomp_thread *thr = gomp_thread ();
      struct gomp_team *team = thr->ts.team;

      bar->awaited = bar->total;
      team->work_share_cancelled = 0;
      if (__builtin_expect (team->task_count, 0))
        {
          gomp_barrier_handle_tasks (state);
          state &= ~BAR_WAS_LAST;
        }
      else
        {
          state += BAR_INCR - BAR_WAS_LAST;
          __atomic_store_n (&bar->generation, state, MEMMODEL_RELEASE);
          futex_wake ((int *) &bar->generation, INT_MAX);
          return false;
        }
    }

  if (__builtin_expect (state & BAR_CANCELLED, 0))
    return true;

  generation = state;
  do
    {
      do_wait ((int *) &bar->generation, generation);
      gen = __atomic_load_n (&bar->generation, MEMMODEL_ACQUIRE);
      if (__builtin_expect (gen & BAR_CANCELLED, 0))
        return true;
      if (__builtin_expect (gen & BAR_TASK_PENDING, 0))
        {
          gomp_barrier_handle_tasks (state);
          gen = __atomic_load_n (&bar->generation, MEMMODEL_ACQUIRE);
        }
      generation |= gen & BAR_WAITING_FOR_TASK;
    }
  while (gen != state + BAR_INCR);

  return false;
}
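
/* A hedged sketch of how the boolean result is meant to be consumed by a
   caller such as GOMP_barrier_cancel: true means the barrier was cancelled
   and the thread should unwind to the end of the region rather than proceed
   past the barrier.  */
#if 0
  if (gomp_team_barrier_wait_cancel (&team->barrier))
    {
      /* Cancelled: skip the code past the barrier and head for the
         end of the parallel region.  */
    }
#endif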

bool
gomp_team_barrier_wait_cancel (gomp_barrier_t *bar)
{
  return gomp_team_barrier_wait_cancel_end (bar, gomp_barrier_wait_start (bar));
}

void
gomp_team_barrier_cancel (struct gomp_team *team)
{
  gomp_mutex_lock (&team->task_lock);
  if (team->barrier.generation & BAR_CANCELLED)
    {
      gomp_mutex_unlock (&team->task_lock);
      return;
    }
  team->barrier.generation |= BAR_CANCELLED;
  gomp_mutex_unlock (&team->task_lock);
  futex_wake ((int *) &team->barrier.generation, INT_MAX);
}
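
/* Usage note (hedged): this function is reached from the library's
   cancellation entry points when an OpenMP cancel construct fires.  Taking
   task_lock serializes the generation update against the tasking code, and
   the final futex_wake releases every thread sleeping in the cancellable
   wait loops above so it can observe BAR_CANCELLED and return true.  */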