libgomp/config/linux/bar.c

/* Copyright (C) 2005-2014 Free Software Foundation, Inc.
   Contributed by Richard Henderson <rth@redhat.com>.

   This file is part of the GNU OpenMP Library (libgomp).

   Libgomp is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
   FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */

/* This is a Linux specific implementation of a barrier synchronization
   mechanism for libgomp.  This type is private to the library.  This
   implementation uses atomic instructions and the futex syscall.  */

#include <limits.h>
#include "wait.h"

void
gomp_barrier_wait_end (gomp_barrier_t *bar, gomp_barrier_state_t state)
{
  if (__builtin_expect (state & BAR_WAS_LAST, 0))
    {
      /* Next time we'll be awaiting TOTAL threads again.  */
      bar->awaited = bar->total;
      __atomic_store_n (&bar->generation, bar->generation + BAR_INCR,
                        MEMMODEL_RELEASE);
      futex_wake ((int *) &bar->generation, INT_MAX);
    }
  else
    {
      do
        do_wait ((int *) &bar->generation, state);
      while (__atomic_load_n (&bar->generation, MEMMODEL_ACQUIRE) == state);
    }
}

void
gomp_barrier_wait (gomp_barrier_t *bar)
{
  gomp_barrier_wait_end (bar, gomp_barrier_wait_start (bar));
}

/* Like gomp_barrier_wait, except that if the encountering thread
   is not the last one to hit the barrier, it returns immediately.
   The intended usage is that a thread which intends to gomp_barrier_destroy
   this barrier calls gomp_barrier_wait, while all other threads
   call gomp_barrier_wait_last.  When gomp_barrier_wait returns,
   the barrier can be safely destroyed.  */

void
gomp_barrier_wait_last (gomp_barrier_t *bar)
{
  gomp_barrier_state_t state = gomp_barrier_wait_start (bar);
  if (state & BAR_WAS_LAST)
    gomp_barrier_wait_end (bar, state);
}
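
/* A hedged sketch of the destroy pattern described in the comment above,
   assuming the gomp_barrier_init / gomp_barrier_destroy interfaces
   declared in the matching bar.h; the thread-entry functions shown are
   hypothetical.  Guarded by "#if 0" so it is never compiled into the
   library.  */
#if 0
static void
destroyer_thread (gomp_barrier_t *bar)
{
  /* Blocks until every thread has arrived, then may free the barrier.  */
  gomp_barrier_wait (bar);
  gomp_barrier_destroy (bar);
}

static void
other_thread (gomp_barrier_t *bar)
{
  /* Signals arrival and returns immediately, never touching BAR again,
     so the destroyer cannot race with a late waiter.  */
  gomp_barrier_wait_last (bar);
}
#endif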

void
gomp_team_barrier_wake (gomp_barrier_t *bar, int count)
{
  futex_wake ((int *) &bar->generation, count == 0 ? INT_MAX : count);
}
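
/* The team barrier below multiplexes flag bits into the low bits of the
   generation word and counts generations in multiples of BAR_INCR.  The
   values sketched here are believed to match those defined in the
   matching config/linux/bar.h of this release; they are reproduced only
   for reference and guarded by "#if 0" -- consult bar.h for the
   authoritative definitions.  */
#if 0
#define BAR_TASK_PENDING	1	/* Tasks are queued on the barrier.  */
#define BAR_WAS_LAST		1	/* Arrival state: we were the last.  */
#define BAR_WAITING_FOR_TASK	2	/* A thread waits on task completion.  */
#define BAR_CANCELLED		4	/* The barrier has been cancelled.  */
#define BAR_INCR		8	/* Generation counter increment.  */
#endif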

void
gomp_team_barrier_wait_end (gomp_barrier_t *bar, gomp_barrier_state_t state)
{
  unsigned int generation, gen;

  if (__builtin_expect (state & BAR_WAS_LAST, 0))
    {
      /* Next time we'll be awaiting TOTAL threads again.  */
      struct gomp_thread *thr = gomp_thread ();
      struct gomp_team *team = thr->ts.team;

      bar->awaited = bar->total;
      team->work_share_cancelled = 0;
      if (__builtin_expect (team->task_count, 0))
        {
          gomp_barrier_handle_tasks (state);
          state &= ~BAR_WAS_LAST;
        }
      else
        {
          state &= ~BAR_CANCELLED;
          state += BAR_INCR - BAR_WAS_LAST;
          __atomic_store_n (&bar->generation, state, MEMMODEL_RELEASE);
          futex_wake ((int *) &bar->generation, INT_MAX);
          return;
        }
    }

  generation = state;
  state &= ~BAR_CANCELLED;
  do
    {
      do_wait ((int *) &bar->generation, generation);
      gen = __atomic_load_n (&bar->generation, MEMMODEL_ACQUIRE);
      if (__builtin_expect (gen & BAR_TASK_PENDING, 0))
        {
          gomp_barrier_handle_tasks (state);
          gen = __atomic_load_n (&bar->generation, MEMMODEL_ACQUIRE);
        }
      generation |= gen & BAR_WAITING_FOR_TASK;
    }
  while (gen != state + BAR_INCR);
}

void
gomp_team_barrier_wait (gomp_barrier_t *bar)
{
  gomp_team_barrier_wait_end (bar, gomp_barrier_wait_start (bar));
}
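
/* gomp_team_barrier_wait_final below is used for the team's final
   barrier.  It is believed to pair with a separate awaited_final counter,
   kept apart from the frequently written awaited counter to reduce
   cache-line contention on that last barrier; see the matching bar.h for
   the actual structure layout.  */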

void
gomp_team_barrier_wait_final (gomp_barrier_t *bar)
{
  gomp_barrier_state_t state = gomp_barrier_wait_final_start (bar);
  if (__builtin_expect (state & BAR_WAS_LAST, 0))
    bar->awaited_final = bar->total;
  gomp_team_barrier_wait_end (bar, state);
}

bool
gomp_team_barrier_wait_cancel_end (gomp_barrier_t *bar,
                                   gomp_barrier_state_t state)
{
  unsigned int generation, gen;

  if (__builtin_expect (state & BAR_WAS_LAST, 0))
    {
      /* Next time we'll be awaiting TOTAL threads again.  */
      /* BAR_CANCELLED should never be set in state here, because
         cancellation means that at least one of the threads has been
         cancelled, thus on a cancellable barrier we should never see
         all threads arrive.  */
      struct gomp_thread *thr = gomp_thread ();
      struct gomp_team *team = thr->ts.team;

      bar->awaited = bar->total;
      team->work_share_cancelled = 0;
      if (__builtin_expect (team->task_count, 0))
        {
          gomp_barrier_handle_tasks (state);
          state &= ~BAR_WAS_LAST;
        }
      else
        {
          state += BAR_INCR - BAR_WAS_LAST;
          __atomic_store_n (&bar->generation, state, MEMMODEL_RELEASE);
          futex_wake ((int *) &bar->generation, INT_MAX);
          return false;
        }
    }

  if (__builtin_expect (state & BAR_CANCELLED, 0))
    return true;

  generation = state;
  do
    {
      do_wait ((int *) &bar->generation, generation);
      gen = __atomic_load_n (&bar->generation, MEMMODEL_ACQUIRE);
      if (__builtin_expect (gen & BAR_CANCELLED, 0))
        return true;
      if (__builtin_expect (gen & BAR_TASK_PENDING, 0))
        {
          gomp_barrier_handle_tasks (state);
          gen = __atomic_load_n (&bar->generation, MEMMODEL_ACQUIRE);
        }
      generation |= gen & BAR_WAITING_FOR_TASK;
    }
  while (gen != state + BAR_INCR);

  return false;
}

bool
gomp_team_barrier_wait_cancel (gomp_barrier_t *bar)
{
  return gomp_team_barrier_wait_cancel_end (bar, gomp_barrier_wait_start (bar));
}
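
/* At the OpenMP level, the cancel paths above are reached through
   constructs like the one sketched below.  This is a hedged, user-level
   illustration (compile with -fopenmp, run with OMP_CANCELLATION=true),
   not library-internal code, and it is guarded by "#if 0" so it is never
   compiled into the library.  */
#if 0
#include <omp.h>

static int
find_first_negative (const int *a, int n)
{
  int found = -1;
#pragma omp parallel for shared(found)
  for (int i = 0; i < n; i++)
    {
      if (a[i] < 0)
        {
#pragma omp critical
          found = i;
          /* Requests cancellation of the enclosing worksharing region;
             the cancellable barrier lets other threads observe it.  */
#pragma omp cancel for
        }
#pragma omp cancellation point for
    }
  return found;
}
#endif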

void
gomp_team_barrier_cancel (struct gomp_team *team)
{
  gomp_mutex_lock (&team->task_lock);
  if (team->barrier.generation & BAR_CANCELLED)
    {
      gomp_mutex_unlock (&team->task_lock);
      return;
    }
  team->barrier.generation |= BAR_CANCELLED;
  gomp_mutex_unlock (&team->task_lock);
  futex_wake ((int *) &team->barrier.generation, INT_MAX);
}