libgomp/config/rtems/bar.c
/* Copyright (C) 2005-2018 Free Software Foundation, Inc.
   Contributed by Sebastian Huber <sebastian.huber@embedded-brains.de>.

   This file is part of the GNU OpenMP Library (libgomp).

   Libgomp is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
   FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */
/* This is the RTEMS implementation of a barrier synchronization
   mechanism for libgomp.  It is identical to the Linux implementation, except
   that the futex API is slightly different.  This type is private to the
   library.  */
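/* A rough comparison, not part of the original sources: on Linux, libgomp's
   futex_wait boils down to a syscall keyed directly on the address, roughly

     syscall (SYS_futex, addr, FUTEX_WAIT, val, NULL);

   whereas the RTEMS self-contained futex API used below pairs the address
   with a separate futex control object, so the barrier embeds one and it
   has to be recovered from the generation address first.  */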
#include "libgomp.h"
#include "bar.h"
#include <limits.h>
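/* The futex operations below are keyed on the address of the barrier's
   generation word, but the RTEMS futex calls also need the enclosing
   barrier's futex control object.  generation_to_barrier recovers the
   barrier with the usual container-of idiom: step back from the member
   address to the start of the struct.  */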
static gomp_barrier_t *
generation_to_barrier (int *addr)
{
  return (gomp_barrier_t *)
         ((char *) addr - __builtin_offsetof (gomp_barrier_t, generation));
}
static void
futex_wait (int *addr, int val)
{
  gomp_barrier_t *bar = generation_to_barrier (addr);
  _Futex_Wait (&bar->futex, addr, val);
}

static void
futex_wake (int *addr, int count)
{
  gomp_barrier_t *bar = generation_to_barrier (addr);
  _Futex_Wake (&bar->futex, count);
}
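/* Busy-wait for the value at ADDR to change from VAL, for at most
   gomp_spin_count_var iterations (or the throttled count when more threads
   are managed than CPUs are available).  Returns nonzero if the spin budget
   was exhausted without observing a change, i.e. the caller should fall
   back to blocking in the futex.  The spin budget is user-tunable; in
   libgomp it corresponds to the GOMP_SPINCOUNT environment variable.  */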
static int
do_spin (int *addr, int val)
{
  unsigned long long i, count = gomp_spin_count_var;

  if (__builtin_expect (gomp_managed_threads > gomp_available_cpus, 0))
    count = gomp_throttled_spin_count_var;
  for (i = 0; i < count; i++)
    if (__builtin_expect (__atomic_load_n (addr, MEMMODEL_RELAXED) != val, 0))
      return 0;
  return 1;
}
static void
do_wait (int *addr, int val)
{
  if (do_spin (addr, val))
    futex_wait (addr, val);
}
/* Everything below this point should be identical to the Linux
   implementation.  */
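/* A note on the protocol (a summary; see bar.h for the authoritative
   definitions): the generation word is a counter advanced by BAR_INCR each
   time the barrier releases, with the low bits reserved for flags such as
   BAR_TASK_PENDING, BAR_WAITING_FOR_TASK and BAR_CANCELLED.  Waiters sleep
   on the generation word and recheck it under an acquire load, so a wake-up
   is only accepted once the counter has actually advanced.  */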
void
gomp_barrier_wait_end (gomp_barrier_t *bar, gomp_barrier_state_t state)
{
  if (__builtin_expect (state & BAR_WAS_LAST, 0))
    {
      /* Next time we'll be awaiting TOTAL threads again.  */
      bar->awaited = bar->total;
      __atomic_store_n (&bar->generation, bar->generation + BAR_INCR,
                        MEMMODEL_RELEASE);
      futex_wake ((int *) &bar->generation, INT_MAX);
    }
  else
    {
      do
        do_wait ((int *) &bar->generation, state);
      while (__atomic_load_n (&bar->generation, MEMMODEL_ACQUIRE) == state);
    }
}
void
gomp_barrier_wait (gomp_barrier_t *bar)
{
  gomp_barrier_wait_end (bar, gomp_barrier_wait_start (bar));
}
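/* Typical usage, as a sketch (TOTAL is bar->total, set at initialization
   time with gomp_barrier_init): each of the TOTAL threads in the team calls

     gomp_barrier_wait (&bar);

   The last arrival resets the awaited count, bumps the generation and wakes
   everyone; the others spin briefly and then block until they observe the
   new generation.  */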
/* Like gomp_barrier_wait, except that if the encountering thread
   is not the last one to hit the barrier, it returns immediately.
   The intended usage is that a thread which intends to gomp_barrier_destroy
   this barrier calls gomp_barrier_wait, while all other threads
   call gomp_barrier_wait_last.  When gomp_barrier_wait returns,
   the barrier can be safely destroyed.  */
void
gomp_barrier_wait_last (gomp_barrier_t *bar)
{
  gomp_barrier_state_t state = gomp_barrier_wait_start (bar);
  if (state & BAR_WAS_LAST)
    gomp_barrier_wait_end (bar, state);
}
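/* Wake up COUNT threads blocked on the team barrier; a COUNT of zero means
   wake all waiters.  Used by the tasking code to release threads once new
   tasks become runnable or the barrier state changes.  */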
void
gomp_team_barrier_wake (gomp_barrier_t *bar, int count)
{
  futex_wake ((int *) &bar->generation, count == 0 ? INT_MAX : count);
}
void
gomp_team_barrier_wait_end (gomp_barrier_t *bar, gomp_barrier_state_t state)
{
  unsigned int generation, gen;

  if (__builtin_expect (state & BAR_WAS_LAST, 0))
    {
      /* Next time we'll be awaiting TOTAL threads again.  */
      struct gomp_thread *thr = gomp_thread ();
      struct gomp_team *team = thr->ts.team;

      bar->awaited = bar->total;
      team->work_share_cancelled = 0;
      if (__builtin_expect (team->task_count, 0))
        {
          gomp_barrier_handle_tasks (state);
          state &= ~BAR_WAS_LAST;
        }
      else
        {
          state &= ~BAR_CANCELLED;
          state += BAR_INCR - BAR_WAS_LAST;
          __atomic_store_n (&bar->generation, state, MEMMODEL_RELEASE);
          futex_wake ((int *) &bar->generation, INT_MAX);
          return;
        }
    }

  generation = state;
  state &= ~BAR_CANCELLED;
  do
    {
      do_wait ((int *) &bar->generation, generation);
      gen = __atomic_load_n (&bar->generation, MEMMODEL_ACQUIRE);
      if (__builtin_expect (gen & BAR_TASK_PENDING, 0))
        {
          gomp_barrier_handle_tasks (state);
          gen = __atomic_load_n (&bar->generation, MEMMODEL_ACQUIRE);
        }
      generation |= gen & BAR_WAITING_FOR_TASK;
    }
  while (gen != state + BAR_INCR);
}
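/* In the loop above, a thread that wakes up with BAR_TASK_PENDING set helps
   execute queued tasks via gomp_barrier_handle_tasks instead of going back
   to sleep; the barrier only completes once the generation has advanced by
   a full BAR_INCR, i.e. all threads arrived and all pending tasks ran.  */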
void
gomp_team_barrier_wait (gomp_barrier_t *bar)
{
  gomp_team_barrier_wait_end (bar, gomp_barrier_wait_start (bar));
}
void
gomp_team_barrier_wait_final (gomp_barrier_t *bar)
{
  gomp_barrier_state_t state = gomp_barrier_wait_final_start (bar);
  if (__builtin_expect (state & BAR_WAS_LAST, 0))
    bar->awaited_final = bar->total;
  gomp_team_barrier_wait_end (bar, state);
}
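/* The "final" variant is used for the barrier at the end of a parallel
   region; it counts arrivals in the separate awaited_final field (see
   bar.h) rather than in awaited, but otherwise completes through the
   ordinary team-barrier path above.  */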
bool
gomp_team_barrier_wait_cancel_end (gomp_barrier_t *bar,
                                   gomp_barrier_state_t state)
{
  unsigned int generation, gen;

  if (__builtin_expect (state & BAR_WAS_LAST, 0))
    {
      /* Next time we'll be awaiting TOTAL threads again.  */
      /* BAR_CANCELLED should never be set in state here, because
         cancellation means that at least one of the threads has been
         cancelled, thus on a cancellable barrier we should never see
         all threads to arrive.  */
      struct gomp_thread *thr = gomp_thread ();
      struct gomp_team *team = thr->ts.team;

      bar->awaited = bar->total;
      team->work_share_cancelled = 0;
      if (__builtin_expect (team->task_count, 0))
        {
          gomp_barrier_handle_tasks (state);
          state &= ~BAR_WAS_LAST;
        }
      else
        {
          state += BAR_INCR - BAR_WAS_LAST;
          __atomic_store_n (&bar->generation, state, MEMMODEL_RELEASE);
          futex_wake ((int *) &bar->generation, INT_MAX);
          return false;
        }
    }

  if (__builtin_expect (state & BAR_CANCELLED, 0))
    return true;

  generation = state;
  do
    {
      do_wait ((int *) &bar->generation, generation);
      gen = __atomic_load_n (&bar->generation, MEMMODEL_ACQUIRE);
      if (__builtin_expect (gen & BAR_CANCELLED, 0))
        return true;
      if (__builtin_expect (gen & BAR_TASK_PENDING, 0))
        {
          gomp_barrier_handle_tasks (state);
          gen = __atomic_load_n (&bar->generation, MEMMODEL_ACQUIRE);
        }
      generation |= gen & BAR_WAITING_FOR_TASK;
    }
  while (gen != state + BAR_INCR);

  return false;
}
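/* Same protocol as gomp_team_barrier_wait_end above, with one addition: if
   BAR_CANCELLED is observed, either on entry or after any wake-up, the
   function bails out and returns true so the caller can unwind the
   cancelled region; a normal completion returns false.  */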
bool
gomp_team_barrier_wait_cancel (gomp_barrier_t *bar)
{
  return gomp_team_barrier_wait_cancel_end (bar, gomp_barrier_wait_start (bar));
}
void
gomp_team_barrier_cancel (struct gomp_team *team)
{
  gomp_mutex_lock (&team->task_lock);
  if (team->barrier.generation & BAR_CANCELLED)
    {
      gomp_mutex_unlock (&team->task_lock);
      return;
    }
  team->barrier.generation |= BAR_CANCELLED;
  gomp_mutex_unlock (&team->task_lock);
  futex_wake ((int *) &team->barrier.generation, INT_MAX);
}
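/* Cancellation entry point: sets BAR_CANCELLED under the team's task lock
   (so it cannot race with the tasking code mutating the generation word)
   and then wakes every waiter so the cancellable waits above can observe
   the flag and return true.  This is reached when OpenMP cancellation is
   requested, e.g. through the GOMP_cancel entry point.  */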