libgomp/work.c
/* Copyright (C) 2005-2019 Free Software Foundation, Inc.
   Contributed by Richard Henderson <rth@redhat.com>.

   This file is part of the GNU Offloading and Multi Processing Library
   (libgomp).

   Libgomp is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
   FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */
/* This file contains routines to manage the work-share queue for a team
   of threads.  */
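/* A rough sketch, paraphrased from callers elsewhere in libgomp (e.g.
   loop.c and sections.c; the names and arguments below are illustrative,
   not exact), of how a worksharing construct uses these routines:

     if (gomp_work_share_start (0))
       {
         // First thread to arrive sets up the new work share.
         gomp_loop_init (gomp_thread ()->ts.work_share, ...);
         gomp_work_share_init_done ();
       }
     ... threads grab work from thr->ts.work_share ...
     gomp_work_share_end_nowait ();  // or gomp_work_share_end ()
*/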
#include "libgomp.h"
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
/* Allocate a new work share structure, preferably from current team's
   free gomp_work_share cache.  */
static struct gomp_work_share *
alloc_work_share (struct gomp_team *team)
{
  struct gomp_work_share *ws;
  unsigned int i;

  /* This is called in a critical section.  */
  if (team->work_share_list_alloc != NULL)
    {
      ws = team->work_share_list_alloc;
      team->work_share_list_alloc = ws->next_free;
      return ws;
    }
#ifdef HAVE_SYNC_BUILTINS
  ws = team->work_share_list_free;
  /* We need atomic read from work_share_list_free,
     as free_work_share can be called concurrently.  */
  __asm ("" : "+r" (ws));
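  /* The empty asm above is only a compiler barrier: by pretending to read
     and write WS it forces the value loaded from work_share_list_free to
     be treated as a single snapshot rather than re-read later.  Below,
     only the entries *after* the list head are detached; concurrent
     free_work_share calls push onto the head with a compare-and-swap, so
     leaving the head node in place keeps this pop (which runs inside a
     critical section) from racing with them.  */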
  if (ws && ws->next_free)
    {
      struct gomp_work_share *next = ws->next_free;
      ws->next_free = NULL;
      team->work_share_list_alloc = next->next_free;
      return next;
    }
#else
  gomp_mutex_lock (&team->work_share_list_free_lock);
  ws = team->work_share_list_free;
  if (ws)
    {
      team->work_share_list_alloc = ws->next_free;
      team->work_share_list_free = NULL;
      gomp_mutex_unlock (&team->work_share_list_free_lock);
      return ws;
    }
  gomp_mutex_unlock (&team->work_share_list_free_lock);
#endif
  team->work_share_chunk *= 2;
  /* Allocating gomp_work_share structures aligned is just an
     optimization, don't do it when using the fallback method.  */
#ifdef GOMP_HAVE_EFFICIENT_ALIGNED_ALLOC
  ws = gomp_aligned_alloc (__alignof (struct gomp_work_share),
                           team->work_share_chunk
                           * sizeof (struct gomp_work_share));
#else
  ws = gomp_malloc (team->work_share_chunk * sizeof (struct gomp_work_share));
#endif
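  /* The new chunk is threaded onto the team's chain of chunk allocations
     via the next_alloc field of its first element, so that team teardown
     can later walk work_shares[0].next_alloc and free every chunk.  The
     first element is returned to the caller; the rest are strung together
     on the per-team allocation list.  */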
  ws->next_alloc = team->work_shares[0].next_alloc;
  team->work_shares[0].next_alloc = ws;
  team->work_share_list_alloc = &ws[1];
  for (i = 1; i < team->work_share_chunk - 1; i++)
    ws[i].next_free = &ws[i + 1];
  ws[i].next_free = NULL;
  return ws;
}
/* Initialize an already allocated struct gomp_work_share.
   This shouldn't touch the next_alloc field.  */

void
gomp_init_work_share (struct gomp_work_share *ws, size_t ordered,
                      unsigned nthreads)
{
  gomp_mutex_init (&ws->lock);
  if (__builtin_expect (ordered, 0))
    {
#define INLINE_ORDERED_TEAM_IDS_SIZE \
  (sizeof (struct gomp_work_share) \
   - offsetof (struct gomp_work_share, inline_ordered_team_ids))
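      /* INLINE_ORDERED_TEAM_IDS_SIZE is the number of bytes available in
         the inline_ordered_team_ids tail of struct gomp_work_share, i.e.
         how much ordered bookkeeping fits without a separate heap
         allocation.  */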
      if (__builtin_expect (ordered != 1, 0))
        {
          ordered += nthreads * sizeof (*ws->ordered_team_ids) - 1;
          ordered = ordered + __alignof__ (long long) - 1;
          ordered &= ~(__alignof__ (long long) - 1);
        }
      else
        ordered = nthreads * sizeof (*ws->ordered_team_ids);
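      /* At this point ORDERED is the byte size of the ordered
         bookkeeping: one ordered_team_ids slot per thread for a plain
         ordered clause (ordered == 1 on entry), otherwise the
         caller-encoded extra size (the incoming value appears to encode
         1 + extra bytes) plus the team id slots, rounded up to the
         alignment of long long.  */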
      if (ordered > INLINE_ORDERED_TEAM_IDS_SIZE)
        ws->ordered_team_ids = gomp_malloc (ordered);
      else
        ws->ordered_team_ids = ws->inline_ordered_team_ids;
      memset (ws->ordered_team_ids, '\0', ordered);
      ws->ordered_num_used = 0;
      ws->ordered_owner = -1;
      ws->ordered_cur = 0;
    }
  else
    ws->ordered_team_ids = ws->inline_ordered_team_ids;
  gomp_ptrlock_init (&ws->next_ws, NULL);
  ws->threads_completed = 0;
}
/* Do any needed destruction of gomp_work_share fields before it
   is put back into free gomp_work_share cache or freed.  */

void
gomp_fini_work_share (struct gomp_work_share *ws)
{
  gomp_mutex_destroy (&ws->lock);
  if (ws->ordered_team_ids != ws->inline_ordered_team_ids)
    free (ws->ordered_team_ids);
  gomp_ptrlock_destroy (&ws->next_ws);
}
/* Free a work share struct, if not orphaned, put it into current
   team's free gomp_work_share cache.  */

static inline void
free_work_share (struct gomp_team *team, struct gomp_work_share *ws)
{
  gomp_fini_work_share (ws);
  if (__builtin_expect (team == NULL, 0))
    free (ws);
  else
    {
      struct gomp_work_share *next_ws;
#ifdef HAVE_SYNC_BUILTINS
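      /* Lock-free push onto the free list: link the node to the current
         head, then try to install it as the new head with a
         compare-and-swap; if another thread pushed in the meantime, the
         CAS fails and we retry against the freshly observed head.  */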
      do
        {
          next_ws = team->work_share_list_free;
          ws->next_free = next_ws;
        }
      while (!__sync_bool_compare_and_swap (&team->work_share_list_free,
                                            next_ws, ws));
#else
      gomp_mutex_lock (&team->work_share_list_free_lock);
      next_ws = team->work_share_list_free;
      ws->next_free = next_ws;
      team->work_share_list_free = ws;
      gomp_mutex_unlock (&team->work_share_list_free_lock);
#endif
    }
}
/* The current thread is ready to begin the next work sharing construct.
   In all cases, thr->ts.work_share is updated to point to the new
   structure.  In all cases the work_share lock is locked.  Return true
   if this was the first thread to reach this point.  */

bool
gomp_work_share_start (size_t ordered)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_team *team = thr->ts.team;
  struct gomp_work_share *ws;

  /* Work sharing constructs can be orphaned.  */
  if (team == NULL)
    {
      ws = gomp_malloc (sizeof (*ws));
      gomp_init_work_share (ws, ordered, 1);
      thr->ts.work_share = ws;
      return true;
    }

  ws = thr->ts.work_share;
  thr->ts.last_work_share = ws;
  ws = gomp_ptrlock_get (&ws->next_ws);
  if (ws == NULL)
    {
      /* This thread encountered a new ws first.  */
      struct gomp_work_share *ws = alloc_work_share (team);
      gomp_init_work_share (ws, ordered, team->nthreads);
      thr->ts.work_share = ws;
      return true;
    }
  else
    {
      thr->ts.work_share = ws;
      return false;
    }
}
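/* Note on the handoff above: each gomp_work_share carries a ptrlock in
   its next_ws field.  gomp_ptrlock_get returns NULL to exactly one
   thread, the first to arrive at the new construct; that thread
   allocates and initializes the next work share and is expected to
   publish it afterwards (gomp_work_share_init_done in libgomp.h does
   this by setting last_work_share->next_ws), after which the remaining
   threads' gomp_ptrlock_get calls return the published pointer.  */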
/* The current thread is done with its current work sharing construct.
   This version does imply a barrier at the end of the work-share.  */

void
gomp_work_share_end (void)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_team *team = thr->ts.team;
  gomp_barrier_state_t bstate;

  /* Work sharing constructs can be orphaned.  */
  if (team == NULL)
    {
      free_work_share (NULL, thr->ts.work_share);
      thr->ts.work_share = NULL;
      return;
    }

  bstate = gomp_barrier_wait_start (&team->barrier);

  if (gomp_barrier_last_thread (bstate))
    {
      if (__builtin_expect (thr->ts.last_work_share != NULL, 1))
        {
          team->work_shares_to_free = thr->ts.work_share;
          free_work_share (team, thr->ts.last_work_share);
        }
    }

  gomp_team_barrier_wait_end (&team->barrier, bstate);
  thr->ts.last_work_share = NULL;
}
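/* Freeing is deliberately one construct behind: the current work share's
   next_ws ptrlock is still needed to hand threads over to the next
   construct, so only the previous one (last_work_share), which every
   thread has already moved past, is freed here by the last thread at the
   barrier.  The current one is recorded in team->work_shares_to_free so
   that team teardown can reclaim whatever is still live.  */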
/* The current thread is done with its current work sharing construct.
   This version implies a cancellable barrier at the end of the work-share.  */

bool
gomp_work_share_end_cancel (void)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_team *team = thr->ts.team;
  gomp_barrier_state_t bstate;

  /* Cancellable work sharing constructs cannot be orphaned.  */
  bstate = gomp_barrier_wait_cancel_start (&team->barrier);

  if (gomp_barrier_last_thread (bstate))
    {
      if (__builtin_expect (thr->ts.last_work_share != NULL, 1))
        {
          team->work_shares_to_free = thr->ts.work_share;
          free_work_share (team, thr->ts.last_work_share);
        }
    }
  thr->ts.last_work_share = NULL;

  return gomp_team_barrier_wait_cancel_end (&team->barrier, bstate);
}
/* The current thread is done with its current work sharing construct.
   This version does NOT imply a barrier at the end of the work-share.  */

void
gomp_work_share_end_nowait (void)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_team *team = thr->ts.team;
  struct gomp_work_share *ws = thr->ts.work_share;
  unsigned completed;

  /* Work sharing constructs can be orphaned.  */
  if (team == NULL)
    {
      free_work_share (NULL, ws);
      thr->ts.work_share = NULL;
      return;
    }

  if (__builtin_expect (thr->ts.last_work_share == NULL, 0))
    return;

#ifdef HAVE_SYNC_BUILTINS
  completed = __sync_add_and_fetch (&ws->threads_completed, 1);
#else
  gomp_mutex_lock (&ws->lock);
  completed = ++ws->threads_completed;
  gomp_mutex_unlock (&ws->lock);
#endif
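  /* With no barrier, the thread that bumps threads_completed to the full
     team size is the last one to leave this construct; every other
     thread has already moved on from last_work_share, so it is safe for
     that thread to free it below.  */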
  if (completed == team->nthreads)
    {
      team->work_shares_to_free = thr->ts.work_share;
      free_work_share (team, thr->ts.last_work_share);
    }
  thr->ts.last_work_share = NULL;
}