/* Copyright (C) 2005 Free Software Foundation, Inc.
   Contributed by Richard Henderson <rth@redhat.com>.

   This file is part of the GNU OpenMP Library (libgomp).

   Libgomp is free software; you can redistribute it and/or modify it
   under the terms of the GNU Lesser General Public License as published by
   the Free Software Foundation; either version 2.1 of the License, or
   (at your option) any later version.

   Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
   FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public License for
   more details.

   You should have received a copy of the GNU Lesser General Public License
   along with libgomp; see the file COPYING.LIB.  If not, write to the
   Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
   MA 02111-1307, USA.  */
/* As a special exception, if you link this library with other files, some
   of which are compiled with GCC, to produce an executable, this library
   does not by itself cause the resulting executable to be covered by the
   GNU General Public License.  This exception does not however invalidate
   any other reasons why the executable file might be covered by the GNU
   General Public License.  */
/* This file handles the maintenance of threads in response to team
   creation and termination.  */
#include "libgomp.h"
#include <stdlib.h>
#include <string.h>
/* This array manages threads spawned from the top level, which will
   return to the idle loop once the current PARALLEL construct ends.  */
static struct gomp_thread **gomp_threads;
static unsigned gomp_threads_size;
static unsigned gomp_threads_used;
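/* gomp_threads_size is the allocated capacity of the gomp_threads array;
   gomp_threads_used is the number of threads currently parked in (or being
   released from) the dock below.  */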
/* This attribute contains PTHREAD_CREATE_DETACHED.  */
static pthread_attr_t gomp_thread_attr;

/* This barrier holds and releases threads waiting in gomp_threads.  */
static gomp_barrier_t gomp_threads_dock;

/* This is the libgomp per-thread data structure.  */
#ifdef HAVE_TLS
__thread struct gomp_thread gomp_tls_data;
#else
pthread_key_t gomp_tls_key;
#endif
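/* The gomp_thread () accessor used throughout this file is presumably the
   inline helper declared in libgomp.h: with HAVE_TLS it would simply return
   &gomp_tls_data, otherwise the value stored under gomp_tls_key via
   pthread_getspecific.  */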
/* This structure is used to communicate across pthread_create.  */

struct gomp_thread_start_data
{
  struct gomp_team_state ts;
  void (*fn) (void *);
  void *fn_data;
  bool nested;
};
/* This function is a pthread_create entry point.  This contains the idle
   loop in which a thread waits to be called up to become part of a team.  */

static void *
gomp_thread_start (void *xdata)
{
  struct gomp_thread_start_data *data = xdata;
  struct gomp_thread *thr;
  void (*local_fn) (void *);
  void *local_data;

#ifdef HAVE_TLS
  thr = &gomp_tls_data;
#else
  struct gomp_thread local_thr;
  thr = &local_thr;
  pthread_setspecific (gomp_tls_key, thr);
#endif
  gomp_sem_init (&thr->release, 0);

  /* Extract what we need from data.  */
  local_fn = data->fn;
  local_data = data->fn_data;
  thr->ts = data->ts;

  thr->ts.team->ordered_release[thr->ts.team_id] = &thr->release;

  if (data->nested)
    {
      gomp_barrier_wait (&thr->ts.team->barrier);
      local_fn (local_data);
      gomp_barrier_wait (&thr->ts.team->barrier);
    }
  else
    {
      gomp_threads[thr->ts.team_id] = thr;

      gomp_barrier_wait (&gomp_threads_dock);
      do
        {
          struct gomp_team *team;

          local_fn (local_data);

          /* Clear out the team and function data.  This is a debugging
             signal that we're in fact back in the dock.  */
          team = thr->ts.team;
          thr->fn = NULL;
          thr->data = NULL;
          thr->ts.team = NULL;
          thr->ts.work_share = NULL;
          thr->ts.team_id = 0;
          thr->ts.work_share_generation = 0;
          thr->ts.static_trip = 0;

          gomp_barrier_wait (&team->barrier);
          gomp_barrier_wait (&gomp_threads_dock);

          local_fn = thr->fn;
          local_data = thr->data;
        }
      while (local_fn);
    }

  return NULL;
}
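/* Lifecycle note, as the code above implies: a non-nested worker parks at
   gomp_threads_dock, runs the outlined function when released, clears its
   per-thread state, meets the team at team->barrier, and returns to the
   dock.  If the next release does not assign it a new fn (thr->fn is still
   NULL), the do/while exits and the detached thread simply terminates.  */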
/* Create a new team data structure.  */

static struct gomp_team *
new_team (unsigned nthreads, struct gomp_work_share *work_share)
{
  struct gomp_team *team;
  size_t size;

  size = sizeof (*team) + nthreads * sizeof (team->ordered_release[0]);
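  /* The extra nthreads * sizeof (team->ordered_release[0]) presumably
     covers a trailing array member of struct gomp_team (declared in
     libgomp.h), giving each team member a slot for its release
     semaphore.  */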
  team = gomp_malloc (size);
  gomp_mutex_init (&team->work_share_lock);

  team->work_shares = gomp_malloc (4 * sizeof (struct gomp_work_share *));
  team->generation_mask = 3;
  team->oldest_live_gen = work_share == NULL;
  team->num_live_gen = work_share != NULL;
  team->work_shares[0] = work_share;
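  /* The fields above appear to set up a small ring of four live work share
     generations, with generation_mask (3) wrapping generation numbers into
     indices of the 4-entry work_shares array; the code that consumes them
     lives outside this file (presumably work.c).  */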
  team->nthreads = nthreads;
  gomp_barrier_init (&team->barrier, nthreads);

  gomp_sem_init (&team->master_release, 0);
  team->ordered_release[0] = &team->master_release;

  return team;
}
/* Free a team data structure.  */

static void
free_team (struct gomp_team *team)
{
  free (team->work_shares);
  gomp_mutex_destroy (&team->work_share_lock);
  gomp_barrier_destroy (&team->barrier);
  gomp_sem_destroy (&team->master_release);
  free (team);
}
/* Launch a team.  */

void
gomp_team_start (void (*fn) (void *), void *data, unsigned nthreads,
                 struct gomp_work_share *work_share)
{
  struct gomp_thread_start_data *start_data;
  struct gomp_thread *thr, *nthr;
  struct gomp_team *team;
  bool nested;
  unsigned i, n, old_threads_used = 0;

  thr = gomp_thread ();
  nested = thr->ts.team != NULL;

  team = new_team (nthreads, work_share);

  /* Always save the previous state, even if this isn't a nested team.
     In particular, we should save any work share state from an outer
     orphaned work share construct.  */
  team->prev_ts = thr->ts;

  thr->ts.team = team;
  thr->ts.work_share = work_share;
  thr->ts.team_id = 0;
  thr->ts.work_share_generation = 0;
  thr->ts.static_trip = 0;

  if (nthreads == 1)
    return;

  i = 1;

  /* We only allow the reuse of idle threads for non-nested PARALLEL
     regions.  This appears to be implied by the semantics of
     threadprivate variables, but perhaps that's reading too much into
     things.  Certainly it does prevent any locking problems, since
     only the initial program thread will modify gomp_threads.  */
  if (!nested)
    {
      old_threads_used = gomp_threads_used;

      if (nthreads <= old_threads_used)
        n = nthreads;
      else if (old_threads_used == 0)
        {
          n = 0;
          gomp_barrier_init (&gomp_threads_dock, nthreads);
        }
      else
        {
          n = old_threads_used;

          /* Increase the barrier threshold to make sure all new
             threads arrive before the team is released.  */
          gomp_barrier_reinit (&gomp_threads_dock, nthreads);
        }

      /* Not true yet, but soon will be.  We're going to release all
         threads from the dock, and those that aren't part of the
         team will exit.  */
      gomp_threads_used = nthreads;

      /* Release existing idle threads.  */
      for (; i < n; ++i)
        {
          nthr = gomp_threads[i];
          nthr->ts.team = team;
          nthr->ts.work_share = work_share;
          nthr->ts.team_id = i;
          nthr->ts.work_share_generation = 0;
          nthr->ts.static_trip = 0;
          nthr->fn = fn;
          nthr->data = data;
          team->ordered_release[i] = &nthr->release;
        }

      if (i == nthreads)
        goto do_release;

      /* If necessary, expand the size of the gomp_threads array.  It is
         expected that changes in the number of threads are rare, thus we
         make no effort to expand gomp_threads_size geometrically.  */
      if (nthreads >= gomp_threads_size)
        {
          gomp_threads_size = nthreads + 1;
          gomp_threads
            = gomp_realloc (gomp_threads,
                            gomp_threads_size
                            * sizeof (struct gomp_thread *));
        }
    }
  start_data = alloca (sizeof (struct gomp_thread_start_data) * (nthreads-i));
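  /* start_data lives on this (the master's) stack.  That is safe here
     because each new thread copies what it needs out of its slot before
     waiting at its barrier, and the master does not leave this frame until
     the matching wait at do_release below has completed.  */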
  /* Launch new threads.  */
  for (; i < nthreads; ++i, ++start_data)
    {
      pthread_t pt;
      int err;

      start_data->ts.team = team;
      start_data->ts.work_share = work_share;
      start_data->ts.team_id = i;
      start_data->ts.work_share_generation = 0;
      start_data->ts.static_trip = 0;
      start_data->fn = fn;
      start_data->fn_data = data;
      start_data->nested = nested;

      err = pthread_create (&pt, &gomp_thread_attr,
                            gomp_thread_start, start_data);
      if (err != 0)
        gomp_fatal ("Thread creation failed: %s", strerror (err));
    }

 do_release:
  gomp_barrier_wait (nested ? &team->barrier : &gomp_threads_dock);

  /* Decrease the barrier threshold to match the number of threads
     that should arrive back at the end of this team.  The extra
     threads should be exiting.  Note that we arrange for this test
     to never be true for nested teams.  */
  if (nthreads < old_threads_used)
    gomp_barrier_reinit (&gomp_threads_dock, nthreads);
}
/* Terminate the current team.  This is only to be called by the master
   thread.  We assume that we must wait for the other threads.  */

void
gomp_team_end (void)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_team *team = thr->ts.team;

  gomp_barrier_wait (&team->barrier);

  thr->ts = team->prev_ts;

  free_team (team);
}
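/* A minimal usage sketch of the pair above, assuming a caller such as the
   GOMP_parallel_start/GOMP_parallel_end entry points (defined outside this
   file) drives it in the obvious way; subfn, data and nthreads are
   placeholders:

     gomp_team_start (subfn, data, nthreads, NULL);
     subfn (data);        -- the master runs its own share of the region
     gomp_team_end ();    -- wait for the team, then restore prev_ts
*/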
/* Constructors for this file.  */

static void __attribute__((constructor))
initialize_team (void)
{
  struct gomp_thread *thr;

#ifndef HAVE_TLS
  pthread_key_create (&gomp_tls_key, free);
#endif

#ifdef HAVE_TLS
  thr = &gomp_tls_data;
#else
  thr = gomp_malloc_cleared (sizeof (*thr));
  pthread_setspecific (gomp_tls_key, thr);
#endif
  gomp_sem_init (&thr->release, 0);

  pthread_attr_init (&gomp_thread_attr);
  pthread_attr_setdetachstate (&gomp_thread_attr, PTHREAD_CREATE_DETACHED);
}
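/* In the non-TLS build, the initial thread's gomp_thread block is heap
   allocated and registered under gomp_tls_key, whose destructor is free, so
   it is reclaimed automatically when the thread exits; worker threads avoid
   the heap by pointing the key at a stack-local struct in
   gomp_thread_start.  */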