libgomp/team.c
/* Copyright (C) 2005 Free Software Foundation, Inc.
   Contributed by Richard Henderson <rth@redhat.com>.

   This file is part of the GNU OpenMP Library (libgomp).

   Libgomp is free software; you can redistribute it and/or modify it
   under the terms of the GNU Lesser General Public License as published by
   the Free Software Foundation; either version 2.1 of the License, or
   (at your option) any later version.

   Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
   FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public License for
   more details.

   You should have received a copy of the GNU Lesser General Public License
   along with libgomp; see the file COPYING.LIB.  If not, write to the
   Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
   MA 02110-1301, USA.  */
/* As a special exception, if you link this library with other files, some
   of which are compiled with GCC, to produce an executable, this library
   does not by itself cause the resulting executable to be covered by the
   GNU General Public License.  This exception does not however invalidate
   any other reasons why the executable file might be covered by the GNU
   General Public License.  */
/* This file handles the maintenance of threads in response to team
   creation and termination.  */
#include "libgomp.h"
#include <stdlib.h>
#include <string.h>
/* This array manages threads spawned from the top level, which will
   return to the idle loop once the current PARALLEL construct ends.  */
static struct gomp_thread **gomp_threads;
static unsigned gomp_threads_size;
static unsigned gomp_threads_used;
/* This attribute contains PTHREAD_CREATE_DETACHED.  */
pthread_attr_t gomp_thread_attr;

/* This barrier holds and releases threads waiting in gomp_threads.  */
static gomp_barrier_t gomp_threads_dock;

/* This is the libgomp per-thread data structure.  */
#ifdef HAVE_TLS
__thread struct gomp_thread gomp_tls_data;
#else
pthread_key_t gomp_tls_key;
#endif
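
/* For reference: the gomp_thread () accessor used throughout this file is
   defined in libgomp.h, not here.  A minimal sketch of what it presumably
   looks like, assuming the same HAVE_TLS split as above:

     static inline struct gomp_thread *
     gomp_thread (void)
     {
     #ifdef HAVE_TLS
       return &gomp_tls_data;
     #else
       return pthread_getspecific (gomp_tls_key);
     #endif
     }
*/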
/* This structure is used to communicate across pthread_create.  */

struct gomp_thread_start_data
{
  struct gomp_team_state ts;
  void (*fn) (void *);
  void *fn_data;
  bool nested;
};
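
/* Note: instances of this structure are carved out with gomp_alloca on the
   master's stack in gomp_team_start below.  The barrier wait at the end of
   gomp_team_start appears to be what keeps that storage alive until each
   newly created thread has copied the fields out in gomp_thread_start.  */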
/* This function is a pthread_create entry point.  This contains the idle
   loop in which a thread waits to be called up to become part of a team.  */
static void *
gomp_thread_start (void *xdata)
{
  struct gomp_thread_start_data *data = xdata;
  struct gomp_thread *thr;
  void (*local_fn) (void *);
  void *local_data;

#ifdef HAVE_TLS
  thr = &gomp_tls_data;
#else
  struct gomp_thread local_thr;
  thr = &local_thr;
  pthread_setspecific (gomp_tls_key, thr);
#endif
  gomp_sem_init (&thr->release, 0);

  /* Extract what we need from data.  */
  local_fn = data->fn;
  local_data = data->fn_data;
  thr->ts = data->ts;

  thr->ts.team->ordered_release[thr->ts.team_id] = &thr->release;

  if (data->nested)
    {
      gomp_barrier_wait (&thr->ts.team->barrier);
      local_fn (local_data);
      gomp_barrier_wait (&thr->ts.team->barrier);
    }
  else
    {
      gomp_threads[thr->ts.team_id] = thr;

      gomp_barrier_wait (&gomp_threads_dock);
      do
        {
          struct gomp_team *team;

          local_fn (local_data);

          /* Clear out the team and function data.  This is a debugging
             signal that we're in fact back in the dock.  */
          team = thr->ts.team;
          thr->fn = NULL;
          thr->data = NULL;
          thr->ts.team = NULL;
          thr->ts.work_share = NULL;
          thr->ts.team_id = 0;
          thr->ts.work_share_generation = 0;
          thr->ts.static_trip = 0;

          gomp_barrier_wait (&team->barrier);
          gomp_barrier_wait (&gomp_threads_dock);

          local_fn = thr->fn;
          local_data = thr->data;
        }
      while (local_fn);
    }

  return NULL;
}
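
/* A rough summary of the non-nested handshake above, from the worker's
   point of view: park at gomp_threads_dock; the master fills in thr->fn
   and thr->data and releases the dock; run the parallel region; clear the
   team state; wait at team->barrier (the end-of-team barrier shared with
   gomp_team_end); park at the dock again; and finally exit if the next
   release left thr->fn NULL, i.e. the thread was not reused.  */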
/* Create a new team data structure.  */
static struct gomp_team *
new_team (unsigned nthreads, struct gomp_work_share *work_share)
{
  struct gomp_team *team;
  size_t size;

  size = sizeof (*team) + nthreads * sizeof (team->ordered_release[0]);
  team = gomp_malloc (size);
  gomp_mutex_init (&team->work_share_lock);

  team->work_shares = gomp_malloc (4 * sizeof (struct gomp_work_share *));
  team->generation_mask = 3;
  team->oldest_live_gen = work_share == NULL;
  team->num_live_gen = work_share != NULL;
  team->work_shares[0] = work_share;

  team->nthreads = nthreads;
  gomp_barrier_init (&team->barrier, nthreads);

  gomp_sem_init (&team->master_release, 0);
  team->ordered_release[0] = &team->master_release;

  return team;
}
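
/* Two details of new_team worth spelling out: the team object is sized to
   hold nthreads entries of the trailing ordered_release[] array, and the
   work_shares buffer starts out with 4 slots while generation_mask is 3,
   which presumably lets a work-share generation number be reduced to a
   slot index with a simple mask.  */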
/* Free a team data structure.  */
static void
free_team (struct gomp_team *team)
{
  free (team->work_shares);
  gomp_mutex_destroy (&team->work_share_lock);
  gomp_barrier_destroy (&team->barrier);
  gomp_sem_destroy (&team->master_release);
  free (team);
}
/* Launch a team.  */
void
gomp_team_start (void (*fn) (void *), void *data, unsigned nthreads,
                 struct gomp_work_share *work_share)
{
  struct gomp_thread_start_data *start_data;
  struct gomp_thread *thr, *nthr;
  struct gomp_team *team;
  bool nested;
  unsigned i, n, old_threads_used = 0;

  thr = gomp_thread ();
  nested = thr->ts.team != NULL;

  team = new_team (nthreads, work_share);

  /* Always save the previous state, even if this isn't a nested team.
     In particular, we should save any work share state from an outer
     orphaned work share construct.  */
  team->prev_ts = thr->ts;

  thr->ts.team = team;
  thr->ts.work_share = work_share;
  thr->ts.team_id = 0;
  thr->ts.work_share_generation = 0;
  thr->ts.static_trip = 0;

  if (nthreads == 1)
    return;

  i = 1;
  /* We only allow the reuse of idle threads for non-nested PARALLEL
     regions.  This appears to be implied by the semantics of
     threadprivate variables, but perhaps that's reading too much into
     things.  Certainly it does prevent any locking problems, since
     only the initial program thread will modify gomp_threads.  */
  if (!nested)
    {
      old_threads_used = gomp_threads_used;

      if (nthreads <= old_threads_used)
        n = nthreads;
      else if (old_threads_used == 0)
        {
          n = 0;
          gomp_barrier_init (&gomp_threads_dock, nthreads);
        }
      else
        {
          n = old_threads_used;

          /* Increase the barrier threshold to make sure all new
             threads arrive before the team is released.  */
          gomp_barrier_reinit (&gomp_threads_dock, nthreads);
        }

      /* Not true yet, but soon will be.  We're going to release all
         threads from the dock, and those that aren't part of the
         team will exit.  */
      gomp_threads_used = nthreads;

      /* Release existing idle threads.  */
      for (; i < n; ++i)
        {
          nthr = gomp_threads[i];
          nthr->ts.team = team;
          nthr->ts.work_share = work_share;
          nthr->ts.team_id = i;
          nthr->ts.work_share_generation = 0;
          nthr->ts.static_trip = 0;
          nthr->fn = fn;
          nthr->data = data;
          team->ordered_release[i] = &nthr->release;
        }

      if (i == nthreads)
        goto do_release;

      /* If necessary, expand the size of the gomp_threads array.  It is
         expected that changes in the number of threads are rare, thus we
         make no effort to expand gomp_threads_size geometrically.  */
      if (nthreads >= gomp_threads_size)
        {
          gomp_threads_size = nthreads + 1;
          gomp_threads
            = gomp_realloc (gomp_threads,
                            gomp_threads_size
                            * sizeof (struct gomp_thread *));
        }
    }
  start_data = gomp_alloca (sizeof (struct gomp_thread_start_data)
                            * (nthreads - i));

  /* Launch new threads.  */
  for (; i < nthreads; ++i, ++start_data)
    {
      pthread_t pt;
      int err;

      start_data->ts.team = team;
      start_data->ts.work_share = work_share;
      start_data->ts.team_id = i;
      start_data->ts.work_share_generation = 0;
      start_data->ts.static_trip = 0;
      start_data->fn = fn;
      start_data->fn_data = data;
      start_data->nested = nested;

      err = pthread_create (&pt, &gomp_thread_attr,
                            gomp_thread_start, start_data);
      if (err != 0)
        gomp_fatal ("Thread creation failed: %s", strerror (err));
    }
 do_release:
  gomp_barrier_wait (nested ? &team->barrier : &gomp_threads_dock);

  /* Decrease the barrier threshold to match the number of threads
     that should arrive back at the end of this team.  The extra
     threads should be exiting.  Note that we arrange for this test
     to never be true for nested teams.  */
  if (nthreads < old_threads_used)
    gomp_barrier_reinit (&gomp_threads_dock, nthreads);
}
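
/* A concrete walk-through of the shrinking case, as an assumed example
   rather than anything taken from the surrounding sources: suppose the
   previous top-level team used 8 threads and the new one asks for 4.
   old_threads_used is then 8 and n becomes 4, so workers 1-3 are handed
   the new fn/data directly; the master's wait on gomp_threads_dock
   releases the seven parked workers, the four whose fn was left NULL
   simply exit, and the final reinit drops the dock threshold back to 4
   for the next team.  */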
/* Terminate the current team.  This is only to be called by the master
   thread.  We assume that we must wait for the other threads.  */
void
gomp_team_end (void)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_team *team = thr->ts.team;

  gomp_barrier_wait (&team->barrier);

  thr->ts = team->prev_ts;

  free_team (team);
}
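
/* For context, a sketch of how these entry points are presumably driven
   from the GOMP_parallel_* builtins in parallel.c; the exact helper name
   gomp_resolve_num_threads is an assumption, not something taken from
   this file:

     void
     GOMP_parallel_start (void (*fn) (void *), void *data,
                          unsigned num_threads)
     {
       num_threads = gomp_resolve_num_threads (num_threads);
       gomp_team_start (fn, data, num_threads, NULL);
     }

     void
     GOMP_parallel_end (void)
     {
       gomp_team_end ();
     }
*/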
/* Constructors for this file.  */
static void __attribute__((constructor))
initialize_team (void)
{
  struct gomp_thread *thr;

#ifndef HAVE_TLS
  static struct gomp_thread initial_thread_tls_data;

  pthread_key_create (&gomp_tls_key, NULL);
  pthread_setspecific (gomp_tls_key, &initial_thread_tls_data);
#endif

#ifdef HAVE_TLS
  thr = &gomp_tls_data;
#else
  thr = &initial_thread_tls_data;
#endif
  gomp_sem_init (&thr->release, 0);
}
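
/* Note that gomp_thread_attr, declared above, is not set up in this
   constructor; presumably it is initialized elsewhere in the library
   (the environment-handling code would be the natural place), with
   PTHREAD_CREATE_DETACHED set as the comment on its declaration says.  */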