[official-gcc.git] / libgomp / team.c
blob 7d50bfc29af83e3504270fbf7acca1288571926a
/* Copyright (C) 2005, 2006, 2007 Free Software Foundation, Inc.
   Contributed by Richard Henderson <rth@redhat.com>.

   This file is part of the GNU OpenMP Library (libgomp).

   Libgomp is free software; you can redistribute it and/or modify it
   under the terms of the GNU Lesser General Public License as published by
   the Free Software Foundation; either version 2.1 of the License, or
   (at your option) any later version.

   Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
   FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public License for
   more details.

   You should have received a copy of the GNU Lesser General Public License
   along with libgomp; see the file COPYING.LIB.  If not, write to the
   Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
   MA 02110-1301, USA.  */

/* As a special exception, if you link this library with other files, some
   of which are compiled with GCC, to produce an executable, this library
   does not by itself cause the resulting executable to be covered by the
   GNU General Public License.  This exception does not however invalidate
   any other reasons why the executable file might be covered by the GNU
   General Public License.  */

/* This file handles the maintenance of threads in response to team
   creation and termination.  */
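
/* Rough lifecycle (a sketch only; the public GOMP_parallel_* entry points
   that drive this code live in parallel.c, not here):

     gomp_team_start (fn, data, nthreads, NULL);  * spawn or wake the team *
     fn (data);                                   * master runs its share  *
     gomp_team_end ();                            * wait for workers, free *

   Worker threads run FN via gomp_thread_start below; non-nested workers
   then park in the gomp_threads_dock barrier awaiting reuse.  */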

#include "libgomp.h"
#include <stdlib.h>
#include <string.h>

/* This array manages threads spawned from the top level, which will
   return to the idle loop once the current PARALLEL construct ends.  */
static struct gomp_thread **gomp_threads;
static unsigned gomp_threads_size;
static unsigned gomp_threads_used;

/* This attribute contains PTHREAD_CREATE_DETACHED.  */
pthread_attr_t gomp_thread_attr;

/* This barrier holds and releases threads waiting in gomp_threads.  */
static gomp_barrier_t gomp_threads_dock;

/* This is the libgomp per-thread data structure.  */
#ifdef HAVE_TLS
__thread struct gomp_thread gomp_tls_data;
#else
pthread_key_t gomp_tls_key;
#endif

/* This structure is used to communicate across pthread_create.  */
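/* TS is the initial team state for the new thread, FN/FN_DATA are the
   outlined parallel region and its argument, and NESTED tells the thread
   whether it belongs to a nested team, in which case it exits when the
   region ends instead of returning to the dock.  */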
struct gomp_thread_start_data
{
  struct gomp_team_state ts;
  void (*fn) (void *);
  void *fn_data;
  bool nested;
};

/* This function is a pthread_create entry point.  This contains the idle
   loop in which a thread waits to be called up to become part of a team.  */
static void *
gomp_thread_start (void *xdata)
{
  struct gomp_thread_start_data *data = xdata;
  struct gomp_thread *thr;
  void (*local_fn) (void *);
  void *local_data;

#ifdef HAVE_TLS
  thr = &gomp_tls_data;
#else
  struct gomp_thread local_thr;
  thr = &local_thr;
  pthread_setspecific (gomp_tls_key, thr);
#endif
  gomp_sem_init (&thr->release, 0);

  /* Extract what we need from data.  */
  local_fn = data->fn;
  local_data = data->fn_data;
  thr->ts = data->ts;

  thr->ts.team->ordered_release[thr->ts.team_id] = &thr->release;

  if (data->nested)
    {
      gomp_barrier_wait (&thr->ts.team->barrier);
      local_fn (local_data);
      gomp_barrier_wait (&thr->ts.team->barrier);
    }
  else
    {
      gomp_threads[thr->ts.team_id] = thr;

      gomp_barrier_wait (&gomp_threads_dock);
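      /* Dock handshake: the master fills in thr->fn / thr->data and the
         team state for each docked thread, then releases everyone by
         waiting on gomp_threads_dock itself (see gomp_team_start).  After
         running the region we pass the team barrier, so the master may
         return from gomp_team_end, and re-enter the dock; a NULL thr->fn
         on the next release means this thread is surplus and exits.  */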
      do
        {
          struct gomp_team *team;

          local_fn (local_data);

          /* Clear out the team and function data.  This is a debugging
             signal that we're in fact back in the dock.  */
          team = thr->ts.team;
          thr->fn = NULL;
          thr->data = NULL;
          thr->ts.team = NULL;
          thr->ts.work_share = NULL;
          thr->ts.team_id = 0;
          thr->ts.work_share_generation = 0;
          thr->ts.static_trip = 0;

          gomp_barrier_wait (&team->barrier);
          gomp_barrier_wait (&gomp_threads_dock);

          local_fn = thr->fn;
          local_data = thr->data;
        }
      while (local_fn);
    }

  return NULL;
}

/* Create a new team data structure.  */

static struct gomp_team *
new_team (unsigned nthreads, struct gomp_work_share *work_share)
{
  struct gomp_team *team;
  size_t size;
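
  /* ordered_release is the array of per-thread release semaphore pointers
     that sits at the end of struct gomp_team (see libgomp.h); the size
     computed below reserves one slot per thread beyond sizeof (*team).  */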
  size = sizeof (*team) + nthreads * sizeof (team->ordered_release[0]);
  team = gomp_malloc (size);
  gomp_mutex_init (&team->work_share_lock);

  team->work_shares = gomp_malloc (4 * sizeof (struct gomp_work_share *));
  team->generation_mask = 3;
  team->oldest_live_gen = work_share == NULL;
  team->num_live_gen = work_share != NULL;
  team->work_shares[0] = work_share;

  team->nthreads = nthreads;
  gomp_barrier_init (&team->barrier, nthreads);

  gomp_sem_init (&team->master_release, 0);
  team->ordered_release[0] = &team->master_release;

  return team;
}

/* Free a team data structure.  */

static void
free_team (struct gomp_team *team)
{
  free (team->work_shares);
  gomp_mutex_destroy (&team->work_share_lock);
  gomp_barrier_destroy (&team->barrier);
  gomp_sem_destroy (&team->master_release);
  free (team);
}

/* Launch a team.  */

void
gomp_team_start (void (*fn) (void *), void *data, unsigned nthreads,
                 struct gomp_work_share *work_share)
{
  struct gomp_thread_start_data *start_data;
  struct gomp_thread *thr, *nthr;
  struct gomp_team *team;
  bool nested;
  unsigned i, n, old_threads_used = 0;
  pthread_attr_t thread_attr, *attr;

  thr = gomp_thread ();
  nested = thr->ts.team != NULL;

  team = new_team (nthreads, work_share);

  /* Always save the previous state, even if this isn't a nested team.
     In particular, we should save any work share state from an outer
     orphaned work share construct.  */
  team->prev_ts = thr->ts;

  thr->ts.team = team;
  thr->ts.work_share = work_share;
  thr->ts.team_id = 0;
  thr->ts.work_share_generation = 0;
  thr->ts.static_trip = 0;

  if (nthreads == 1)
    return;

  i = 1;

  /* We only allow the reuse of idle threads for non-nested PARALLEL
     regions.  This appears to be implied by the semantics of
     threadprivate variables, but perhaps that's reading too much into
     things.  Certainly it does prevent any locking problems, since
     only the initial program thread will modify gomp_threads.  */
  if (!nested)
    {
      old_threads_used = gomp_threads_used;

      if (nthreads <= old_threads_used)
        n = nthreads;
      else if (old_threads_used == 0)
        {
          n = 0;
          gomp_barrier_init (&gomp_threads_dock, nthreads);
        }
      else
        {
          n = old_threads_used;

          /* Increase the barrier threshold to make sure all new
             threads arrive before the team is released.  */
          gomp_barrier_reinit (&gomp_threads_dock, nthreads);
        }

      /* Not true yet, but soon will be.  We're going to release all
         threads from the dock, and those that aren't part of the
         team will exit.  */
      gomp_threads_used = nthreads;

      /* Release existing idle threads.  */
      for (; i < n; ++i)
        {
          nthr = gomp_threads[i];
          nthr->ts.team = team;
          nthr->ts.work_share = work_share;
          nthr->ts.team_id = i;
          nthr->ts.work_share_generation = 0;
          nthr->ts.static_trip = 0;
          nthr->fn = fn;
          nthr->data = data;
          team->ordered_release[i] = &nthr->release;
        }
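      /* Note that the reused threads are not actually woken here; they are
         still parked in gomp_threads_dock and are released when the master
         reaches the dock barrier at do_release below.  */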

      if (i == nthreads)
        goto do_release;

      /* If necessary, expand the size of the gomp_threads array.  It is
         expected that changes in the number of threads are rare, thus we
         make no effort to expand gomp_threads_size geometrically.  */
      if (nthreads >= gomp_threads_size)
        {
          gomp_threads_size = nthreads + 1;
          gomp_threads
            = gomp_realloc (gomp_threads,
                            gomp_threads_size
                            * sizeof (struct gomp_thread *));
        }
    }
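
  /* When CPU affinity is requested, work from a private copy of the thread
     attributes so that gomp_init_thread_affinity can adjust the attribute
     object per thread without disturbing the global gomp_thread_attr.  */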
  attr = &gomp_thread_attr;
  if (gomp_cpu_affinity != NULL)
    {
      size_t stacksize;
      pthread_attr_init (&thread_attr);
      pthread_attr_setdetachstate (&thread_attr, PTHREAD_CREATE_DETACHED);
      if (! pthread_attr_getstacksize (&gomp_thread_attr, &stacksize))
        pthread_attr_setstacksize (&thread_attr, stacksize);
      attr = &thread_attr;
    }

  start_data = gomp_alloca (sizeof (struct gomp_thread_start_data)
                            * (nthreads-i));

  /* Launch new threads.  */
  for (; i < nthreads; ++i, ++start_data)
    {
      pthread_t pt;
      int err;

      start_data->ts.team = team;
      start_data->ts.work_share = work_share;
      start_data->ts.team_id = i;
      start_data->ts.work_share_generation = 0;
      start_data->ts.static_trip = 0;
      start_data->fn = fn;
      start_data->fn_data = data;
      start_data->nested = nested;

      if (gomp_cpu_affinity != NULL)
        gomp_init_thread_affinity (attr);

      err = pthread_create (&pt, attr, gomp_thread_start, start_data);
      if (err != 0)
        gomp_fatal ("Thread creation failed: %s", strerror (err));
    }

  if (gomp_cpu_affinity != NULL)
    pthread_attr_destroy (&thread_attr);
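
  /* Release the team.  Nested teams synchronize only on their own barrier;
     a top-level team releases gomp_threads_dock, which wakes the reused
     idle threads and the freshly created ones at the same time.  */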
 do_release:
  gomp_barrier_wait (nested ? &team->barrier : &gomp_threads_dock);

  /* Decrease the barrier threshold to match the number of threads
     that should arrive back at the end of this team.  The extra
     threads should be exiting.  Note that we arrange for this test
     to never be true for nested teams.  */
  if (nthreads < old_threads_used)
    gomp_barrier_reinit (&gomp_threads_dock, nthreads);
}

/* Terminate the current team.  This is only to be called by the master
   thread.  We assume that we must wait for the other threads.  */

void
gomp_team_end (void)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_team *team = thr->ts.team;

  gomp_barrier_wait (&team->barrier);

  thr->ts = team->prev_ts;
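
  /* Every worker has passed the team barrier above and does not touch TEAM
     again (non-nested workers go back to the dock, nested ones exit), which
     is what makes the free below safe.  */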
  free_team (team);
}

/* Constructors for this file.  */
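
/* initialize_team runs automatically when the library is loaded (via the
   constructor attribute) and sets up the per-thread data for the initial
   program thread: the TLS key on targets without native TLS support, and
   the thread's release semaphore.  */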
static void __attribute__((constructor))
initialize_team (void)
{
  struct gomp_thread *thr;

#ifndef HAVE_TLS
  static struct gomp_thread initial_thread_tls_data;

  pthread_key_create (&gomp_tls_key, NULL);
  pthread_setspecific (gomp_tls_key, &initial_thread_tls_data);
#endif

#ifdef HAVE_TLS
  thr = &gomp_tls_data;
#else
  thr = &initial_thread_tls_data;
#endif
  gomp_sem_init (&thr->release, 0);
}