libmudflap/mf-hooks3.c
/* Mudflap: narrow-pointer bounds-checking by tree rewriting.
   Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
   Contributed by Frank Ch. Eigler <fche@redhat.com>
   and Graydon Hoare <graydon@redhat.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

In addition to the permissions in the GNU General Public License, the
Free Software Foundation gives you unlimited permission to link the
compiled version of this file into combinations with other programs,
and to distribute those combinations without any restriction coming
from the use of this file.  (The General Public License restrictions
do apply in other respects; for example, they cover modification of
the file, and distribution when not linked into a combine
executable.)

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */
#include "config.h"

#ifndef HAVE_SOCKLEN_T
#define socklen_t int
#endif

/* These attempt to coax various unix flavours to declare all our
   needed tidbits in the system headers.  */
#if !defined(__FreeBSD__) && !defined(__APPLE__)
#define _POSIX_SOURCE
#endif /* Some BSDs break <sys/socket.h> if this is defined. */
#define _GNU_SOURCE
#define _XOPEN_SOURCE
#define _BSD_TYPES
#define __EXTENSIONS__
#define _ALL_SOURCE
#define _LARGE_FILE_API
#define _XOPEN_SOURCE_EXTENDED 1

#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <unistd.h>
#include <assert.h>
#include <errno.h>
#include <limits.h>
#include <sched.h>
#include <fcntl.h>

#include "mf-runtime.h"
#include "mf-impl.h"

#ifdef _MUDFLAP
#error "Do not compile this file with -fmudflap!"
#endif


/* Multithreading support hooks.  */


#ifndef LIBMUDFLAPTH
#error "pthreadstuff is to be included only in libmudflapth"
#endif
/* Describe a thread (dead or alive).  */
struct pthread_info
{
  short used_p;        /* Is this slot in use?  */
  short dead_p;        /* Is this thread dead?  */
  pthread_t self;      /* The thread id.  */

  /* If libmudflapth allocated the stack, store its adjusted base/size.  */
  void *stack;
  size_t stack_size;
  /* The _alloc fields store unadjusted values from the moment of allocation.  */
  void *stack_alloc;
  size_t stack_size_alloc;

  int *thread_errno;
  enum __mf_state_enum state;
};
/* Describe the startup information for a new user thread.  */
struct pthread_start_info
{
  /* The user's thread entry point and argument.  */
  void * (*user_fn)(void *);
  void *user_arg;

  /* Set by the user thread when this startup struct may be disposed of.  */
  struct pthread_info *thread_info;
};
/* To avoid dynamic memory allocation, use a static array to store these
   thread description structs.  The second (_idx) array is used as a
   simple caching hash table, mapping PTHREAD_HASH(thread) to its
   index in __mf_pthread_info[].  */

#define LIBMUDFLAPTH_THREADS_MAX 1024
static struct pthread_info __mf_pthread_info[LIBMUDFLAPTH_THREADS_MAX];
static unsigned __mf_pthread_info_idx[LIBMUDFLAPTH_THREADS_MAX];
#define PTHREAD_HASH(p) ((unsigned) (p) % LIBMUDFLAPTH_THREADS_MAX)
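
/* Illustrative sketch, not part of the library: a fast-path lookup
   through the _idx cache could look like the fragment below; the real
   lookup, including its linear-scan fallback, is __mf_find_threadinfo()
   further down.  */
#if 0
static struct pthread_info *
example_cache_lookup (pthread_t self)
{
  unsigned slot = __mf_pthread_info_idx [PTHREAD_HASH (self)];
  struct pthread_info *pi = & __mf_pthread_info [slot];
  if (pi->used_p && pi->self == self)
    return pi;        /* cache hit */
  return NULL;        /* miss: caller falls back to a linear scan */
}
#endif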
/* Find any old empty entry in __mf_pthread_info; mark it used and
   return it.  Return NULL if there are no more available slots.  */
struct pthread_info*
__mf_allocate_blank_threadinfo (unsigned* idx)
{
  static unsigned probe = LIBMUDFLAPTH_THREADS_MAX-1;
  unsigned probe_at_start = probe;
  static pthread_mutex_t mutex =
#ifdef PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP
    PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP;
#else
    PTHREAD_MUTEX_INITIALIZER;
#endif
  int rc;

  rc = pthread_mutex_lock (& mutex);
  assert (rc == 0);

  /* Look for a blank spot starting one past the last one we found.  */
  do
    {
      probe = (probe + 1) % LIBMUDFLAPTH_THREADS_MAX;
      struct pthread_info* pi = & __mf_pthread_info [probe];
      if (! pi->used_p)
        {
          /* memset (pi, 0, sizeof (*pi)); */
          pi->used_p = 1;
          if (idx != NULL) *idx = probe;
          /* VERBOSE_TRACE ("allocated threadinfo slot %u\n", probe); */
          rc = pthread_mutex_unlock (& mutex);
          assert (rc == 0);
          return pi;
        }
    }
  while (probe != probe_at_start);

  rc = pthread_mutex_unlock (& mutex);
  assert (rc == 0);
  return NULL;
}
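
/* Illustrative sketch, not part of the library: a typical caller grabs a
   blank slot and primes the lookup cache for the new thread, much as
   __mf_find_threadinfo() does below.  */
#if 0
static struct pthread_info *
example_register_thread (pthread_t self)
{
  unsigned slot;
  struct pthread_info *pi = __mf_allocate_blank_threadinfo (& slot);
  if (pi != NULL)
    {
      pi->self = self;
      __mf_pthread_info_idx [PTHREAD_HASH (self)] = slot;
    }
  return pi;
}
#endif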
/* Find and return the pthread_info struct for the current thread.
   There might already be one in __mf_pthread_info for this thread, in
   which case return it.  There may not be one (if this is a main
   thread, an auxiliary -lpthread manager, or an actual user thread
   making an early call into libmudflap).  In these cases, create a new
   entry.  If it's not the main thread, put it into reentrant
   initial state.  */
static struct pthread_info*
__mf_find_threadinfo ()
{
  pthread_t it = pthread_self ();
  unsigned *hash = & __mf_pthread_info_idx [PTHREAD_HASH (it)];
  struct pthread_info *result = NULL;
  static pthread_t last;
  static int main_thread_seen_p;

  /* Check out the lookup cache; failing that, do a linear search
     around the table.  */
  {
    struct pthread_info* pi = & __mf_pthread_info [*hash];
    unsigned i;

    if (pi->used_p && pi->self == it)
      result = pi;
    else for (i = 0; i < LIBMUDFLAPTH_THREADS_MAX; i++)
      {
        struct pthread_info* pi2 = & __mf_pthread_info [i];
        if (pi2->used_p && pi2->self == it)
          {
            *hash = i;
            result = pi2;
            break;
          }
      }
  }

  if (result == NULL)
    {
      /* Create a __mf_pthread_info record for the main thread.  It's
         different from the auto-recognized worker bees because for
         example we can assume that it's a fully stack/errno-equipped
         thread.  */

      /* This must be the main thread, until now unseen in libmudflap.  */
      unsigned *hash = & __mf_pthread_info_idx [PTHREAD_HASH (it)];
      struct pthread_info* pi = __mf_allocate_blank_threadinfo (hash);
      assert (pi != NULL);
      assert (pi->used_p);
      result = pi;
      result->self = it;

      if (! main_thread_seen_p)
        {
          result->state = active;
          /* NB: leave result->thread_errno unset, as the main thread's errno
             has already been registered in __mf_init.  */
          /* NB: leave stack-related fields unset, to avoid
             deallocation.  */
          main_thread_seen_p = 1;
          VERBOSE_TRACE ("identified self as main thread\n");
        }
      else
        {
          result->state = reentrant;
          /* NB: leave result->thread_errno unset, as a worker thread's
             errno is unlikely to be used, and user threads fill them
             in during __mf_pthread_spawn().  */
          /* NB: leave stack-related fields unset, leaving pthread_create
             to fill them in for user threads, leaving them empty for
             other threads.  */
          VERBOSE_TRACE ("identified self as new aux or user thread\n");
        }
    }

  if (last != it)
    {
      VERBOSE_TRACE ("found threadinfo for %u, slot %u\n",
                     (unsigned) it,
                     (unsigned) *hash);
      last = it;
    }

  assert (result != NULL);
  assert (result->self == it);

  return result;
}
/* Return a pointer to the per-thread __mf_state variable.  */
enum __mf_state_enum *
__mf_state_perthread ()
{
  assert (! __mf_starting_p);
  return & (__mf_find_threadinfo()->state);
}
static void
__mf_pthread_cleanup (void *arg)
{
  struct pthread_info *pi = arg;

  /* XXX: This unregistration is not safe on platforms where distinct
     threads share errno (or at least its virtual address).  */
  if (pi->thread_errno != NULL)
    __mf_unregister (pi->thread_errno, sizeof (int), __MF_TYPE_GUESS);

  /* XXX: Only detached threads should designate themselves as dead
     here.  Non-detached threads are marked dead after their
     personalized pthread_join() call.  */
  pi->state = reentrant;
  pi->dead_p = 1;

  VERBOSE_TRACE ("thread pi %p exiting\n", pi);
}
static void *
__mf_pthread_spawner (void *arg)
{
  struct pthread_info *pi = __mf_find_threadinfo ();
  void *result = NULL;

  /* Turn off reentrancy indications.  */
  assert (pi->state == reentrant);
  pi->state = active;

  VERBOSE_TRACE ("new user thread\n");

  if (__mf_opts.heur_std_data)
    {
      pi->thread_errno = & errno;
      __mf_register (pi->thread_errno, sizeof (int),
                     __MF_TYPE_GUESS, "errno area (thread)");
      /* NB: we could use __MF_TYPE_STATIC above, but we guess that
         the thread errno is coming out of some dynamically allocated
         pool that we already know of as __MF_TYPE_HEAP.  */
    }

  /* We considered using pthread_key_t objects instead of these
     cleanup stacks, but they were less cooperative with the
     interposed malloc hooks in libmudflap.  */
  pthread_cleanup_push (& __mf_pthread_cleanup, pi);

  /* Call the user thread.  */
  {
    /* Extract the given entry point and argument.  */
    struct pthread_start_info *psi = arg;
    void * (*user_fn)(void *) = psi->user_fn;
    void *user_arg = psi->user_arg;

    /* Signal the main thread to resume.  */
    psi->thread_info = pi;

    result = (*user_fn)(user_arg);
  }

  pthread_cleanup_pop (1 /* execute */);

  /* NB: there is a slight race here.  The pthread_info field will now
     say this thread is dead, but it may still be running .. right
     here.  We try to check for this possibility using the
     pthread_kill test below.  */

  return result;
}
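
/* Illustrative sketch, not part of the library: the pthread_create
   wrapper below and __mf_pthread_spawner() above synchronize through a
   single field of struct pthread_start_info.  The parent's side of that
   handshake is essentially this loop (the child's side is the
   "psi->thread_info = pi" store above).  */
#if 0
static struct pthread_info *
example_wait_for_child (struct pthread_start_info *psi)
{
  volatile struct pthread_start_info *psip = psi;
  while (psip->thread_info == NULL)
    sched_yield ();              /* no timeout, mirroring the wrapper */
  return psi->thread_info;       /* now safe to fill in the stack fields */
}
#endif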
#if PIC
/* A special bootstrap variant.  */
int
__mf_0fn_pthread_create (pthread_t *thr, const pthread_attr_t *attr,
                         void * (*start) (void *), void *arg)
{
  return -1;
}
#endif
#undef pthread_create
WRAPPER(int, pthread_create, pthread_t *thr, const pthread_attr_t *attr,
        void * (*start) (void *), void *arg)
{
  DECLARE(int, munmap, void *p, size_t l);
  DECLARE(void *, mmap, void *p, size_t l, int prot, int flags, int fd, off_t of);
  DECLARE(int, pthread_create, pthread_t *thr, const pthread_attr_t *attr,
          void * (*start) (void *), void *arg);
  int result;
  pthread_attr_t override_attr;
  void *override_stack;
  size_t override_stacksize;
  void *override_stack_alloc = (void *) 0;
  size_t override_stacksize_alloc = 0;
  unsigned i;

  TRACE ("pthread_create\n");

  /* Garbage-collect dead threads' stacks.  */
  LOCKTH ();
  for (i = 0; i < LIBMUDFLAPTH_THREADS_MAX; i++)
    {
      struct pthread_info *pi = & __mf_pthread_info [i];
      if (! pi->used_p)
        continue;
      if (! pi->dead_p)
        continue;

      /* VERBOSE_TRACE ("thread %u pi %p stack cleanup deferred (%u)\n",
         (unsigned) pi->self, pi, pi->dead_p); */

      /* Delay actual deallocation by a few cycles, to try to discourage
         the race mentioned at the end of __mf_pthread_spawner().  */
      if (pi->dead_p)
        pi->dead_p ++;
      if (pi->dead_p >= 10 /* XXX */)
        {
          if (pi->stack)
            CALL_REAL (munmap, pi->stack_alloc, pi->stack_size_alloc);

          VERBOSE_TRACE ("slot %u freed, stack %p\n", i, pi->stack_alloc);
          memset (pi, 0, sizeof (*pi));

          /* One round of garbage collection is enough.  */
          break;
        }
    }
  UNLOCKTH ();

  /* Let's allocate a stack for this thread, if one is not already
     supplied by the caller.  We don't want to let e.g. the
     linuxthreads manager thread do this allocation.  */
  if (attr != NULL)
    override_attr = *attr;
  else
    pthread_attr_init (& override_attr);

  /* Get supplied attributes, if any.  */
  /* XXX: consider using POSIX2K attr_getstack() */
  if (pthread_attr_getstackaddr (& override_attr, & override_stack) != 0 ||
      pthread_attr_getstacksize (& override_attr, & override_stacksize) != 0)
    {
      override_stack = NULL;
      override_stacksize = 0;
    }
  /* Do we need to allocate the new thread's stack?  */
  if (__mf_opts.thread_stack && override_stack == NULL)
    {
      uintptr_t alignment = 256; /* power of two */

      /* Perturb the initial stack addresses slightly, to encourage
         threads to have nonconflicting entries in the lookup cache
         for their tracked stack objects.  */
      static unsigned perturb = 0;
      const unsigned perturb_delta = 32;
      const unsigned perturb_count = 16;
      perturb += perturb_delta;
      if (perturb > perturb_delta*perturb_count) perturb = 0;
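
      /* Worked example (illustrative): with the constants above,
         successive pthread_create calls use perturb = 32, 64, ..., 512;
         the call after that wraps back to 0, so at most seventeen
         distinct stack offsets are in play at once.  */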
      /* Use glibc x86 defaults.  */
      /* Should have been defined in <limits.h>.  */
#ifndef PTHREAD_STACK_MIN
#define PTHREAD_STACK_MIN 65536
#endif
      override_stacksize = max (PTHREAD_STACK_MIN, __mf_opts.thread_stack * 1024);


#if defined(MAP_ANONYMOUS)
#define MF_MAP_ANON MAP_ANONYMOUS
#elif defined(MAP_ANON)
#define MF_MAP_ANON MAP_ANON
#endif

#ifndef MAP_FAILED
#define MAP_FAILED ((void *) -1)
#endif

#ifdef MF_MAP_ANON
      override_stack = CALL_REAL (mmap, NULL, override_stacksize,
                                  PROT_READ|PROT_WRITE,
                                  MAP_PRIVATE|MF_MAP_ANON,
                                  0, 0);
#else
      /* Try mapping /dev/zero instead.  */
      {
        static int zerofd = -1;
        if (zerofd == -1)
          zerofd = open ("/dev/zero", O_RDWR);
        if (zerofd == -1)
          override_stack = MAP_FAILED;
        else
          override_stack = CALL_REAL (mmap, NULL, override_stacksize,
                                      PROT_READ|PROT_WRITE,
                                      MAP_PRIVATE, zerofd, 0);
      }
#endif

      if (override_stack == 0 || override_stack == MAP_FAILED)
        {
          errno = EAGAIN;
          return -1;
        }

      VERBOSE_TRACE ("thread stack alloc %p size %lu\n",
                     override_stack, (unsigned long) override_stacksize);

      /* Save the original allocated values for later deallocation.  */
      override_stack_alloc = override_stack;
      override_stacksize_alloc = override_stacksize;

      /* The stackaddr pthreads attribute is a candidate stack pointer.
         It must point near the top or the bottom of this buffer, depending
         on whether the stack grows downward or upward, and must be suitably
         aligned.  On x86 the stack grows down, so we set stackaddr near
         the top.  */
      /* XXX: port logic */
      override_stack = (void *)
        (((uintptr_t) override_stack + override_stacksize - alignment - perturb)
         & (~(uintptr_t)(alignment-1)));
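
      /* Worked example (illustrative numbers only): if mmap returned
         0x40000000 for a 64 KiB stack and perturb is 32, the candidate
         stack pointer is (0x40000000 + 0x10000 - 256 - 32) rounded down
         to a 256-byte boundary, i.e. 0x4000fe00.  */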
      /* XXX: consider using POSIX2K attr_setstack() */
      if (pthread_attr_setstackaddr (& override_attr, override_stack) != 0 ||
          pthread_attr_setstacksize (& override_attr,
                                     override_stacksize - alignment - perturb) != 0)
        {
          /* This should not happen.  */
          CALL_REAL (munmap, override_stack, override_stacksize);
          errno = EAGAIN;
          return -1;
        }
    }
  /* Actually start the child thread.  */
  {
    struct pthread_start_info psi;
    struct pthread_info *pi = NULL;

    /* Fill in startup-control fields.  */
    psi.user_fn = start;
    psi.user_arg = arg;
    psi.thread_info = NULL;

    /* Actually create the thread.  */
    __mf_state = reentrant;
    result = CALL_REAL (pthread_create, thr, & override_attr,
                        & __mf_pthread_spawner, (void *) & psi);
    __mf_state = active;
    /* We also hook pthread_join/pthread_exit to get into reentrant
       mode during thread shutdown/cleanup.  */

    /* Wait until the child thread has progressed far enough into its
       __mf_pthread_spawner() call.  */
    while (1) /* XXX: timeout? */
      {
        volatile struct pthread_start_info *psip = & psi;
        pi = psip->thread_info;
        if (pi != NULL)
          break;
        sched_yield ();
      }

    /* Fill in the remaining fields of pthread_info.  */
    pi->stack = override_stack;
    pi->stack_size = override_stacksize;
    pi->stack_alloc = override_stack_alloc;
    pi->stack_size_alloc = override_stacksize_alloc;
    /* XXX: this might be too late for future heuristics that attempt
       to use thread stack bounds.  We may need to put the new thread
       to sleep.  */
  }


  /* May need to clean up if we created a pthread_attr_t of our own.  */
  if (attr == NULL)
    pthread_attr_destroy (& override_attr); /* NB: this shouldn't deallocate the stack */

  return result;
}
#if PIC
/* A special bootstrap variant.  */
int
__mf_0fn_pthread_join (pthread_t thr, void **rc)
{
  return -1;
}
#endif
#undef pthread_join
WRAPPER(int, pthread_join, pthread_t thr, void **rc)
{
  DECLARE(int, pthread_join, pthread_t thr, void **rc);
  int result;

  TRACE ("pthread_join\n");
  __mf_state = reentrant;
  result = CALL_REAL (pthread_join, thr, rc);
  __mf_state = active;

  return result;
}
#if PIC
/* A special bootstrap variant.  */
void
__mf_0fn_pthread_exit (void *rc)
{
}
#endif
#undef pthread_exit
WRAPPER(void, pthread_exit, void *rc)
{
  DECLARE(void, pthread_exit, void *rc);

  TRACE ("pthread_exit\n");
  /* __mf_state = reentrant; */
  CALL_REAL (pthread_exit, rc);
  /* NOTREACHED */
}