1 /* Mudflap: narrow-pointer bounds-checking by tree rewriting.
2 Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
3 Contributed by Frank Ch. Eigler <fche@redhat.com>
4 and Graydon Hoare <graydon@redhat.com>
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 2, or (at your option) any later version.
13 In addition to the permissions in the GNU General Public License, the
14 Free Software Foundation gives you unlimited permission to link the
15 compiled version of this file into combinations with other programs,
16 and to distribute those combinations without any restriction coming
17 from the use of this file. (The General Public License restrictions
18 do apply in other respects; for example, they cover modification of
19 the file, and distribution when not linked into a combined executable.)
22 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
23 WARRANTY; without even the implied warranty of MERCHANTABILITY or
24 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
27 You should have received a copy of the GNU General Public License
28 along with GCC; see the file COPYING. If not, write to the Free
29 Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
35 #ifndef HAVE_SOCKLEN_T
39 /* These attempt to coax various unix flavours to declare all our
40 needed tidbits in the system headers. */
41 #if !defined(__FreeBSD__) && !defined(__APPLE__)
43 #endif /* Some BSDs break <sys/socket.h> if this is defined. */
47 #define __EXTENSIONS__
49 #define _LARGE_FILE_API
50 #define _XOPEN_SOURCE_EXTENDED 1
55 #include <sys/types.h>
64 #include "mf-runtime.h"
68 #error "Do not compile this file with -fmudflap!"
72 /* Multithreading support hooks. */
77 #error "pthreadstuff is to be included only in libmudflapth"
82 /* Describe a thread (dead or alive). */
85 short used_p
; /* Is this slot in use? */
86 short dead_p
; /* Is this thread dead? */
87 pthread_t self
; /* The thread id. */
89 /* If libmudflapth allocated the stack, store its adjusted base/size. */
92 /* The _alloc fields store unadjusted values from the moment of allocation. */
94 size_t stack_size_alloc
;
97 enum __mf_state_enum state
;
101 /* Describe the startup information for a new user thread. */
102 struct pthread_start_info
104 /* The user's thread entry point and argument. */
105 void * (*user_fn
)(void *);
108 /* Set by user thread when this startup struct may be disposed of. */
109 struct pthread_info
*thread_info
;
115 /* To avoid dynamic memory allocation, use static array to store these
116 thread description structs. The second (_idx) array is used as a
117 simple caching hash table, mapping PTHREAD_HASH(thread) to its
118 index in __mf_pthread_info[]. */
120 #define LIBMUDFLAPTH_THREADS_MAX 1024
121 static struct pthread_info __mf_pthread_info
[LIBMUDFLAPTH_THREADS_MAX
];
122 static unsigned __mf_pthread_info_idx
[LIBMUDFLAPTH_THREADS_MAX
];
123 #define PTHREAD_HASH(p) ((unsigned) (p) % LIBMUDFLAPTH_THREADS_MAX)
126 /* Find any old empty entry in __mf_pthread_info; mark it used and
127 return it. Return NULL if there are no more available slots. */
129 __mf_allocate_blank_threadinfo (unsigned* idx
)
131 static unsigned probe
= LIBMUDFLAPTH_THREADS_MAX
-1;
132 unsigned probe_at_start
= probe
;
133 static pthread_mutex_t mutex
=
134 #ifdef PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP
135 PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP
;
137 PTHREAD_MUTEX_INITIALIZER
;
141 rc
= pthread_mutex_lock (& mutex
);
144 /* Look for a blank spot starting one past the last one we found. */
147 probe
= (probe
+ 1) % LIBMUDFLAPTH_THREADS_MAX
;
148 struct pthread_info
* pi
= & __mf_pthread_info
[probe
];
151 /* memset (pi, 0, sizeof (*pi)); */
153 if (idx
!= NULL
) *idx
= probe
;
154 /* VERBOSE_TRACE ("allocated threadinfo slot %u\n", probe); */
155 rc
= pthread_mutex_unlock (& mutex
);
160 while (probe
!= probe_at_start
);
162 rc
= pthread_mutex_unlock (& mutex
);
168 /* Find and return the pthread_info struct for the current thread.
169 There might already be one in __mf_pthread_info for this thread, in
170 which case return it. There may not be one (if this is a main
171 thread, an auxiliary -lpthread manager, or an actual user thread
172 making an early call into libmudflap. In these cases, create a new
173 entry. If not it's not the main thread, put it into reentrant
176 NB: VERBOSE_TRACE type functions are not generally safe to call
177 from this context, since a new thread might just be "booting up",
178 making printf unsafe to call.
180 static struct pthread_info
*
181 __mf_find_threadinfo ()
183 pthread_t it
= pthread_self ();
184 unsigned *hash
= & __mf_pthread_info_idx
[PTHREAD_HASH (it
)];
185 struct pthread_info
*result
= NULL
;
186 static pthread_t last
;
187 static int main_thread_seen_p
;
189 /* Check out the lookup cache; failing that, do a linear search
192 struct pthread_info
* pi
= & __mf_pthread_info
[*hash
];
195 if (pi
->used_p
&& pi
->self
== it
)
197 else for (i
= 0; i
< LIBMUDFLAPTH_THREADS_MAX
; i
++)
199 struct pthread_info
* pi2
= & __mf_pthread_info
[i
];
200 if (pi2
->used_p
&& pi2
->self
== it
)
211 /* Create a __mf_pthread_info record for the main thread. It's
212 different from the auto-recognized worker bees because for
213 example we can assume that it's a fully stack/errno-equipped
216 /* This must be the main thread, until now unseen in libmudflap. */
217 unsigned *hash
= & __mf_pthread_info_idx
[PTHREAD_HASH (it
)];
218 struct pthread_info
* pi
= __mf_allocate_blank_threadinfo (hash
);
224 if (! main_thread_seen_p
)
226 result
->state
= active
;
227 /* NB: leave result->thread_errno unset, as main thread's errno
228 has already been registered in __mf_init. */
229 /* NB: leave stack-related fields unset, to avoid
231 main_thread_seen_p
= 1;
232 /* VERBOSE_TRACE ("identified self as main thread\n"); */
236 result
->state
= reentrant
;
237 /* NB: leave result->thread_errno unset, as worker thread's
238 errno is unlikely to be used, and user threads fill them
239 in during __mf_pthread_spawn(). */
240 /* NB: leave stack-related fields unset, leaving pthread_create
241 to fill them in for user threads, leaving them empty for
243 /* VERBOSE_TRACE ("identified self as new aux or user thread\n"); */
250 VERBOSE_TRACE ("found threadinfo for %u, slot %u\n",
257 assert (result
!= NULL
);
258 assert (result
->self
== it
);
265 /* Return a pointer to the per-thread __mf_state variable. */
266 enum __mf_state_enum
*
267 __mf_state_perthread ()
269 assert (! __mf_starting_p
);
270 return & (__mf_find_threadinfo()->state
);
275 __mf_pthread_cleanup (void *arg
)
277 struct pthread_info
*pi
= arg
;
279 /* XXX: This unregistration is not safe on platforms where distinct
280 threads share errno (or at least its virtual address). */
281 if (pi
->thread_errno
!= NULL
)
282 __mf_unregister (pi
->thread_errno
, sizeof (int), __MF_TYPE_GUESS
);
284 /* XXX: Only detached threads should designate themselves as dead
285 here. Non-detached threads are marked dead after their
286 personalized pthread_join() call. */
287 pi
->state
= reentrant
;
290 VERBOSE_TRACE ("thread pi %p exiting\n", pi
);
295 __mf_pthread_spawner (void *arg
)
297 struct pthread_info
*pi
= __mf_find_threadinfo ();
300 /* Turn off reentrancy indications. */
301 assert (pi
->state
== reentrant
);
304 VERBOSE_TRACE ("new user thread\n");
306 if (__mf_opts
.heur_std_data
)
308 pi
->thread_errno
= & errno
;
309 __mf_register (pi
->thread_errno
, sizeof (int),
310 __MF_TYPE_GUESS
, "errno area (thread)");
311 /* NB: we could use __MF_TYPE_STATIC above, but we guess that
312 the thread errno is coming out of some dynamically allocated
313 pool that we already know of as __MF_TYPE_HEAP. */
316 /* We considered using pthread_key_t objects instead of these
317 cleanup stacks, but they were less cooperative with the
318 interposed malloc hooks in libmudflap. */
319 pthread_cleanup_push (& __mf_pthread_cleanup
, pi
);
321 /* Call user thread */
323 /* Extract given entry point and argument. */
324 struct pthread_start_info
*psi
= arg
;
325 void * (*user_fn
)(void *) = psi
->user_fn
;
326 void *user_arg
= psi
->user_arg
;
328 /* Signal the main thread to resume. */
329 psi
->thread_info
= pi
;
331 result
= (*user_fn
)(user_arg
);
334 pthread_cleanup_pop (1 /* execute */);
336 /* NB: there is a slight race here. The pthread_info field will now
337 say this thread is dead, but it may still be running .. right
338 here. We try to check for this possibility using the
339 pthread_kill test below. */
346 /* A special bootstrap variant. */
348 __mf_0fn_pthread_create (pthread_t
*thr
, const pthread_attr_t
*attr
,
349 void * (*start
) (void *), void *arg
)
356 #undef pthread_create
357 WRAPPER(int, pthread_create
, pthread_t
*thr
, const pthread_attr_t
*attr
,
358 void * (*start
) (void *), void *arg
)
360 DECLARE(int, munmap
, void *p
, size_t l
);
361 DECLARE(void *, mmap
, void *p
, size_t l
, int prot
, int flags
, int fd
, off_t of
);
362 DECLARE(int, pthread_create
, pthread_t
*thr
, const pthread_attr_t
*attr
,
363 void * (*start
) (void *), void *arg
);
365 pthread_attr_t override_attr
;
366 void *override_stack
;
367 size_t override_stacksize
;
368 void *override_stack_alloc
= (void *) 0;
369 size_t override_stacksize_alloc
= 0;
372 TRACE ("pthread_create\n");
374 /* Garbage-collect dead threads' stacks. */
376 for (i
= 0; i
< LIBMUDFLAPTH_THREADS_MAX
; i
++)
378 struct pthread_info
*pi
= & __mf_pthread_info
[i
];
384 /* VERBOSE_TRACE ("thread %u pi %p stack cleanup deferred (%u)\n",
385 (unsigned) pi->self, pi, pi->dead_p); */
387 /* Delay actual deallocation by a few cycles, try to discourage the
388 race mentioned at the end of __mf_pthread_spawner(). */
391 if (pi
->dead_p
>= 10 /* XXX */)
394 CALL_REAL (munmap
, pi
->stack_alloc
, pi
->stack_size_alloc
);
396 VERBOSE_TRACE ("slot %u freed, stack %p\n", i
, pi
->stack_alloc
);
397 memset (pi
, 0, sizeof (*pi
));
399 /* One round of garbage collection is enough. */
405 /* Let's allocate a stack for this thread, if one is not already
406 supplied by the caller. We don't want to let e.g. the
407 linuxthreads manager thread do this allocation. */
409 override_attr
= *attr
;
411 pthread_attr_init (& override_attr
);
413 /* Get supplied attributes, if any. */
414 /* XXX: consider using POSIX2K attr_getstack() */
415 if (pthread_attr_getstackaddr (& override_attr
, & override_stack
) != 0 ||
416 pthread_attr_getstacksize (& override_attr
, & override_stacksize
) != 0)
418 override_stack
= NULL
;
419 override_stacksize
= 0;
422 /* Do we need to allocate the new thread's stack? */
423 if (__mf_opts
.thread_stack
&& override_stack
== NULL
)
425 uintptr_t alignment
= 256; /* power of two */
427 /* Perturb the initial stack addresses slightly, to encourage
428 threads to have nonconflicting entries in the lookup cache
429 for their tracked stack objects. */
430 static unsigned perturb
= 0;
431 const unsigned perturb_delta
= 32;
432 const unsigned perturb_count
= 16;
433 perturb
+= perturb_delta
;
434 if (perturb
> perturb_delta
*perturb_count
) perturb
= 0;
436 /* Use glibc x86 defaults */
437 /* Should have been defined in <limits.h> */
438 #ifndef PTHREAD_STACK_MIN
439 #define PTHREAD_STACK_MIN 65536
441 override_stacksize
= max (PTHREAD_STACK_MIN
, __mf_opts
.thread_stack
* 1024);
444 #if defined(MAP_ANONYMOUS)
445 #define MF_MAP_ANON MAP_ANONYMOUS
446 #elif defined(MAP_ANON)
447 #define MF_MAP_ANON MAP_ANON
451 #define MAP_FAILED ((void *) -1)
455 override_stack
= CALL_REAL (mmap
, NULL
, override_stacksize
,
456 PROT_READ
|PROT_WRITE
,
457 MAP_PRIVATE
|MF_MAP_ANON
,
460 /* Try mapping /dev/zero instead. */
462 static int zerofd
= -1;
464 zerofd
= open ("/dev/zero", O_RDWR
);
466 override_stack
= MAP_FAILED
;
468 override_stack
= CALL_REAL (mmap
, NULL
, override_stacksize
,
469 PROT_READ
|PROT_WRITE
,
470 MAP_PRIVATE
, zerofd
, 0);
474 if (override_stack
== 0 || override_stack
== MAP_FAILED
)
480 VERBOSE_TRACE ("thread stack alloc %p size %lu\n",
481 override_stack
, (unsigned long) override_stacksize
);
483 /* Save the original allocated values for later deallocation. */
484 override_stack_alloc
= override_stack
;
485 override_stacksize_alloc
= override_stacksize
;
487 /* The stackaddr pthreads attribute is a candidate stack pointer.
488 It must point near the top or the bottom of this buffer, depending
489 on whether stack grows downward or upward, and suitably aligned.
490 On the x86, it grows down, so we set stackaddr near the top. */
491 /* XXX: port logic */
492 override_stack
= (void *)
493 (((uintptr_t) override_stack
+ override_stacksize
- alignment
- perturb
)
494 & (~(uintptr_t)(alignment
-1)));
496 /* XXX: consider using POSIX2K attr_setstack() */
497 if (pthread_attr_setstackaddr (& override_attr
, override_stack
) != 0 ||
498 pthread_attr_setstacksize (& override_attr
,
499 override_stacksize
- alignment
- perturb
) != 0)
501 /* This should not happen. */
502 CALL_REAL (munmap
, override_stack
, override_stacksize
);
508 /* Actually start the child thread. */
510 struct pthread_start_info psi
;
511 struct pthread_info
*pi
= NULL
;
513 /* Fill in startup-control fields. */
516 psi
.thread_info
= NULL
;
518 /* Actually create the thread. */
519 __mf_state
= reentrant
;
520 result
= CALL_REAL (pthread_create
, thr
, & override_attr
,
521 & __mf_pthread_spawner
, (void *) & psi
);
523 /* We also hook pthread_join/pthread_exit to get into reentrant
524 mode during thread shutdown/cleanup. */
526 /* Wait until child thread has progressed far enough into its
527 __mf_pthread_spawner() call. */
528 while (1) /* XXX: timeout? */
530 volatile struct pthread_start_info
*psip
= & psi
;
531 pi
= psip
->thread_info
;
537 /* Fill in remaining fields in pthread_info. */
538 pi
->stack
= override_stack
;
539 pi
->stack_size
= override_stacksize
;
540 pi
->stack_alloc
= override_stack_alloc
;
541 pi
->stack_size_alloc
= override_stacksize_alloc
;
542 /* XXX: this might be too late for future heuristics that attempt
543 to use thread stack bounds. We may need to put the new thread
548 /* May need to clean up if we created a pthread_attr_t of our own. */
550 pthread_attr_destroy (& override_attr
); /* NB: this shouldn't deallocate stack */
558 /* A special bootstrap variant. */
560 __mf_0fn_pthread_join (pthread_t thr
, void **rc
)
568 WRAPPER(int, pthread_join
, pthread_t thr
, void **rc
)
570 DECLARE(int, pthread_join
, pthread_t thr
, void **rc
);
573 TRACE ("pthread_join\n");
574 __mf_state
= reentrant
;
575 result
= CALL_REAL (pthread_join
, thr
, rc
);
583 /* A special bootstrap variant. */
585 __mf_0fn_pthread_exit (void *rc
)
592 WRAPPER(void, pthread_exit
, void *rc
)
594 DECLARE(void, pthread_exit
, void *rc
);
596 TRACE ("pthread_exit\n");
597 /* __mf_state = reentrant; */
598 CALL_REAL (pthread_exit
, rc
);
600 exit (0); /* Satisfy noreturn attribute of pthread_exit. */