/* Mudflap: narrow-pointer bounds-checking by tree rewriting.
   Copyright (C) 2002-2013 Free Software Foundation, Inc.
   Contributed by Frank Ch. Eigler <fche@redhat.com>
   and Graydon Hoare <graydon@redhat.com>

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it under
   the terms of the GNU General Public License as published by the Free
   Software Foundation; either version 3, or (at your option) any later
   version.

   GCC is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or
   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   for more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */

#include "config.h"

#ifndef HAVE_SOCKLEN_T
#define socklen_t int
#endif

/* These attempt to coax various unix flavours to declare all our
   needed tidbits in the system headers.  */
#if !defined(__FreeBSD__) && !defined(__APPLE__)
#define _POSIX_SOURCE
#endif /* Some BSDs break <sys/socket.h> if this is defined. */
#define _GNU_SOURCE
#define _XOPEN_SOURCE
#define _BSD_TYPES
#define __EXTENSIONS__
#define _ALL_SOURCE
#define _LARGE_FILE_API
#define _XOPEN_SOURCE_EXTENDED 1

#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <sys/types.h>
#include <unistd.h>
#include <assert.h>
#include <errno.h>
#include <limits.h>
#include <time.h>

#include "mf-runtime.h"
#include "mf-impl.h"

#ifdef _MUDFLAP
#error "Do not compile this file with -fmudflap!"
#endif

/* Memory allocation related hook functions.  Some of these are
   intercepted via linker wrapping or symbol interposition.  Others
   use plain macros in mf-runtime.h.  */
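
/* Orientation note (a rough sketch; the authoritative definitions live
   in mf-impl.h): in the statically linked case the wrappers rely on
   GNU ld's "--wrap" option, so that a call such as

       p = malloc (n);

   in instrumented code actually reaches __wrap_malloc, while the
   wrapper can still obtain the untouched allocator under the name
   __real_malloc.  WRAPPER() emits the wrapper's definition and
   CALL_REAL() invokes the underlying function.  In the shared-library
   (PIC) build the wrappers instead keep the public names and locate
   the real functions at run time; the __mf_0fn_* variants below stand
   in for callers that arrive before that lookup is possible.  */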

#if PIC

enum { BS = 4096, NB=10 };
static char __mf_0fn_bufs[NB][BS];
static unsigned __mf_0fn_bufs_used[NB];

/* A special bootstrap variant. */
void *
__mf_0fn_malloc (size_t c)
{
  unsigned i;

  for (i=0; i<NB; i++)
    {
      if (! __mf_0fn_bufs_used[i] && c < BS)
        {
          __mf_0fn_bufs_used[i] = 1;
          return & __mf_0fn_bufs[i][0];
        }
    }
  return NULL;
}
#endif
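
/* Why a bootstrap allocator is needed at all (an explanatory note; the
   dispatch itself lives in mf-impl.h): in the PIC build the real
   malloc and friends are only located at run time, and the lookup
   machinery can itself want memory before it has finished.  Until the
   runtime is up, small requests are satisfied out of the static
   __mf_0fn_bufs pool above.  Nothing ever returns these chunks; the
   free() wrapper below merely recognises pointers into the pool and
   skips them.  */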

#undef malloc
WRAPPER(void *, malloc, size_t c)
{
  size_t size_with_crumple_zones;
  DECLARE(void *, malloc, size_t c);
  void *result;
  BEGIN_PROTECT (malloc, c);

  size_with_crumple_zones =
    CLAMPADD(c,CLAMPADD(__mf_opts.crumple_zone,
                        __mf_opts.crumple_zone));
  BEGIN_MALLOC_PROTECT ();
  result = (char *) CALL_REAL (malloc, size_with_crumple_zones);
  END_MALLOC_PROTECT ();

  if (LIKELY(result))
    {
      result += __mf_opts.crumple_zone;
      __mf_register (result, c, __MF_TYPE_HEAP, "malloc region");
      /* XXX: register __MF_TYPE_NOACCESS for crumple zones.  */
    }

  return result;
}
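
/* Layout produced by the wrapper above, shown for illustration only:

     |<- crumple_zone ->|<------ c bytes ------>|<- crumple_zone ->|
     ^                  ^
     |                  pointer returned to the caller and registered
     raw block from the underlying malloc

   Only the middle c bytes are registered as a __MF_TYPE_HEAP object,
   so accesses that stray into the surrounding padding fall outside
   every registered object and are reported as violations.  */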

#ifdef PIC
/* A special bootstrap variant. */
void *
__mf_0fn_calloc (size_t c, size_t n)
{
  return __mf_0fn_malloc (c * n);
}
#endif

#undef calloc
WRAPPER(void *, calloc, size_t c, size_t n)
{
  size_t size_with_crumple_zones;
  DECLARE(void *, calloc, size_t, size_t);
  DECLARE(void *, malloc, size_t);
  DECLARE(void *, memset, void *, int, size_t);
  char *result;
  BEGIN_PROTECT (calloc, c, n);

  size_with_crumple_zones =
    CLAMPADD((c * n), /* XXX: CLAMPMUL */
             CLAMPADD(__mf_opts.crumple_zone,
                      __mf_opts.crumple_zone));
  BEGIN_MALLOC_PROTECT ();
  result = (char *) CALL_REAL (malloc, size_with_crumple_zones);
  END_MALLOC_PROTECT ();

  if (LIKELY(result))
    memset (result, 0, size_with_crumple_zones);

  if (LIKELY(result))
    {
      result += __mf_opts.crumple_zone;
      __mf_register (result, c*n /* XXX: clamp */, __MF_TYPE_HEAP_I, "calloc region");
      /* XXX: register __MF_TYPE_NOACCESS for crumple zones.  */
    }

  return result;
}
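
/* Note on the two XXX markers above: the additions are saturated via
   CLAMPADD, but the product c * n is not, so a multiplication overflow
   would both under-allocate and under-register.  A saturating CLAMPMUL
   counterpart (which this file does not define) is what the markers
   are asking for.  */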

#if PIC
/* A special bootstrap variant. */
void *
__mf_0fn_realloc (void *buf, size_t c)
{
  return NULL;
}
#endif

#undef realloc
WRAPPER(void *, realloc, void *buf, size_t c)
{
  DECLARE(void * , realloc, void *, size_t);
  size_t size_with_crumple_zones;
  char *base = buf;
  unsigned saved_wipe_heap;
  char *result;
  BEGIN_PROTECT (realloc, buf, c);

  if (LIKELY(buf))
    base -= __mf_opts.crumple_zone;

  size_with_crumple_zones =
    CLAMPADD(c, CLAMPADD(__mf_opts.crumple_zone,
                         __mf_opts.crumple_zone));
  BEGIN_MALLOC_PROTECT ();
  result = (char *) CALL_REAL (realloc, base, size_with_crumple_zones);
  END_MALLOC_PROTECT ();

  /* Ensure heap wiping doesn't occur during this peculiar
     unregister/reregister pair.  */
  LOCKTH ();
  __mf_set_state (reentrant);
  saved_wipe_heap = __mf_opts.wipe_heap;
  __mf_opts.wipe_heap = 0;

  if (LIKELY(buf))
    __mfu_unregister (buf, 0, __MF_TYPE_HEAP_I);
  /* NB: underlying region may have been __MF_TYPE_HEAP. */

  if (LIKELY(result))
    {
      result += __mf_opts.crumple_zone;
      __mfu_register (result, c, __MF_TYPE_HEAP_I, "realloc region");
      /* XXX: register __MF_TYPE_NOACCESS for crumple zones.  */
    }

  /* Restore previous setting.  */
  __mf_opts.wipe_heap = saved_wipe_heap;

  __mf_set_state (active);
  UNLOCKTH ();

  return result;
}
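
/* Why wipe_heap is suppressed above (an inference from the code, not a
   documented contract): with wipe_heap enabled, unregistering a heap
   object normally scribbles over its contents, but by this point the
   underlying realloc has already copied the old block's data and may
   have released its storage, so wiping the old region would touch
   memory the program no longer owns.  Setting the reentrant state
   makes any checks triggered during this window no-ops, preventing
   recursion into the runtime while the lock is held.  */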

#if PIC
/* A special bootstrap variant. */
void
__mf_0fn_free (void *buf)
{
  return;
}
#endif

#undef free
WRAPPER(void, free, void *buf)
{
  /* Use a circular queue to delay some number (__mf_opts.free_queue_length) of free()s.  */
  static void *free_queue [__MF_FREEQ_MAX];
  static unsigned free_ptr = 0;
  static int freeq_initialized = 0;
  DECLARE(void, free, void *);

  if (UNLIKELY(buf == NULL))
    return;

  BEGIN_PROTECT (free, buf);

#if PIC
  /* Check whether the given buffer might have come from a
     __mf_0fn_malloc/calloc call that for whatever reason was not
     redirected back to __mf_0fn_free.  If so, we just ignore the
     call. */
  if (UNLIKELY((uintptr_t) buf >= (uintptr_t) __mf_0fn_bufs &&
               (uintptr_t) buf < ((uintptr_t) __mf_0fn_bufs + sizeof(__mf_0fn_bufs))))
    {
      VERBOSE_TRACE ("skipping free of boot (0fn) alloc buffer %p\n", buf);
      return;
    }
#endif

  LOCKTH ();
  if (UNLIKELY(!freeq_initialized))
    {
      memset (free_queue, 0,
              __MF_FREEQ_MAX * sizeof (void *));
      freeq_initialized = 1;
    }
  UNLOCKTH ();

  __mf_unregister (buf, 0, __MF_TYPE_HEAP_I);
  /* NB: underlying region may have been __MF_TYPE_HEAP. */

  if (UNLIKELY(__mf_opts.free_queue_length > 0))
    {
      char *freeme = NULL;
      LOCKTH ();
      if (free_queue [free_ptr] != NULL)
        {
          freeme = free_queue [free_ptr];
          freeme -= __mf_opts.crumple_zone;
        }
      free_queue [free_ptr] = buf;
      free_ptr = (free_ptr == (__mf_opts.free_queue_length-1) ? 0 : free_ptr + 1);
      UNLOCKTH ();
      if (freeme)
        {
          if (__mf_opts.trace_mf_calls)
            {
              VERBOSE_TRACE ("freeing deferred pointer %p (crumple %u)\n",
                             (void *) freeme,
                             __mf_opts.crumple_zone);
            }
          BEGIN_MALLOC_PROTECT ();
          CALL_REAL (free, freeme);
          END_MALLOC_PROTECT ();
        }
    }
  else
    {
      /* back pointer up a bit to the beginning of crumple zone */
      char *base = (char *)buf;
      base -= __mf_opts.crumple_zone;
      if (__mf_opts.trace_mf_calls)
        {
          VERBOSE_TRACE ("freeing pointer %p = %p - %u\n",
                         (void *) base,
                         (void *) buf,
                         __mf_opts.crumple_zone);
        }
      BEGIN_MALLOC_PROTECT ();
      CALL_REAL (free, base);
      END_MALLOC_PROTECT ();
    }
}
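
/* What the deferred-free queue buys (a behavioural note, not new
   mechanism): the block is unregistered immediately, so any later
   access through a stale pointer is already reportable, but its
   storage is handed back to the underlying allocator only after
   free_queue_length further free() calls have cycled past it.  Keeping
   recently freed blocks out of circulation means a dangling pointer is
   far more likely to land in still-unregistered memory, and thus be
   reported, than to silently alias a fresh allocation that happens to
   reuse the same address.  */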

/* We can only wrap mmap if the target supports it.  Likewise for munmap.
   We assume we have both if we have mmap.  */
#ifdef HAVE_MMAP

#if PIC
/* A special bootstrap variant. */
void *
__mf_0fn_mmap (void *start, size_t l, int prot, int f, int fd, off_t off)
{
#if defined(__FreeBSD__)
  if (f == 0x1000 && fd == -1 && prot == 0 && off == 0)
    return 0;
#endif /* Ignore red zone allocation request for initial thread's stack. */

  return (void *) -1;
}
#endif

#undef mmap
WRAPPER(void *, mmap,
        void *start, size_t length, int prot,
        int flags, int fd, off_t offset)
{
  DECLARE(void *, mmap, void *, size_t, int,
          int, int, off_t);
  void *result;
  BEGIN_PROTECT (mmap, start, length, prot, flags, fd, offset);

  result = CALL_REAL (mmap, start, length, prot,
                      flags, fd, offset);

  /*
  VERBOSE_TRACE ("mmap (%08lx, %08lx, ...) => %08lx\n",
                 (uintptr_t) start, (uintptr_t) length,
                 (uintptr_t) result);
  */

  if (result != (void *)-1)
    {
      /* Register each page as a heap object.  Why not register it all
         as a single segment?  That's so that a later munmap() call
         can unmap individual pages.  XXX: would __MF_TYPE_GUESS make
         this more automatic?  */
      size_t ps = getpagesize ();
      uintptr_t base = (uintptr_t) result;
      uintptr_t offset;

      for (offset=0; offset<length; offset+=ps)
        {
          /* XXX: We could map PROT_NONE to __MF_TYPE_NOACCESS. */
          /* XXX: Unaccessed HEAP pages are reported as leaks.  Is this
             appropriate for unaccessed mmap pages? */
          __mf_register ((void *) CLAMPADD (base, offset), ps,
                         __MF_TYPE_HEAP_I, "mmap page");
        }
    }

  return result;
}
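
/* Illustration of why each page is registered separately: after
   mmap'ing, say, three pages, the program may legally munmap only the
   middle one.  Because every page is its own __MF_TYPE_HEAP_I object,
   the munmap wrapper below can unregister exactly the pages being
   released and leave the neighbouring ones tracked; a single
   length-sized object could not be split that way.  */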

#if PIC
/* A special bootstrap variant. */
int
__mf_0fn_munmap (void *start, size_t length)
{
  return -1;
}
#endif

#undef munmap
WRAPPER(int , munmap, void *start, size_t length)
{
  DECLARE(int, munmap, void *, size_t);
  int result;
  BEGIN_PROTECT (munmap, start, length);

  result = CALL_REAL (munmap, start, length);

  /*
  VERBOSE_TRACE ("munmap (%08lx, %08lx, ...) => %08lx\n",
                 (uintptr_t) start, (uintptr_t) length,
                 (uintptr_t) result);
  */

  if (result == 0)
    {
      /* Unregister each page as a heap object.  */
      size_t ps = getpagesize ();
      uintptr_t base = (uintptr_t) start & (~ (ps - 1)); /* page align */
      uintptr_t offset;

      for (offset=0; offset<length; offset+=ps)
        __mf_unregister ((void *) CLAMPADD (base, offset), ps, __MF_TYPE_HEAP_I);
    }
  return result;
}
#endif /* HAVE_MMAP */

#ifdef HAVE_MMAP64
#if PIC
/* A special bootstrap variant. */
void *
__mf_0fn_mmap64 (void *start, size_t l, int prot, int f, int fd, off64_t off)
{
  return (void *) -1;
}
#endif

#undef mmap64
WRAPPER(void *, mmap64,
        void *start, size_t length, int prot,
        int flags, int fd, off64_t offset)
{
  DECLARE(void *, mmap64, void *, size_t, int,
          int, int, off64_t);
  void *result;
  BEGIN_PROTECT (mmap64, start, length, prot, flags, fd, offset);

  result = CALL_REAL (mmap64, start, length, prot,
                      flags, fd, offset);

  /*
  VERBOSE_TRACE ("mmap64 (%08lx, %08lx, ...) => %08lx\n",
                 (uintptr_t) start, (uintptr_t) length,
                 (uintptr_t) result);
  */

  if (result != (void *)-1)
    {
      /* Register each page as a heap object.  Why not register it all
         as a single segment?  That's so that a later munmap() call
         can unmap individual pages.  XXX: would __MF_TYPE_GUESS make
         this more automatic?  */
      size_t ps = getpagesize ();
      uintptr_t base = (uintptr_t) result;
      uintptr_t offset;

      for (offset=0; offset<length; offset+=ps)
        {
          /* XXX: We could map PROT_NONE to __MF_TYPE_NOACCESS. */
          /* XXX: Unaccessed HEAP pages are reported as leaks.  Is this
             appropriate for unaccessed mmap pages? */
          __mf_register ((void *) CLAMPADD (base, offset), ps,
                         __MF_TYPE_HEAP_I, "mmap64 page");
        }
    }

  return result;
}
#endif /* HAVE_MMAP64 */

/* This wrapper is a little different, as it's called indirectly from
   __mf_fini also to clean up pending allocations.  */
void *
__mf_wrap_alloca_indirect (size_t c)
{
  DECLARE (void *, malloc, size_t);
  DECLARE (void, free, void *);

  /* This struct, a linked list, tracks alloca'd objects.  The newest
     object is at the head of the list.  If we detect that we've
     popped a few levels of stack, then the listed objects are freed
     as needed.  NB: The tracking struct is allocated with
     real_malloc; the user data with wrap_malloc.  */
  struct alloca_tracking { void *ptr; void *stack; struct alloca_tracking* next; };
  static struct alloca_tracking *alloca_history = NULL;

  void *stack = __builtin_frame_address (0);
  void *result;
  struct alloca_tracking *track;

  TRACE ("%s\n", __PRETTY_FUNCTION__);
  VERBOSE_TRACE ("alloca stack level %p\n", (void *) stack);

  /* XXX: thread locking! */

  /* Free any previously alloca'd blocks that belong to deeper-nested functions,
     which must therefore have exited by now.  */

#define DEEPER_THAN < /* XXX: for x86; steal find_stack_direction() from libiberty/alloca.c */

  while (alloca_history &&
         ((uintptr_t) alloca_history->stack DEEPER_THAN (uintptr_t) stack))
    {
      struct alloca_tracking *next = alloca_history->next;
      __mf_unregister (alloca_history->ptr, 0, __MF_TYPE_HEAP);
      BEGIN_MALLOC_PROTECT ();
      CALL_REAL (free, alloca_history->ptr);
      CALL_REAL (free, alloca_history);
      END_MALLOC_PROTECT ();
      alloca_history = next;
    }

  /* Allocate new block.  */
  result = NULL;
  if (LIKELY (c > 0)) /* alloca(0) causes no allocation.  */
    {
      BEGIN_MALLOC_PROTECT ();
      track = (struct alloca_tracking *) CALL_REAL (malloc,
                                                    sizeof (struct alloca_tracking));
      END_MALLOC_PROTECT ();
      if (LIKELY (track != NULL))
        {
          BEGIN_MALLOC_PROTECT ();
          result = CALL_REAL (malloc, c);
          END_MALLOC_PROTECT ();
          if (UNLIKELY (result == NULL))
            {
              BEGIN_MALLOC_PROTECT ();
              CALL_REAL (free, track);
              END_MALLOC_PROTECT ();
              /* Too bad.  XXX: What about errno?  */
            }
          else
            {
              __mf_register (result, c, __MF_TYPE_HEAP, "alloca region");
              track->ptr = result;
              track->stack = stack;
              track->next = alloca_history;
              alloca_history = track;
            }
        }
    }

  return result;
}
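
/* How the emulation above ties block lifetime to the stack (a summary,
   assuming the downward-growth convention noted at DEEPER_THAN): each
   "alloca'd" block is ordinary heap memory, but its tracking record
   remembers __builtin_frame_address (0) at the time of the call.  On
   every subsequent call, and once more from __mf_fini, records whose
   saved frame is deeper than the current frame are taken to belong to
   functions that have since returned, so their blocks are unregistered
   and released.  This frees memory later than a true alloca would, but
   not while the owning frame is still live.  */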

#undef alloca
WRAPPER(void *, alloca, size_t c)
{
  return __mf_wrap_alloca_indirect (c);
}