/* libmudflap/mf-hooks1.c -- see GCC PR middle-end/27945.  */
/* Mudflap: narrow-pointer bounds-checking by tree rewriting.
   Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
   Contributed by Frank Ch. Eigler <fche@redhat.com>
   and Graydon Hoare <graydon@redhat.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

In addition to the permissions in the GNU General Public License, the
Free Software Foundation gives you unlimited permission to link the
compiled version of this file into combinations with other programs,
and to distribute those combinations without any restriction coming
from the use of this file.  (The General Public License restrictions
do apply in other respects; for example, they cover modification of
the file, and distribution when not linked into a combined
executable.)

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.  */
#include "config.h"

#ifndef HAVE_SOCKLEN_T
#define socklen_t int
#endif

/* These attempt to coax various unix flavours to declare all our
   needed tidbits in the system headers.  */
#if !defined(__FreeBSD__) && !defined(__APPLE__)
#define _POSIX_SOURCE
#endif /* Some BSDs break <sys/socket.h> if this is defined. */
#define _GNU_SOURCE
#define _XOPEN_SOURCE
#define _BSD_TYPES
#define __EXTENSIONS__
#define _ALL_SOURCE
#define _LARGE_FILE_API
#define _XOPEN_SOURCE_EXTENDED 1

#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <sys/types.h>
#include <unistd.h>
#include <assert.h>
#include <errno.h>
#include <limits.h>
#include <time.h>

#include "mf-runtime.h"
#include "mf-impl.h"
67 #ifdef _MUDFLAP
68 #error "Do not compile this file with -fmudflap!"
69 #endif
/* Memory allocation related hook functions.  Some of these are
   intercepted via linker wrapping or symbol interposition.  Others
   use plain macros in mf-runtime.h.  */
77 #if PIC
/* Bootstrap allocation pool: NB static buffers of BS bytes each,
   handed out at most once apiece before the real malloc is usable
   (e.g. while the dynamic linker is still resolving symbols).  */
enum { BS = 4096, NB=10 };
static char __mf_0fn_bufs[NB][BS];
static unsigned __mf_0fn_bufs_used[NB];


/* A special bootstrap variant of malloc.  Returns the first unused
   pool buffer, or NULL when the request is BS bytes or larger, or
   when all NB buffers are already in use.  Buffers are never reused.
   NOTE(review): the pool is a plain char array, so the result may not
   be aligned for types with stricter alignment -- appears tolerated
   by the early-startup callers; confirm.  */
void *
__mf_0fn_malloc (size_t c)
{
  unsigned i;

  /* The size test is loop-invariant; reject oversize requests once
     instead of re-testing on every iteration.  */
  if (c >= BS)
    return NULL;

  for (i=0; i<NB; i++)
    {
      if (! __mf_0fn_bufs_used[i])
        {
          __mf_0fn_bufs_used[i] = 1;
          return & __mf_0fn_bufs[i][0];
        }
    }
  return NULL;
}
100 #endif
103 #undef malloc
104 WRAPPER(void *, malloc, size_t c)
106 size_t size_with_crumple_zones;
107 DECLARE(void *, malloc, size_t c);
108 void *result;
109 BEGIN_PROTECT (malloc, c);
111 size_with_crumple_zones =
112 CLAMPADD(c,CLAMPADD(__mf_opts.crumple_zone,
113 __mf_opts.crumple_zone));
114 BEGIN_MALLOC_PROTECT ();
115 result = (char *) CALL_REAL (malloc, size_with_crumple_zones);
116 END_MALLOC_PROTECT ();
118 if (LIKELY(result))
120 result += __mf_opts.crumple_zone;
121 __mf_register (result, c, __MF_TYPE_HEAP, "malloc region");
122 /* XXX: register __MF_TYPE_NOACCESS for crumple zones. */
125 return result;
129 #ifdef PIC
/* A special bootstrap variant of calloc.  The result is implicitly
   zeroed: __mf_0fn_malloc hands out each static (zero-initialized)
   buffer at most once, so no memset is needed.  Returns NULL on
   multiplication overflow or pool exhaustion.  */
void *
__mf_0fn_calloc (size_t c, size_t n)
{
  /* Guard C*N against size_t overflow, which would otherwise let a
     huge request wrap around to a small allocation.  */
  if (n != 0 && c > (size_t) -1 / n)
    return NULL;
  return __mf_0fn_malloc (c * n);
}
136 #endif
139 #undef calloc
140 WRAPPER(void *, calloc, size_t c, size_t n)
142 size_t size_with_crumple_zones;
143 DECLARE(void *, calloc, size_t, size_t);
144 DECLARE(void *, malloc, size_t);
145 DECLARE(void *, memset, void *, int, size_t);
146 char *result;
147 BEGIN_PROTECT (calloc, c, n);
149 size_with_crumple_zones =
150 CLAMPADD((c * n), /* XXX: CLAMPMUL */
151 CLAMPADD(__mf_opts.crumple_zone,
152 __mf_opts.crumple_zone));
153 BEGIN_MALLOC_PROTECT ();
154 result = (char *) CALL_REAL (malloc, size_with_crumple_zones);
155 END_MALLOC_PROTECT ();
157 if (LIKELY(result))
158 memset (result, 0, size_with_crumple_zones);
160 if (LIKELY(result))
162 result += __mf_opts.crumple_zone;
163 __mf_register (result, c*n /* XXX: clamp */, __MF_TYPE_HEAP_I, "calloc region");
164 /* XXX: register __MF_TYPE_NOACCESS for crumple zones. */
167 return result;
171 #if PIC
/* A special bootstrap variant of realloc.  Resizing is not supported
   this early in startup; every call fails with NULL and the caller
   must cope.  */
void *
__mf_0fn_realloc (void *buf, size_t c)
{
  (void) buf;  /* unused: bootstrap blocks are never resized */
  (void) c;
  return NULL;
}
178 #endif
181 #undef realloc
182 WRAPPER(void *, realloc, void *buf, size_t c)
184 DECLARE(void * , realloc, void *, size_t);
185 size_t size_with_crumple_zones;
186 char *base = buf;
187 unsigned saved_wipe_heap;
188 char *result;
189 BEGIN_PROTECT (realloc, buf, c);
191 if (LIKELY(buf))
192 base -= __mf_opts.crumple_zone;
194 size_with_crumple_zones =
195 CLAMPADD(c, CLAMPADD(__mf_opts.crumple_zone,
196 __mf_opts.crumple_zone));
197 BEGIN_MALLOC_PROTECT ();
198 result = (char *) CALL_REAL (realloc, base, size_with_crumple_zones);
199 END_MALLOC_PROTECT ();
201 /* Ensure heap wiping doesn't occur during this peculiar
202 unregister/reregister pair. */
203 LOCKTH ();
204 __mf_set_state (reentrant);
205 saved_wipe_heap = __mf_opts.wipe_heap;
206 __mf_opts.wipe_heap = 0;
208 if (LIKELY(buf))
209 __mfu_unregister (buf, 0, __MF_TYPE_HEAP_I);
210 /* NB: underlying region may have been __MF_TYPE_HEAP. */
212 if (LIKELY(result))
214 result += __mf_opts.crumple_zone;
215 __mfu_register (result, c, __MF_TYPE_HEAP_I, "realloc region");
216 /* XXX: register __MF_TYPE_NOACCESS for crumple zones. */
219 /* Restore previous setting. */
220 __mf_opts.wipe_heap = saved_wipe_heap;
222 __mf_set_state (active);
223 UNLOCKTH ();
225 return result;
229 #if PIC
/* A special bootstrap variant of free.  Deliberately a no-op:
   bootstrap buffers are static and never reclaimed (the real free
   wrapper also skips pointers that fall inside the bootstrap pool).  */
void
__mf_0fn_free (void *buf)
{
  (void) buf;  /* unused: nothing to release */
}
236 #endif
238 #undef free
239 WRAPPER(void, free, void *buf)
241 /* Use a circular queue to delay some number (__mf_opts.free_queue_length) of free()s. */
242 static void *free_queue [__MF_FREEQ_MAX];
243 static unsigned free_ptr = 0;
244 static int freeq_initialized = 0;
245 DECLARE(void, free, void *);
247 BEGIN_PROTECT (free, buf);
249 if (UNLIKELY(buf == NULL))
250 return;
252 #if PIC
253 /* Check whether the given buffer might have come from a
254 __mf_0fn_malloc/calloc call that for whatever reason was not
255 redirected back to __mf_0fn_free. If so, we just ignore the
256 call. */
257 if (UNLIKELY((uintptr_t) buf >= (uintptr_t) __mf_0fn_bufs &&
258 (uintptr_t) buf < ((uintptr_t) __mf_0fn_bufs + sizeof(__mf_0fn_bufs))))
260 VERBOSE_TRACE ("skipping free of boot (0fn) alloc buffer %p\n", buf);
261 return;
263 #endif
265 LOCKTH ();
266 if (UNLIKELY(!freeq_initialized))
268 memset (free_queue, 0,
269 __MF_FREEQ_MAX * sizeof (void *));
270 freeq_initialized = 1;
272 UNLOCKTH ();
274 __mf_unregister (buf, 0, __MF_TYPE_HEAP_I);
275 /* NB: underlying region may have been __MF_TYPE_HEAP. */
277 if (UNLIKELY(__mf_opts.free_queue_length > 0))
279 char *freeme = NULL;
280 LOCKTH ();
281 if (free_queue [free_ptr] != NULL)
283 freeme = free_queue [free_ptr];
284 freeme -= __mf_opts.crumple_zone;
286 free_queue [free_ptr] = buf;
287 free_ptr = (free_ptr == (__mf_opts.free_queue_length-1) ? 0 : free_ptr + 1);
288 UNLOCKTH ();
289 if (freeme)
291 if (__mf_opts.trace_mf_calls)
293 VERBOSE_TRACE ("freeing deferred pointer %p (crumple %u)\n",
294 (void *) freeme,
295 __mf_opts.crumple_zone);
297 BEGIN_MALLOC_PROTECT ();
298 CALL_REAL (free, freeme);
299 END_MALLOC_PROTECT ();
302 else
304 /* back pointer up a bit to the beginning of crumple zone */
305 char *base = (char *)buf;
306 base -= __mf_opts.crumple_zone;
307 if (__mf_opts.trace_mf_calls)
309 VERBOSE_TRACE ("freeing pointer %p = %p - %u\n",
310 (void *) base,
311 (void *) buf,
312 __mf_opts.crumple_zone);
314 BEGIN_MALLOC_PROTECT ();
315 CALL_REAL (free, base);
316 END_MALLOC_PROTECT ();
/* We can only wrap mmap if the target supports it.  Likewise for munmap.
   We assume we have both if we have mmap.  */
323 #ifdef HAVE_MMAP
325 #if PIC
/* A special bootstrap variant of mmap.  Always fails, returning
   (void *) -1 to match the mmap MAP_FAILED error convention.  */
void *
__mf_0fn_mmap (void *start, size_t l, int prot, int f, int fd, off_t off)
{
  /* All parameters unused: no mapping is ever attempted this early.  */
  (void) start; (void) l; (void) prot; (void) f; (void) fd; (void) off;
  return (void *) -1;
}
332 #endif
335 #undef mmap
336 WRAPPER(void *, mmap,
337 void *start, size_t length, int prot,
338 int flags, int fd, off_t offset)
340 DECLARE(void *, mmap, void *, size_t, int,
341 int, int, off_t);
342 void *result;
343 BEGIN_PROTECT (mmap, start, length, prot, flags, fd, offset);
345 result = CALL_REAL (mmap, start, length, prot,
346 flags, fd, offset);
349 VERBOSE_TRACE ("mmap (%08lx, %08lx, ...) => %08lx\n",
350 (uintptr_t) start, (uintptr_t) length,
351 (uintptr_t) result);
354 if (result != (void *)-1)
356 /* Register each page as a heap object. Why not register it all
357 as a single segment? That's so that a later munmap() call
358 can unmap individual pages. XXX: would __MF_TYPE_GUESS make
359 this more automatic? */
360 size_t ps = getpagesize ();
361 uintptr_t base = (uintptr_t) result;
362 uintptr_t offset;
364 for (offset=0; offset<length; offset+=ps)
366 /* XXX: We could map PROT_NONE to __MF_TYPE_NOACCESS. */
367 /* XXX: Unaccessed HEAP pages are reported as leaks. Is this
368 appropriate for unaccessed mmap pages? */
369 __mf_register ((void *) CLAMPADD (base, offset), ps,
370 __MF_TYPE_HEAP_I, "mmap page");
374 return result;
378 #if PIC
/* A special bootstrap variant of munmap.  Always fails with -1.
   (The extracted source had dropped the explicit 'int' return type;
   implicit int is invalid since C99, so it is restored here.)  */
int
__mf_0fn_munmap (void *start, size_t length)
{
  (void) start;   /* unused: nothing was ever mapped by the bootstrap mmap */
  (void) length;
  return -1;
}
385 #endif
388 #undef munmap
389 WRAPPER(int , munmap, void *start, size_t length)
391 DECLARE(int, munmap, void *, size_t);
392 int result;
393 BEGIN_PROTECT (munmap, start, length);
395 result = CALL_REAL (munmap, start, length);
398 VERBOSE_TRACE ("munmap (%08lx, %08lx, ...) => %08lx\n",
399 (uintptr_t) start, (uintptr_t) length,
400 (uintptr_t) result);
403 if (result == 0)
405 /* Unregister each page as a heap object. */
406 size_t ps = getpagesize ();
407 uintptr_t base = (uintptr_t) start & (~ (ps - 1)); /* page align */
408 uintptr_t offset;
410 for (offset=0; offset<length; offset+=ps)
411 __mf_unregister ((void *) CLAMPADD (base, offset), ps, __MF_TYPE_HEAP_I);
413 return result;
415 #endif /* HAVE_MMAP */
418 /* This wrapper is a little different, as it's called indirectly from
419 __mf_fini also to clean up pending allocations. */
420 void *
421 __mf_wrap_alloca_indirect (size_t c)
423 DECLARE (void *, malloc, size_t);
424 DECLARE (void, free, void *);
426 /* This struct, a linked list, tracks alloca'd objects. The newest
427 object is at the head of the list. If we detect that we've
428 popped a few levels of stack, then the listed objects are freed
429 as needed. NB: The tracking struct is allocated with
430 real_malloc; the user data with wrap_malloc.
432 struct alloca_tracking { void *ptr; void *stack; struct alloca_tracking* next; };
433 static struct alloca_tracking *alloca_history = NULL;
435 void *stack = __builtin_frame_address (0);
436 void *result;
437 struct alloca_tracking *track;
439 TRACE ("%s\n", __PRETTY_FUNCTION__);
440 VERBOSE_TRACE ("alloca stack level %p\n", (void *) stack);
442 /* XXX: thread locking! */
444 /* Free any previously alloca'd blocks that belong to deeper-nested functions,
445 which must therefore have exited by now. */
447 #define DEEPER_THAN < /* XXX: for x86; steal find_stack_direction() from libiberty/alloca.c */
449 while (alloca_history &&
450 ((uintptr_t) alloca_history->stack DEEPER_THAN (uintptr_t) stack))
452 struct alloca_tracking *next = alloca_history->next;
453 __mf_unregister (alloca_history->ptr, 0, __MF_TYPE_HEAP);
454 BEGIN_MALLOC_PROTECT ();
455 CALL_REAL (free, alloca_history->ptr);
456 CALL_REAL (free, alloca_history);
457 END_MALLOC_PROTECT ();
458 alloca_history = next;
461 /* Allocate new block. */
462 result = NULL;
463 if (LIKELY (c > 0)) /* alloca(0) causes no allocation. */
465 BEGIN_MALLOC_PROTECT ();
466 track = (struct alloca_tracking *) CALL_REAL (malloc,
467 sizeof (struct alloca_tracking));
468 END_MALLOC_PROTECT ();
469 if (LIKELY (track != NULL))
471 BEGIN_MALLOC_PROTECT ();
472 result = CALL_REAL (malloc, c);
473 END_MALLOC_PROTECT ();
474 if (UNLIKELY (result == NULL))
476 BEGIN_MALLOC_PROTECT ();
477 CALL_REAL (free, track);
478 END_MALLOC_PROTECT ();
479 /* Too bad. XXX: What about errno? */
481 else
483 __mf_register (result, c, __MF_TYPE_HEAP, "alloca region");
484 track->ptr = result;
485 track->stack = stack;
486 track->next = alloca_history;
487 alloca_history = track;
492 return result;
496 #undef alloca
497 WRAPPER(void *, alloca, size_t c)
499 return __mf_wrap_alloca_indirect (c);