/* Provenance: GCC (official-gcc.git), libmudflap/mf-hooks1.c
   blob 7199472ebe1facd49f7f241aa223412a7733a64e
   (ChangeLog: 2004-06-02 Eric Christopher <echristo@redhat.com>).  */
/* Mudflap: narrow-pointer bounds-checking by tree rewriting.
   Copyright (C) 2002, 2003 Free Software Foundation, Inc.
   Contributed by Frank Ch. Eigler <fche@redhat.com>
   and Graydon Hoare <graydon@redhat.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

In addition to the permissions in the GNU General Public License, the
Free Software Foundation gives you unlimited permission to link the
compiled version of this file into combinations with other programs,
and to distribute those combinations without any restriction coming
from the use of this file.  (The General Public License restrictions
do apply in other respects; for example, they cover modification of
the file, and distribution when not linked into a combine
executable.)

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */
33 #include "config.h"
35 #ifndef HAVE_SOCKLEN_T
36 #define socklen_t int
37 #endif
40 /* These attempt to coax various unix flavours to declare all our
41 needed tidbits in the system headers. */
42 #if !defined(__FreeBSD__) && !defined(__APPLE__)
43 #define _POSIX_SOURCE
44 #endif /* Some BSDs break <sys/socket.h> if this is defined. */
45 #define _GNU_SOURCE
46 #define _XOPEN_SOURCE
47 #define _BSD_TYPES
48 #define __EXTENSIONS__
49 #define _ALL_SOURCE
50 #define _LARGE_FILE_API
51 #define _XOPEN_SOURCE_EXTENDED 1
53 #include <string.h>
54 #include <stdio.h>
55 #include <stdlib.h>
56 #include <sys/time.h>
57 #include <sys/types.h>
58 #include <unistd.h>
59 #include <assert.h>
60 #include <errno.h>
61 #include <limits.h>
62 #include <time.h>
64 #include "mf-runtime.h"
65 #include "mf-impl.h"
67 #ifdef _MUDFLAP
68 #error "Do not compile this file with -fmudflap!"
69 #endif
/* Memory allocation related hook functions.  Some of these are
   intercepted via linker wrapping or symbol interposition.  Others
   use plain macros in mf-runtime.h.  */


#ifdef WRAP_malloc

#if PIC
/* A special bootstrap variant, used before the real libc malloc can
   be resolved (e.g. while the dynamic linker is still initializing).
   It simply fails; callers must cope with a NULL result.  */
void *
__mf_0fn_malloc (size_t c)
{
  /* fprintf (stderr, "0fn malloc c=%lu\n", c); */
  return NULL;
}
#endif


#undef malloc

/* Wrapper for malloc: allocate C usable bytes surrounded by two
   "crumple zone" guard areas of __mf_opts.crumple_zone bytes each,
   and register only the interior with the mudflap object database.  */
WRAPPER(void *, malloc, size_t c)
{
  size_t size_with_crumple_zones;
  DECLARE(void *, malloc, size_t c);
  void *result;
  BEGIN_PROTECT (malloc, c);

  /* CLAMPADD saturates rather than overflowing on huge requests.  */
  size_with_crumple_zones =
    CLAMPADD(c,CLAMPADD(__mf_opts.crumple_zone,
                        __mf_opts.crumple_zone));
  result = (char *) CALL_REAL (malloc, size_with_crumple_zones);

  if (LIKELY(result))
    {
      /* Hand the caller a pointer past the leading crumple zone, and
         register only the caller-visible C bytes.  */
      result += __mf_opts.crumple_zone;
      __mf_register (result, c, __MF_TYPE_HEAP, "malloc region");
      /* XXX: register __MF_TYPE_NOACCESS for crumple zones.  */
    }

  return result;
}
#endif
#ifdef WRAP_calloc

#ifdef PIC
/* A special bootstrap variant: satisfy tiny early allocations (e.g.
   from the dynamic linker) out of a small pool of static buffers.
   Static storage is already zeroed, which matches calloc semantics.  */
void *
__mf_0fn_calloc (size_t c, size_t n)
{
  enum foo { BS = 4096, NB=10 };
  static char bufs[NB][BS];
  static unsigned bufs_used[NB];
  unsigned i;

  /* fprintf (stderr, "0fn calloc c=%lu n=%lu\n", c, n); */
  for (i=0; i<NB; i++)
    {
      if (! bufs_used[i] && (c*n) < BS)
        {
          bufs_used[i] = 1;
          return & bufs[i][0];
        }
    }
  return NULL;
}
#endif


#undef calloc

/* Wrapper for calloc: allocate and zero C*N bytes plus crumple
   zones, registering the interior as __MF_TYPE_HEAP_I.  */
WRAPPER(void *, calloc, size_t c, size_t n)
{
  size_t size_with_crumple_zones;
  DECLARE(void *, calloc, size_t, size_t);
  DECLARE(void *, malloc, size_t);
  DECLARE(void *, memset, void *, int, size_t);
  char *result;
  BEGIN_PROTECT (calloc, c, n);

  size_with_crumple_zones =
    CLAMPADD((c * n), /* XXX: CLAMPMUL */
             CLAMPADD(__mf_opts.crumple_zone,
                      __mf_opts.crumple_zone));
  result = (char *) CALL_REAL (malloc, size_with_crumple_zones);

  /* Zero the whole region (crumple zones included) to provide the
     calloc zero-fill guarantee.  */
  if (LIKELY(result))
    memset (result, 0, size_with_crumple_zones);

  if (LIKELY(result))
    {
      result += __mf_opts.crumple_zone;
      __mf_register (result, c*n /* XXX: clamp */, __MF_TYPE_HEAP_I, "calloc region");
      /* XXX: register __MF_TYPE_NOACCESS for crumple zones.  */
    }

  return result;
}
#endif
#ifdef WRAP_realloc

#if PIC
/* A special bootstrap variant.  */
void *
__mf_0fn_realloc (void *buf, size_t c)
{
  return NULL;
}
#endif


#undef realloc

/* Wrapper for realloc: translate the caller's interior pointer back
   to the real allocation base, resize with crumple zones, and swap
   the old registration for a new one.  */
WRAPPER(void *, realloc, void *buf, size_t c)
{
  DECLARE(void * , realloc, void *, size_t);
  size_t size_with_crumple_zones;
  char *base = buf;
  unsigned saved_wipe_heap;
  char *result;
  BEGIN_PROTECT (realloc, buf, c);

  /* The caller's pointer is past the leading crumple zone; back up to
     the address the underlying allocator actually returned.  */
  if (LIKELY(buf))
    base -= __mf_opts.crumple_zone;

  size_with_crumple_zones =
    CLAMPADD(c, CLAMPADD(__mf_opts.crumple_zone,
                         __mf_opts.crumple_zone));
  result = (char *) CALL_REAL (realloc, base, size_with_crumple_zones);

  /* Ensure heap wiping doesn't occur during this peculiar
     unregister/reregister pair.  */
  LOCKTH ();
  __mf_state = reentrant;
  saved_wipe_heap = __mf_opts.wipe_heap;
  __mf_opts.wipe_heap = 0;

  if (LIKELY(buf))
    __mfu_unregister (buf, 0);

  if (LIKELY(result))
    {
      result += __mf_opts.crumple_zone;
      __mfu_register (result, c, __MF_TYPE_HEAP_I, "realloc region");
      /* XXX: register __MF_TYPE_NOACCESS for crumple zones.  */
    }

  /* Restore previous setting.  */
  __mf_opts.wipe_heap = saved_wipe_heap;

  __mf_state = active;
  UNLOCKTH ();

  return result;
}
#endif
#ifdef WRAP_free

#if PIC
/* A special bootstrap variant: silently discard early frees (any
   such block came from the static __mf_0fn_calloc pool).  */
void
__mf_0fn_free (void *buf)
{
  return;
}
#endif


#undef free

/* Wrapper for free: unregister the object, then either free it
   immediately (adjusting for the crumple zone) or park it in a
   fixed-size circular queue so use-after-free bugs have a window in
   which they can still be detected.  */
WRAPPER(void, free, void *buf)
{
  /* Use a circular queue to delay some number (__mf_opts.free_queue_length) of free()s.  */
  static void *free_queue [__MF_FREEQ_MAX];
  static unsigned free_ptr = 0;
  static int freeq_initialized = 0;
  DECLARE(void, free, void *);

  BEGIN_PROTECT (free, buf);

  /* free(NULL) is a no-op.  */
  if (UNLIKELY(buf == NULL))
    return;

  LOCKTH ();
  if (UNLIKELY(!freeq_initialized))
    {
      memset (free_queue, 0,
              __MF_FREEQ_MAX * sizeof (void *));
      freeq_initialized = 1;
    }
  UNLOCKTH ();

  __mf_unregister (buf, 0);

  if (UNLIKELY(__mf_opts.free_queue_length > 0))
    {
      char *freeme = NULL;
      LOCKTH ();
      /* Evict the oldest queued pointer (if any) to make room,
         rewinding it to the real allocation base first.  */
      if (free_queue [free_ptr] != NULL)
        {
          freeme = free_queue [free_ptr];
          freeme -= __mf_opts.crumple_zone;
        }
      free_queue [free_ptr] = buf;
      free_ptr = (free_ptr == (__mf_opts.free_queue_length-1) ? 0 : free_ptr + 1);
      UNLOCKTH ();
      if (freeme)
        {
          if (__mf_opts.trace_mf_calls)
            {
              VERBOSE_TRACE ("freeing deferred pointer %p (crumple %u)\n",
                             (void *) freeme,
                             __mf_opts.crumple_zone);
            }
          CALL_REAL (free, freeme);
        }
    }
  else
    {
      /* back pointer up a bit to the beginning of crumple zone */
      char *base = (char *)buf;
      base -= __mf_opts.crumple_zone;
      if (__mf_opts.trace_mf_calls)
        {
          VERBOSE_TRACE ("freeing pointer %p = %p - %u\n",
                         (void *) base,
                         (void *) buf,
                         __mf_opts.crumple_zone);
        }
      CALL_REAL (free, base);
    }
}
#endif
#ifdef WRAP_mmap

#if PIC
/* A special bootstrap variant: fail with MAP_FAILED.  */
void *
__mf_0fn_mmap (void *start, size_t l, int prot, int f, int fd, off_t off)
{
  return (void *) -1;
}
#endif


#undef mmap

/* Wrapper for mmap: perform the real mapping, then register each
   page individually with the object database.  */
WRAPPER(void *, mmap,
        void *start, size_t length, int prot,
        int flags, int fd, off_t offset)
{
  DECLARE(void *, mmap, void *, size_t, int,
          int, int, off_t);
  void *result;
  BEGIN_PROTECT (mmap, start, length, prot, flags, fd, offset);

  result = CALL_REAL (mmap, start, length, prot,
                      flags, fd, offset);

  VERBOSE_TRACE ("mmap (%08lx, %08lx, ...) => %08lx\n",
                 (uintptr_t) start, (uintptr_t) length,
                 (uintptr_t) result);

  if (result != (void *)-1)
    {
      /* Register each page as a heap object.  Why not register it all
         as a single segment?  That's so that a later munmap() call
         can unmap individual pages.  XXX: would __MF_TYPE_GUESS make
         this more automatic?  */
      size_t ps = getpagesize ();
      uintptr_t base = (uintptr_t) result;
      uintptr_t offset;

      for (offset=0; offset<length; offset+=ps)
        {
          /* XXX: We could map PROT_NONE to __MF_TYPE_NOACCESS.  */
          /* XXX: Unaccessed HEAP pages are reported as leaks.  Is this
             appropriate for unaccessed mmap pages?  */
          __mf_register ((void *) CLAMPADD (base, offset), ps,
                         __MF_TYPE_HEAP_I, "mmap page");
        }
    }

  return result;
}
#endif
#ifdef WRAP_munmap

#if PIC
/* A special bootstrap variant: always report failure.
   NOTE(review): the dump lost the return type here; restored as
   `int` to match munmap's signature.  */
int
__mf_0fn_munmap (void *start, size_t length)
{
  return -1;
}
#endif


#undef munmap

/* Wrapper for munmap: perform the real unmapping, then unregister
   each affected page (mirroring the per-page registration in the
   mmap wrapper above).  */
WRAPPER(int , munmap, void *start, size_t length)
{
  DECLARE(int, munmap, void *, size_t);
  int result;
  BEGIN_PROTECT (munmap, start, length);

  result = CALL_REAL (munmap, start, length);

  VERBOSE_TRACE ("munmap (%08lx, %08lx, ...) => %08lx\n",
                 (uintptr_t) start, (uintptr_t) length,
                 (uintptr_t) result);

  if (result == 0)
    {
      /* Unregister each page as a heap object.  */
      size_t ps = getpagesize ();
      uintptr_t base = (uintptr_t) start & (~ (ps - 1)); /* page align */
      uintptr_t offset;

      for (offset=0; offset<length; offset+=ps)
        __mf_unregister ((void *) CLAMPADD (base, offset), ps);
    }
  return result;
}
#endif
#ifdef WRAP_alloca

/* This wrapper is a little different, as it's called indirectly from
   __mf_fini also to clean up pending allocations.  Alloca'd blocks
   are simulated with real heap allocations tracked in a linked list
   keyed by stack depth, so they can be released lazily when the
   owning frame is observed to have exited.  */
void *
__mf_wrap_alloca_indirect (size_t c)
{
  DECLARE (void *, malloc, size_t);
  DECLARE (void, free, void *);

  /* This struct, a linked list, tracks alloca'd objects.  The newest
     object is at the head of the list.  If we detect that we've
     popped a few levels of stack, then the listed objects are freed
     as needed.  NB: The tracking struct is allocated with
     real_malloc; the user data with wrap_malloc.  */
  struct alloca_tracking { void *ptr; void *stack; struct alloca_tracking* next; };
  static struct alloca_tracking *alloca_history = NULL;

  void *stack = __builtin_frame_address (0);
  void *result;
  struct alloca_tracking *track;

  TRACE ("%s\n", __PRETTY_FUNCTION__);
  VERBOSE_TRACE ("alloca stack level %p\n", (void *) stack);

  /* XXX: thread locking!  */

  /* Free any previously alloca'd blocks that belong to deeper-nested functions,
     which must therefore have exited by now.  */

/* Stack grows downward on x86, so "deeper" means numerically lower.  */
#define DEEPER_THAN < /* for x86 */

  while (alloca_history &&
         ((uintptr_t) alloca_history->stack DEEPER_THAN (uintptr_t) stack))
    {
      struct alloca_tracking *next = alloca_history->next;
      __mf_unregister (alloca_history->ptr, 0);
      CALL_REAL (free, alloca_history->ptr);
      CALL_REAL (free, alloca_history);
      alloca_history = next;
    }

  /* Allocate new block.  */
  result = NULL;
  if (LIKELY (c > 0)) /* alloca(0) causes no allocation.  */
    {
      track = (struct alloca_tracking *) CALL_REAL (malloc,
                                                    sizeof (struct alloca_tracking));
      if (LIKELY (track != NULL))
        {
          result = CALL_REAL (malloc, c);
          if (UNLIKELY (result == NULL))
            {
              CALL_REAL (free, track);
              /* Too bad.  XXX: What about errno?  */
            }
          else
            {
              __mf_register (result, c, __MF_TYPE_HEAP, "alloca region");
              track->ptr = result;
              track->stack = stack;
              track->next = alloca_history;
              alloca_history = track;
            }
        }
    }

  return result;
}


#undef alloca

/* Wrapper for alloca: defer to the indirect helper above.  */
WRAPPER(void *, alloca, size_t c)
{
  return __mf_wrap_alloca_indirect (c);
}

#endif