/* Mudflap: narrow-pointer bounds-checking by tree rewriting.
   Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
   Contributed by Frank Ch. Eigler <fche@redhat.com>
   and Graydon Hoare <graydon@redhat.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

In addition to the permissions in the GNU General Public License, the
Free Software Foundation gives you unlimited permission to link the
compiled version of this file into combinations with other programs,
and to distribute those combinations without any restriction coming
from the use of this file.  (The General Public License restrictions
do apply in other respects; for example, they cover modification of
the file, and distribution when not linked into a combined
executable.)

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.  */
35 #ifndef HAVE_SOCKLEN_T
40 /* These attempt to coax various unix flavours to declare all our
41 needed tidbits in the system headers. */
42 #if !defined(__FreeBSD__) && !defined(__APPLE__)
44 #endif /* Some BSDs break <sys/socket.h> if this is defined. */
48 #define __EXTENSIONS__
50 #define _LARGE_FILE_API
51 #define _XOPEN_SOURCE_EXTENDED 1
57 #include <sys/types.h>
64 #include "mf-runtime.h"
68 #error "Do not compile this file with -fmudflap!"
/* Memory allocation related hook functions.  Some of these are
   intercepted via linker wrapping or symbol interposition.  Others
   use plain macros in mf-runtime.h.  */

/* Bootstrap arena: a small set of fixed-size static buffers handed
   out by __mf_0fn_malloc before the real malloc is usable.  BS is
   the per-buffer size, NB the number of buffers.  Buffers are
   zero-initialized (static storage) and, once used, never reclaimed.  */
enum { BS = 4096, NB = 10 };
static char __mf_0fn_bufs[NB][BS];
static unsigned __mf_0fn_bufs_used[NB];
84 /* A special bootstrap variant. */
86 __mf_0fn_malloc (size_t c
)
92 if (! __mf_0fn_bufs_used
[i
] && c
< BS
)
94 __mf_0fn_bufs_used
[i
] = 1;
95 return & __mf_0fn_bufs
[i
][0];
104 WRAPPER(void *, malloc
, size_t c
)
106 size_t size_with_crumple_zones
;
107 DECLARE(void *, malloc
, size_t c
);
109 BEGIN_PROTECT (malloc
, c
);
111 size_with_crumple_zones
=
112 CLAMPADD(c
,CLAMPADD(__mf_opts
.crumple_zone
,
113 __mf_opts
.crumple_zone
));
114 BEGIN_MALLOC_PROTECT ();
115 result
= (char *) CALL_REAL (malloc
, size_with_crumple_zones
);
116 END_MALLOC_PROTECT ();
120 result
+= __mf_opts
.crumple_zone
;
121 __mf_register (result
, c
, __MF_TYPE_HEAP
, "malloc region");
122 /* XXX: register __MF_TYPE_NOACCESS for crumple zones. */
/* A special bootstrap variant.  Delegates to __mf_0fn_malloc; the
   arena buffers are static and used at most once, so the returned
   memory is already zeroed as calloc requires.  */
void *
__mf_0fn_calloc (size_t c, size_t n)
{
  /* Reject multiplication overflow (CERT INT30-C) — the product would
     otherwise wrap and under-allocate.  */
  if (n != 0 && c > (size_t) -1 / n)
    return NULL;
  return __mf_0fn_malloc (c * n);
}
140 WRAPPER(void *, calloc
, size_t c
, size_t n
)
142 size_t size_with_crumple_zones
;
143 DECLARE(void *, calloc
, size_t, size_t);
144 DECLARE(void *, malloc
, size_t);
145 DECLARE(void *, memset
, void *, int, size_t);
147 BEGIN_PROTECT (calloc
, c
, n
);
149 size_with_crumple_zones
=
150 CLAMPADD((c
* n
), /* XXX: CLAMPMUL */
151 CLAMPADD(__mf_opts
.crumple_zone
,
152 __mf_opts
.crumple_zone
));
153 BEGIN_MALLOC_PROTECT ();
154 result
= (char *) CALL_REAL (malloc
, size_with_crumple_zones
);
155 END_MALLOC_PROTECT ();
158 memset (result
, 0, size_with_crumple_zones
);
162 result
+= __mf_opts
.crumple_zone
;
163 __mf_register (result
, c
*n
/* XXX: clamp */, __MF_TYPE_HEAP_I
, "calloc region");
164 /* XXX: register __MF_TYPE_NOACCESS for crumple zones. */
172 /* A special bootstrap variant. */
174 __mf_0fn_realloc (void *buf
, size_t c
)
182 WRAPPER(void *, realloc
, void *buf
, size_t c
)
184 DECLARE(void * , realloc
, void *, size_t);
185 size_t size_with_crumple_zones
;
187 unsigned saved_wipe_heap
;
189 BEGIN_PROTECT (realloc
, buf
, c
);
192 base
-= __mf_opts
.crumple_zone
;
194 size_with_crumple_zones
=
195 CLAMPADD(c
, CLAMPADD(__mf_opts
.crumple_zone
,
196 __mf_opts
.crumple_zone
));
197 BEGIN_MALLOC_PROTECT ();
198 result
= (char *) CALL_REAL (realloc
, base
, size_with_crumple_zones
);
199 END_MALLOC_PROTECT ();
201 /* Ensure heap wiping doesn't occur during this peculiar
202 unregister/reregister pair. */
204 __mf_set_state (reentrant
);
205 saved_wipe_heap
= __mf_opts
.wipe_heap
;
206 __mf_opts
.wipe_heap
= 0;
209 __mfu_unregister (buf
, 0, __MF_TYPE_HEAP_I
);
210 /* NB: underlying region may have been __MF_TYPE_HEAP. */
214 result
+= __mf_opts
.crumple_zone
;
215 __mfu_register (result
, c
, __MF_TYPE_HEAP_I
, "realloc region");
216 /* XXX: register __MF_TYPE_NOACCESS for crumple zones. */
219 /* Restore previous setting. */
220 __mf_opts
.wipe_heap
= saved_wipe_heap
;
222 __mf_set_state (active
);
230 /* A special bootstrap variant. */
232 __mf_0fn_free (void *buf
)
239 WRAPPER(void, free
, void *buf
)
241 /* Use a circular queue to delay some number (__mf_opts.free_queue_length) of free()s. */
242 static void *free_queue
[__MF_FREEQ_MAX
];
243 static unsigned free_ptr
= 0;
244 static int freeq_initialized
= 0;
245 DECLARE(void, free
, void *);
247 BEGIN_PROTECT (free
, buf
);
249 if (UNLIKELY(buf
== NULL
))
253 /* Check whether the given buffer might have come from a
254 __mf_0fn_malloc/calloc call that for whatever reason was not
255 redirected back to __mf_0fn_free. If so, we just ignore the
257 if (UNLIKELY((uintptr_t) buf
>= (uintptr_t) __mf_0fn_bufs
&&
258 (uintptr_t) buf
< ((uintptr_t) __mf_0fn_bufs
+ sizeof(__mf_0fn_bufs
))))
260 VERBOSE_TRACE ("skipping free of boot (0fn) alloc buffer %p\n", buf
);
266 if (UNLIKELY(!freeq_initialized
))
268 memset (free_queue
, 0,
269 __MF_FREEQ_MAX
* sizeof (void *));
270 freeq_initialized
= 1;
274 __mf_unregister (buf
, 0, __MF_TYPE_HEAP_I
);
275 /* NB: underlying region may have been __MF_TYPE_HEAP. */
277 if (UNLIKELY(__mf_opts
.free_queue_length
> 0))
281 if (free_queue
[free_ptr
] != NULL
)
283 freeme
= free_queue
[free_ptr
];
284 freeme
-= __mf_opts
.crumple_zone
;
286 free_queue
[free_ptr
] = buf
;
287 free_ptr
= (free_ptr
== (__mf_opts
.free_queue_length
-1) ? 0 : free_ptr
+ 1);
291 if (__mf_opts
.trace_mf_calls
)
293 VERBOSE_TRACE ("freeing deferred pointer %p (crumple %u)\n",
295 __mf_opts
.crumple_zone
);
297 BEGIN_MALLOC_PROTECT ();
298 CALL_REAL (free
, freeme
);
299 END_MALLOC_PROTECT ();
304 /* back pointer up a bit to the beginning of crumple zone */
305 char *base
= (char *)buf
;
306 base
-= __mf_opts
.crumple_zone
;
307 if (__mf_opts
.trace_mf_calls
)
309 VERBOSE_TRACE ("freeing pointer %p = %p - %u\n",
312 __mf_opts
.crumple_zone
);
314 BEGIN_MALLOC_PROTECT ();
315 CALL_REAL (free
, base
);
316 END_MALLOC_PROTECT ();
/* We can only wrap mmap if the target supports it.  Likewise for munmap.
   We assume we have both if we have mmap.  */

/* A special bootstrap variant.  mmap is unavailable during bootstrap;
   always fails with the mmap failure sentinel.
   NOTE(review): the body was dropped by the extraction; restored as
   the upstream failure return — verify against upstream libmudflap.  */
void *
__mf_0fn_mmap (void *start, size_t l, int prot, int f, int fd, off_t off)
{
  return (void *) -1;
}
336 WRAPPER(void *, mmap
,
337 void *start
, size_t length
, int prot
,
338 int flags
, int fd
, off_t offset
)
340 DECLARE(void *, mmap
, void *, size_t, int,
343 BEGIN_PROTECT (mmap
, start
, length
, prot
, flags
, fd
, offset
);
345 result
= CALL_REAL (mmap
, start
, length
, prot
,
349 VERBOSE_TRACE ("mmap (%08lx, %08lx, ...) => %08lx\n",
350 (uintptr_t) start, (uintptr_t) length,
354 if (result
!= (void *)-1)
356 /* Register each page as a heap object. Why not register it all
357 as a single segment? That's so that a later munmap() call
358 can unmap individual pages. XXX: would __MF_TYPE_GUESS make
359 this more automatic? */
360 size_t ps
= getpagesize ();
361 uintptr_t base
= (uintptr_t) result
;
364 for (offset
=0; offset
<length
; offset
+=ps
)
366 /* XXX: We could map PROT_NONE to __MF_TYPE_NOACCESS. */
367 /* XXX: Unaccessed HEAP pages are reported as leaks. Is this
368 appropriate for unaccessed mmap pages? */
369 __mf_register ((void *) CLAMPADD (base
, offset
), ps
,
370 __MF_TYPE_HEAP_I
, "mmap page");
379 /* A special bootstrap variant. */
381 __mf_0fn_munmap (void *start
, size_t length
)
389 WRAPPER(int , munmap
, void *start
, size_t length
)
391 DECLARE(int, munmap
, void *, size_t);
393 BEGIN_PROTECT (munmap
, start
, length
);
395 result
= CALL_REAL (munmap
, start
, length
);
398 VERBOSE_TRACE ("munmap (%08lx, %08lx, ...) => %08lx\n",
399 (uintptr_t) start, (uintptr_t) length,
405 /* Unregister each page as a heap object. */
406 size_t ps
= getpagesize ();
407 uintptr_t base
= (uintptr_t) start
& (~ (ps
- 1)); /* page align */
410 for (offset
=0; offset
<length
; offset
+=ps
)
411 __mf_unregister ((void *) CLAMPADD (base
, offset
), ps
, __MF_TYPE_HEAP_I
);
415 #endif /* HAVE_MMAP */
418 /* This wrapper is a little different, as it's called indirectly from
419 __mf_fini also to clean up pending allocations. */
421 __mf_wrap_alloca_indirect (size_t c
)
423 DECLARE (void *, malloc
, size_t);
424 DECLARE (void, free
, void *);
426 /* This struct, a linked list, tracks alloca'd objects. The newest
427 object is at the head of the list. If we detect that we've
428 popped a few levels of stack, then the listed objects are freed
429 as needed. NB: The tracking struct is allocated with
430 real_malloc; the user data with wrap_malloc.
432 struct alloca_tracking
{ void *ptr
; void *stack
; struct alloca_tracking
* next
; };
433 static struct alloca_tracking
*alloca_history
= NULL
;
435 void *stack
= __builtin_frame_address (0);
437 struct alloca_tracking
*track
;
439 TRACE ("%s\n", __PRETTY_FUNCTION__
);
440 VERBOSE_TRACE ("alloca stack level %p\n", (void *) stack
);
442 /* XXX: thread locking! */
444 /* Free any previously alloca'd blocks that belong to deeper-nested functions,
445 which must therefore have exited by now. */
447 #define DEEPER_THAN < /* XXX: for x86; steal find_stack_direction() from libiberty/alloca.c */
449 while (alloca_history
&&
450 ((uintptr_t) alloca_history
->stack
DEEPER_THAN (uintptr_t) stack
))
452 struct alloca_tracking
*next
= alloca_history
->next
;
453 __mf_unregister (alloca_history
->ptr
, 0, __MF_TYPE_HEAP
);
454 BEGIN_MALLOC_PROTECT ();
455 CALL_REAL (free
, alloca_history
->ptr
);
456 CALL_REAL (free
, alloca_history
);
457 END_MALLOC_PROTECT ();
458 alloca_history
= next
;
461 /* Allocate new block. */
463 if (LIKELY (c
> 0)) /* alloca(0) causes no allocation. */
465 BEGIN_MALLOC_PROTECT ();
466 track
= (struct alloca_tracking
*) CALL_REAL (malloc
,
467 sizeof (struct alloca_tracking
));
468 END_MALLOC_PROTECT ();
469 if (LIKELY (track
!= NULL
))
471 BEGIN_MALLOC_PROTECT ();
472 result
= CALL_REAL (malloc
, c
);
473 END_MALLOC_PROTECT ();
474 if (UNLIKELY (result
== NULL
))
476 BEGIN_MALLOC_PROTECT ();
477 CALL_REAL (free
, track
);
478 END_MALLOC_PROTECT ();
479 /* Too bad. XXX: What about errno? */
483 __mf_register (result
, c
, __MF_TYPE_HEAP
, "alloca region");
485 track
->stack
= stack
;
486 track
->next
= alloca_history
;
487 alloca_history
= track
;
497 WRAPPER(void *, alloca
, size_t c
)
499 return __mf_wrap_alloca_indirect (c
);