/* Mudflap: narrow-pointer bounds-checking by tree rewriting.
   Copyright (C) 2002, 2003, 2004, 2009, 2011 Free Software Foundation, Inc.
   Contributed by Frank Ch. Eigler <fche@redhat.com>
   and Graydon Hoare <graydon@redhat.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.

You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
<http://www.gnu.org/licenses/>.  */
#include "config.h"

#ifndef HAVE_SOCKLEN_T
#define socklen_t int
#endif

/* These attempt to coax various unix flavours to declare all our
   needed tidbits in the system headers.  */
#if !defined(__FreeBSD__) && !defined(__APPLE__)
#define _POSIX_SOURCE
#endif /* Some BSDs break <sys/socket.h> if this is defined. */
#define __EXTENSIONS__
#define _LARGE_FILE_API
#define _XOPEN_SOURCE_EXTENDED 1

#include <string.h>
#include <unistd.h>
#include <sys/types.h>

#include "mf-runtime.h"
#include "mf-impl.h"

#ifdef _MUDFLAP
#error "Do not compile this file with -fmudflap!"
#endif
/* Memory allocation related hook functions.  Some of these are
   intercepted via linker wrapping or symbol interposition.  Others
   use plain macros in mf-runtime.h.  */
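
/* Illustrative sketch (not part of this runtime): with GNU ld's linker
   wrapping, a link line such as

       gcc test.o -Wl,--wrap=malloc -o test

   makes calls to malloc resolve to __wrap_malloc, while the wrapper can
   still reach the underlying allocator through __real_malloc:

       void *__real_malloc (size_t c);
       void *__wrap_malloc (size_t c)
       {
         void *p = __real_malloc (c);
         ... record and check the region here ...
         return p;
       }

   The WRAPPER/DECLARE/CALL_REAL macros from mf-impl.h abstract over this
   scheme (and, for shared objects, over symbol interposition).  */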
enum { BS = 4096, NB = 10 };
static char __mf_0fn_bufs[NB][BS];
static unsigned __mf_0fn_bufs_used[NB];
/* A special bootstrap variant. */
void *
__mf_0fn_malloc (size_t c)
{
  unsigned i;

  for (i = 0; i < NB; i++)
    {
      if (! __mf_0fn_bufs_used[i] && c < BS)
        {
          __mf_0fn_bufs_used[i] = 1;
          return & __mf_0fn_bufs[i][0];
        }
    }
  return NULL;
}
WRAPPER(void *, malloc, size_t c)
{
  size_t size_with_crumple_zones;
  DECLARE(void *, malloc, size_t c);
  char *result;
  BEGIN_PROTECT (malloc, c);

  size_with_crumple_zones =
    CLAMPADD(c, CLAMPADD(__mf_opts.crumple_zone,
                         __mf_opts.crumple_zone));
  BEGIN_MALLOC_PROTECT ();
  result = (char *) CALL_REAL (malloc, size_with_crumple_zones);
  END_MALLOC_PROTECT ();

  if (LIKELY(result))
    {
      result += __mf_opts.crumple_zone;
      __mf_register (result, c, __MF_TYPE_HEAP, "malloc region");
      /* XXX: register __MF_TYPE_NOACCESS for crumple zones.
         (A worked size example follows this wrapper.)  */
    }

  return result;
}
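
/* Worked example (illustrative; assuming a crumple_zone of 32 bytes for
   concreteness): malloc(100) asks the real allocator for
   100 + 32 + 32 = 164 bytes.  The pointer handed back to the application
   is the real pointer plus 32, and only the 100 user bytes are registered
   as __MF_TYPE_HEAP, so accesses that stray into either 32-byte crumple
   zone fall outside every registered object and are reported as
   violations.  */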
/* A special bootstrap variant. */
void *
__mf_0fn_calloc (size_t c, size_t n)
{
  return __mf_0fn_malloc (c * n);
}
WRAPPER(void *, calloc, size_t c, size_t n)
{
  size_t size_with_crumple_zones;
  DECLARE(void *, calloc, size_t, size_t);
  DECLARE(void *, malloc, size_t);
  DECLARE(void *, memset, void *, int, size_t);
  char *result;
  BEGIN_PROTECT (calloc, c, n);

  size_with_crumple_zones =
    CLAMPADD((c * n), /* XXX: CLAMPMUL -- see the sketch after this wrapper */
             CLAMPADD(__mf_opts.crumple_zone,
                      __mf_opts.crumple_zone));
  BEGIN_MALLOC_PROTECT ();
  result = (char *) CALL_REAL (malloc, size_with_crumple_zones);
  END_MALLOC_PROTECT ();

  if (LIKELY(result))
    memset (result, 0, size_with_crumple_zones);

  if (LIKELY(result))
    {
      result += __mf_opts.crumple_zone;
      __mf_register (result, c*n /* XXX: clamp */, __MF_TYPE_HEAP_I, "calloc region");
      /* XXX: register __MF_TYPE_NOACCESS for crumple zones.  */
    }

  return result;
}
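
/* Illustrative sketch only: the XXX notes above ask for a saturating
   multiply to match CLAMPADD's saturating add.  A hypothetical CLAMPMUL
   (not defined in mf-impl.h; assumes SIZE_MAX from <stdint.h>) could
   detect unsigned overflow by division and clamp:

       #define CLAMPMUL(a,b) \
         (((b) != 0 && (a) > (SIZE_MAX / (b))) ? SIZE_MAX : (a) * (b))

   With something like that in place, the c * n above would saturate
   instead of silently wrapping around for huge element counts.  */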
/* A special bootstrap variant. */
void *
__mf_0fn_realloc (void *buf, size_t c)
{
  return NULL;
}
WRAPPER(void *, realloc, void *buf, size_t c)
{
  DECLARE(void *, realloc, void *, size_t);
  size_t size_with_crumple_zones;
  char *base = buf;
  unsigned saved_wipe_heap;
  char *result;
  BEGIN_PROTECT (realloc, buf, c);

  if (LIKELY(buf))
    base -= __mf_opts.crumple_zone;

  size_with_crumple_zones =
    CLAMPADD(c, CLAMPADD(__mf_opts.crumple_zone,
                         __mf_opts.crumple_zone));
  BEGIN_MALLOC_PROTECT ();
  result = (char *) CALL_REAL (realloc, base, size_with_crumple_zones);
  END_MALLOC_PROTECT ();

  /* Ensure heap wiping doesn't occur during this peculiar
     unregister/reregister pair.  */
  LOCKTH ();
  __mf_set_state (reentrant);
  saved_wipe_heap = __mf_opts.wipe_heap;
  __mf_opts.wipe_heap = 0;

  if (LIKELY(buf))
    __mfu_unregister (buf, 0, __MF_TYPE_HEAP_I);
  /* NB: underlying region may have been __MF_TYPE_HEAP. */

  if (LIKELY(result))
    {
      result += __mf_opts.crumple_zone;
      __mfu_register (result, c, __MF_TYPE_HEAP_I, "realloc region");
      /* XXX: register __MF_TYPE_NOACCESS for crumple zones.  */
    }

  /* Restore previous setting.  */
  __mf_opts.wipe_heap = saved_wipe_heap;

  __mf_set_state (active);
  UNLOCKTH ();

  return result;
}
/* A special bootstrap variant. */
void
__mf_0fn_free (void *buf)
{
  return;
}
WRAPPER(void, free, void *buf)
{
  /* Use a circular queue to delay some number (__mf_opts.free_queue_length)
     of free()s; see the illustrative note after this function.  */
  static void *free_queue [__MF_FREEQ_MAX];
  static unsigned free_ptr = 0;
  static int freeq_initialized = 0;
  DECLARE(void, free, void *);

  BEGIN_PROTECT (free, buf);

  if (UNLIKELY(buf == NULL))
    return;

  /* Check whether the given buffer might have come from a
     __mf_0fn_malloc/calloc call that for whatever reason was not
     redirected back to __mf_0fn_free.  If so, we just ignore the
     call.  */
  if (UNLIKELY((uintptr_t) buf >= (uintptr_t) __mf_0fn_bufs &&
               (uintptr_t) buf < ((uintptr_t) __mf_0fn_bufs + sizeof(__mf_0fn_bufs))))
    {
      VERBOSE_TRACE ("skipping free of boot (0fn) alloc buffer %p\n", buf);
      return;
    }

  LOCKTH ();
  if (UNLIKELY(!freeq_initialized))
    {
      memset (free_queue, 0,
              __MF_FREEQ_MAX * sizeof (void *));
      freeq_initialized = 1;
    }
  UNLOCKTH ();

  __mf_unregister (buf, 0, __MF_TYPE_HEAP_I);
  /* NB: underlying region may have been __MF_TYPE_HEAP. */

  if (UNLIKELY(__mf_opts.free_queue_length > 0))
    {
      char *freeme = NULL;
      LOCKTH ();
      if (free_queue [free_ptr] != NULL)
        {
          freeme = free_queue [free_ptr];
          freeme -= __mf_opts.crumple_zone;
        }
      free_queue [free_ptr] = buf;
      free_ptr = (free_ptr == (__mf_opts.free_queue_length-1) ? 0 : free_ptr + 1);
      UNLOCKTH ();
      if (freeme)
        {
          if (__mf_opts.trace_mf_calls)
            {
              VERBOSE_TRACE ("freeing deferred pointer %p (crumple %u)\n",
                             (void *) freeme,
                             __mf_opts.crumple_zone);
            }
          BEGIN_MALLOC_PROTECT ();
          CALL_REAL (free, freeme);
          END_MALLOC_PROTECT ();
        }
    }
  else
    {
      /* back pointer up a bit to the beginning of crumple zone */
      char *base = (char *)buf;
      base -= __mf_opts.crumple_zone;
      if (__mf_opts.trace_mf_calls)
        {
          VERBOSE_TRACE ("freeing pointer %p = %p - %u\n",
                         (void *) base,
                         (void *) buf,
                         __mf_opts.crumple_zone);
        }
      BEGIN_MALLOC_PROTECT ();
      CALL_REAL (free, base);
      END_MALLOC_PROTECT ();
    }
}
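
/* Illustrative note (a sketch, not part of the runtime): deferring the
   real free() helps catch some use-after-free bugs, because the storage
   stays unregistered yet unrecycled while it sits in the queue.  For
   example, assuming the runtime option controlling
   __mf_opts.free_queue_length is enabled:

       char *p = malloc (16);
       free (p);
       p[0] = 'x';   becomes a reported violation: the object is already
                     unregistered, and the underlying block has not yet
                     been handed back to the system allocator for reuse

   Only once enough later free() calls push this entry out of the
   circular queue is the real free() finally performed.  */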
/* We can only wrap mmap if the target supports it.  Likewise for munmap.
   We assume we have both if we have mmap.  */
#ifdef HAVE_MMAP

/* A special bootstrap variant. */
void *
__mf_0fn_mmap (void *start, size_t l, int prot, int f, int fd, off_t off)
{
#if defined(__FreeBSD__)
  if (f == 0x1000 && fd == -1 && prot == 0 && off == 0)
    return 0;
#endif /* Ignore red zone allocation request for initial thread's stack. */

  return (void *) -1;
}
WRAPPER(void *, mmap,
        void *start, size_t length, int prot,
        int flags, int fd, off_t offset)
{
  DECLARE(void *, mmap, void *, size_t, int,
          int, int, off_t);
  void *result;
  BEGIN_PROTECT (mmap, start, length, prot, flags, fd, offset);

  result = CALL_REAL (mmap, start, length, prot,
                      flags, fd, offset);

  VERBOSE_TRACE ("mmap (%08lx, %08lx, ...) => %08lx\n",
                 (uintptr_t) start, (uintptr_t) length,
                 (uintptr_t) result);

  if (result != (void *)-1)
    {
      /* Register each page as a heap object.  Why not register it all
         as a single segment?  That's so that a later munmap() call
         can unmap individual pages; see the illustrative note after
         this wrapper.  XXX: would __MF_TYPE_GUESS make this more
         automatic?  */
      size_t ps = getpagesize ();
      uintptr_t base = (uintptr_t) result;
      uintptr_t offset;

      for (offset = 0; offset < length; offset += ps)
        {
          /* XXX: We could map PROT_NONE to __MF_TYPE_NOACCESS. */
          /* XXX: Unaccessed HEAP pages are reported as leaks.  Is this
             appropriate for unaccessed mmap pages? */
          __mf_register ((void *) CLAMPADD (base, offset), ps,
                         __MF_TYPE_HEAP_I, "mmap page");
        }
    }

  return result;
}
/* A special bootstrap variant. */
int
__mf_0fn_munmap (void *start, size_t length)
{
  return -1;
}
WRAPPER(int, munmap, void *start, size_t length)
{
  DECLARE(int, munmap, void *, size_t);
  int result;
  BEGIN_PROTECT (munmap, start, length);

  result = CALL_REAL (munmap, start, length);

  VERBOSE_TRACE ("munmap (%08lx, %08lx, ...) => %08lx\n",
                 (uintptr_t) start, (uintptr_t) length,
                 (uintptr_t) result);

  if (result == 0)
    {
      /* Unregister each page as a heap object.  */
      size_t ps = getpagesize ();
      uintptr_t base = (uintptr_t) start & (~ (ps - 1)); /* page align */
      uintptr_t offset;

      for (offset = 0; offset < length; offset += ps)
        __mf_unregister ((void *) CLAMPADD (base, offset), ps, __MF_TYPE_HEAP_I);
    }
  return result;
}
#endif /* HAVE_MMAP */
#ifdef HAVE_MMAP64
/* A special bootstrap variant. */
void *
__mf_0fn_mmap64 (void *start, size_t l, int prot, int f, int fd, off64_t off)
{
  return (void *) -1;
}
WRAPPER(void *, mmap64,
        void *start, size_t length, int prot,
        int flags, int fd, off64_t offset)
{
  DECLARE(void *, mmap64, void *, size_t, int,
          int, int, off64_t);
  void *result;
  BEGIN_PROTECT (mmap64, start, length, prot, flags, fd, offset);

  result = CALL_REAL (mmap64, start, length, prot,
                      flags, fd, offset);

  VERBOSE_TRACE ("mmap64 (%08lx, %08lx, ...) => %08lx\n",
                 (uintptr_t) start, (uintptr_t) length,
                 (uintptr_t) result);

  if (result != (void *)-1)
    {
      /* Register each page as a heap object.  Why not register it all
         as a single segment?  That's so that a later munmap() call
         can unmap individual pages.  XXX: would __MF_TYPE_GUESS make
         this more automatic?  */
      size_t ps = getpagesize ();
      uintptr_t base = (uintptr_t) result;
      uintptr_t offset;

      for (offset = 0; offset < length; offset += ps)
        {
          /* XXX: We could map PROT_NONE to __MF_TYPE_NOACCESS. */
          /* XXX: Unaccessed HEAP pages are reported as leaks.  Is this
             appropriate for unaccessed mmap pages? */
          __mf_register ((void *) CLAMPADD (base, offset), ps,
                         __MF_TYPE_HEAP_I, "mmap64 page");
        }
    }

  return result;
}
#endif /* HAVE_MMAP64 */
/* This wrapper is a little different, as it's called indirectly from
   __mf_fini also to clean up pending allocations.  */
void *
__mf_wrap_alloca_indirect (size_t c)
{
  DECLARE (void *, malloc, size_t);
  DECLARE (void, free, void *);

  /* This struct, a linked list, tracks alloca'd objects.  The newest
     object is at the head of the list.  If we detect that we've
     popped a few levels of stack, then the listed objects are freed
     as needed.  NB: The tracking struct is allocated with
     real_malloc; the user data with wrap_malloc.  */
  struct alloca_tracking { void *ptr; void *stack; struct alloca_tracking *next; };
  static struct alloca_tracking *alloca_history = NULL;

  void *stack = __builtin_frame_address (0);
  void *result = NULL;
  struct alloca_tracking *track;

  TRACE ("%s\n", __PRETTY_FUNCTION__);
  VERBOSE_TRACE ("alloca stack level %p\n", (void *) stack);

  /* XXX: thread locking! */

  /* Free any previously alloca'd blocks that belong to deeper-nested functions,
     which must therefore have exited by now.  */

#define DEEPER_THAN < /* XXX: for x86; steal find_stack_direction() from libiberty/alloca.c */
  /* (An illustrative find_stack_direction() sketch appears at the end of this file.)  */
  while (alloca_history &&
         ((uintptr_t) alloca_history->stack DEEPER_THAN (uintptr_t) stack))
    {
      struct alloca_tracking *next = alloca_history->next;
      __mf_unregister (alloca_history->ptr, 0, __MF_TYPE_HEAP);
      BEGIN_MALLOC_PROTECT ();
      CALL_REAL (free, alloca_history->ptr);
      CALL_REAL (free, alloca_history);
      END_MALLOC_PROTECT ();
      alloca_history = next;
    }

  /* Allocate new block.  */
  if (LIKELY (c > 0)) /* alloca(0) causes no allocation.  */
    {
      BEGIN_MALLOC_PROTECT ();
      track = (struct alloca_tracking *) CALL_REAL (malloc,
                                                    sizeof (struct alloca_tracking));
      END_MALLOC_PROTECT ();
      if (LIKELY (track != NULL))
        {
          BEGIN_MALLOC_PROTECT ();
          result = CALL_REAL (malloc, c);
          END_MALLOC_PROTECT ();
          if (UNLIKELY (result == NULL))
            {
              BEGIN_MALLOC_PROTECT ();
              CALL_REAL (free, track);
              END_MALLOC_PROTECT ();
              /* Too bad.  XXX: What about errno?  */
            }
          else
            {
              __mf_register (result, c, __MF_TYPE_HEAP, "alloca region");
              track->ptr = result;
              track->stack = stack;
              track->next = alloca_history;
              alloca_history = track;
            }
        }
    }

  return result;
}
WRAPPER(void *, alloca, size_t c)
{
  return __mf_wrap_alloca_indirect (c);
}