/* Mudflap: narrow-pointer bounds-checking by tree rewriting.
   Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
   Contributed by Frank Ch. Eigler <fche@redhat.com>
   and Graydon Hoare <graydon@redhat.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

In addition to the permissions in the GNU General Public License, the
Free Software Foundation gives you unlimited permission to link the
compiled version of this file into combinations with other programs,
and to distribute those combinations without any restriction coming
from the use of this file.  (The General Public License restrictions
do apply in other respects; for example, they cover modification of
the file, and distribution when not linked into a combine
executable.)

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */
#include "config.h"

#ifndef HAVE_SOCKLEN_T
#define socklen_t int
#endif

/* These attempt to coax various unix flavours to declare all our
   needed tidbits in the system headers.  */
#if !defined(__FreeBSD__) && !defined(__APPLE__)
#define _POSIX_SOURCE
#endif /* Some BSDs break <sys/socket.h> if this is defined. */
#define _GNU_SOURCE
#define _XOPEN_SOURCE
#define _BSD_TYPES
#define __EXTENSIONS__
#define _ALL_SOURCE
#define _LARGE_FILE_API
#define _XOPEN_SOURCE_EXTENDED 1

#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/mman.h>

#include "mf-runtime.h"
#include "mf-impl.h"

#ifdef _MUDFLAP
#error "Do not compile this file with -fmudflap!"
#endif
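
/* (The guard above matters because instrumenting this file with
   -fmudflap would make these wrappers hook and check their own
   allocations, recursing without bound.) */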

/* Memory allocation related hook functions.  Some of these are
   intercepted via linker wrapping or symbol interposition.  Others
   use plain macros in mf-runtime.h.  */
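
/* For illustration: with GNU ld, "linker wrapping" means building with
   -Wl,--wrap=malloc, so that calls to malloc are redirected to
   __wrap_malloc while __real_malloc resolves to the original symbol.
   A minimal sketch of that mechanism (hypothetical, not part of this
   runtime) looks like: */
#if 0
#include <stddef.h>

void *__real_malloc (size_t sz);        /* bound by the linker */

void *
__wrap_malloc (size_t sz)
{
  /* Bookkeeping would go here; then forward to the real allocator. */
  return __real_malloc (sz);
}
#endif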

#if PIC
/* A special bootstrap variant. */
void *
__mf_0fn_malloc (size_t c)
{
  /* fprintf (stderr, "0fn malloc c=%lu\n", c); */
  return NULL;
}
#endif


#undef malloc
WRAPPER(void *, malloc, size_t c)
{
  size_t size_with_crumple_zones;
  DECLARE(void *, malloc, size_t c);
  char *result;
  BEGIN_PROTECT (malloc, c);

  size_with_crumple_zones =
    CLAMPADD(c, CLAMPADD(__mf_opts.crumple_zone,
                         __mf_opts.crumple_zone));
  result = (char *) CALL_REAL (malloc, size_with_crumple_zones);

  if (LIKELY(result))
    {
      result += __mf_opts.crumple_zone;
      __mf_register (result, c, __MF_TYPE_HEAP, "malloc region");
      /* XXX: register __MF_TYPE_NOACCESS for crumple zones.  */
    }

  return result;
}
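
/* Layout of the block returned by the wrapped malloc, for reference:

       base                base + crumple_zone
        |                    |
        v                    v
        +--------------------+-----------------+--------------------+
        |    crumple zone    |  user data (c)  |    crumple zone    |
        +--------------------+-----------------+--------------------+

   Only the middle region is registered via __mf_register, so pointer
   accesses that stray into either crumple zone fall outside every
   registered object and are reported as violations.  */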

#if PIC
/* A special bootstrap variant.  It hands out chunks of a small static
   arena, since the real allocator cannot be called this early.  */
void *
__mf_0fn_calloc (size_t c, size_t n)
{
  enum foo { BS = 4096, NB = 10 };
  static char bufs[NB][BS];
  static unsigned bufs_used[NB];
  unsigned i;

  /* fprintf (stderr, "0fn calloc c=%lu n=%lu\n", c, n); */
  for (i = 0; i < NB; i++)
    if (! bufs_used[i] && (c*n) < BS)
      {
        bufs_used[i] = 1;
        return & bufs[i][0];
      }
  return NULL;
}
#endif
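
/* Note on the __mf_0fn_* bootstrap variants: they stand in for the
   real libc functions during the brief startup window before symbol
   lookup has resolved them.  The calloc variant above hands out real
   (static) memory because, at least with glibc, dlsym() itself may
   call calloc() during that window; the other variants can simply
   fail. */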

#undef calloc
WRAPPER(void *, calloc, size_t c, size_t n)
{
  size_t size_with_crumple_zones;
  DECLARE(void *, calloc, size_t, size_t);
  DECLARE(void *, malloc, size_t);
  DECLARE(void *, memset, void *, int, size_t);
  char *result;
  BEGIN_PROTECT (calloc, c, n);

  size_with_crumple_zones =
    CLAMPADD((c * n), /* XXX: CLAMPMUL */
             CLAMPADD(__mf_opts.crumple_zone,
                      __mf_opts.crumple_zone));
  result = (char *) CALL_REAL (malloc, size_with_crumple_zones);

  if (LIKELY(result))
    memset (result, 0, size_with_crumple_zones);

  if (LIKELY(result))
    {
      result += __mf_opts.crumple_zone;
      __mf_register (result, c*n /* XXX: clamp */, __MF_TYPE_HEAP_I,
                     "calloc region");
      /* XXX: register __MF_TYPE_NOACCESS for crumple zones.  */
    }

  return result;
}
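
/* The XXX: CLAMPMUL notes above ask for a saturating multiply to match
   the CLAMPADD used elsewhere.  A minimal sketch of such a helper
   (hypothetical; nothing in this file defines it) might read: */
#if 0
static size_t
clampmul (size_t a, size_t b)
{
  /* Saturate at SIZE_MAX instead of wrapping around on overflow. */
  if (b != 0 && a > ((size_t) -1) / b)
    return (size_t) -1;
  return a * b;
}
#endif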

#if PIC
/* A special bootstrap variant. */
void *
__mf_0fn_realloc (void *buf, size_t c)
{
  return NULL;
}
#endif

#undef realloc
WRAPPER(void *, realloc, void *buf, size_t c)
{
  DECLARE(void *, realloc, void *, size_t);
  size_t size_with_crumple_zones;
  char *base = buf;
  unsigned saved_wipe_heap;
  char *result;
  BEGIN_PROTECT (realloc, buf, c);

  if (LIKELY(buf))
    base -= __mf_opts.crumple_zone;

  size_with_crumple_zones =
    CLAMPADD(c, CLAMPADD(__mf_opts.crumple_zone,
                         __mf_opts.crumple_zone));
  result = (char *) CALL_REAL (realloc, base, size_with_crumple_zones);

  /* Ensure heap wiping doesn't occur during this peculiar
     unregister/reregister pair: the old region may already have been
     freed or reused by the real realloc.  */
  LOCKTH ();
  __mf_state = reentrant;
  saved_wipe_heap = __mf_opts.wipe_heap;
  __mf_opts.wipe_heap = 0;

  if (LIKELY(buf))
    __mfu_unregister (buf, 0);

  if (LIKELY(result))
    {
      result += __mf_opts.crumple_zone;
      __mfu_register (result, c, __MF_TYPE_HEAP_I, "realloc region");
      /* XXX: register __MF_TYPE_NOACCESS for crumple zones.  */
    }

  /* Restore previous setting.  */
  __mf_opts.wipe_heap = saved_wipe_heap;

  __mf_state = active;
  UNLOCKTH ();

  return result;
}

#if PIC
/* A special bootstrap variant. */
void
__mf_0fn_free (void *buf)
{
  return;
}
#endif

#undef free
WRAPPER(void, free, void *buf)
{
  /* Use a circular queue to delay some number
     (__mf_opts.free_queue_length) of free()s.  */
  static void *free_queue [__MF_FREEQ_MAX];
  static unsigned free_ptr = 0;
  static int freeq_initialized = 0;
  DECLARE(void, free, void *);

  BEGIN_PROTECT (free, buf);

  if (UNLIKELY(buf == NULL))
    return;

  LOCKTH ();
  if (UNLIKELY(!freeq_initialized))
    {
      memset (free_queue, 0,
              __MF_FREEQ_MAX * sizeof (void *));
      freeq_initialized = 1;
    }
  UNLOCKTH ();

  __mf_unregister (buf, 0);

  if (UNLIKELY(__mf_opts.free_queue_length > 0))
    {
      char *freeme = NULL;
      LOCKTH ();
      if (free_queue [free_ptr] != NULL)
        {
          freeme = free_queue [free_ptr];
          freeme -= __mf_opts.crumple_zone;
        }
      free_queue [free_ptr] = buf;
      free_ptr = (free_ptr == (__mf_opts.free_queue_length-1) ? 0 : free_ptr + 1);
      UNLOCKTH ();
      if (freeme)
        {
          if (__mf_opts.trace_mf_calls)
            {
              VERBOSE_TRACE ("freeing deferred pointer %p (crumple %u)\n",
                             (void *) freeme,
                             __mf_opts.crumple_zone);
            }
          CALL_REAL (free, freeme);
        }
    }
  else
    {
      /* Back the pointer up to the beginning of the crumple zone.  */
      char *base = (char *)buf;
      base -= __mf_opts.crumple_zone;
      if (__mf_opts.trace_mf_calls)
        {
          VERBOSE_TRACE ("freeing pointer %p = %p - %u\n",
                         (void *) base,
                         (void *) buf,
                         __mf_opts.crumple_zone);
        }
      CALL_REAL (free, base);
    }
}

#if PIC
/* A special bootstrap variant. */
void *
__mf_0fn_mmap (void *start, size_t l, int prot, int f, int fd, off_t off)
{
  return (void *) -1;
}
#endif

#undef mmap
WRAPPER(void *, mmap,
        void *start, size_t length, int prot,
        int flags, int fd, off_t offset)
{
  DECLARE(void *, mmap, void *, size_t, int,
          int, int, off_t);
  void *result;
  BEGIN_PROTECT (mmap, start, length, prot, flags, fd, offset);

  result = CALL_REAL (mmap, start, length, prot,
                      flags, fd, offset);

  /*
  VERBOSE_TRACE ("mmap (%08lx, %08lx, ...) => %08lx\n",
                 (uintptr_t) start, (uintptr_t) length,
                 (uintptr_t) result);
  */

  if (result != (void *)-1)
    {
      /* Register each page as a heap object.  Why not register it all
         as a single segment?  That's so that a later munmap() call
         can unmap individual pages.  XXX: would __MF_TYPE_GUESS make
         this more automatic?  */
      size_t ps = getpagesize ();
      uintptr_t base = (uintptr_t) result;
      uintptr_t offset;

      for (offset = 0; offset < length; offset += ps)
        {
          /* XXX: We could map PROT_NONE to __MF_TYPE_NOACCESS.  */
          /* XXX: Unaccessed HEAP pages are reported as leaks.  Is this
             appropriate for unaccessed mmap pages? */
          __mf_register ((void *) CLAMPADD (base, offset), ps,
                         __MF_TYPE_HEAP_I, "mmap page");
        }
    }

  return result;
}
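
/* Why per-page registration matters: a caller may legitimately unmap
   just part of an earlier mapping.  A hypothetical usage (not from
   this file) that the page-granular bookkeeping supports: */
#if 0
static void
example_partial_unmap (void)
{
  size_t ps = getpagesize ();
  char *p = mmap (NULL, 4 * ps, PROT_READ | PROT_WRITE,
                  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  munmap (p + ps, ps);  /* punch a one-page hole; only that page
                           is then unregistered by the wrapper */
}
#endif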

#if PIC
/* A special bootstrap variant. */
int
__mf_0fn_munmap (void *start, size_t length)
{
  return -1;
}
#endif

#undef munmap
WRAPPER(int, munmap, void *start, size_t length)
{
  DECLARE(int, munmap, void *, size_t);
  int result;
  BEGIN_PROTECT (munmap, start, length);

  result = CALL_REAL (munmap, start, length);

  /*
  VERBOSE_TRACE ("munmap (%08lx, %08lx, ...) => %08lx\n",
                 (uintptr_t) start, (uintptr_t) length,
                 (uintptr_t) result);
  */

  if (result == 0)
    {
      /* Unregister each page as a heap object.  */
      size_t ps = getpagesize ();
      uintptr_t base = (uintptr_t) start & (~ (ps - 1)); /* page align */
      uintptr_t offset;

      for (offset = 0; offset < length; offset += ps)
        __mf_unregister ((void *) CLAMPADD (base, offset), ps);
    }
  return result;
}
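
/* Example of the page alignment above, assuming ps == 4096 (0x1000):
   a start address of 0x403215 is masked down to 0x403000, so the
   unregister loop walks page boundaries even if the caller passed an
   interior pointer. */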

/* This wrapper is a little different, as it's called indirectly from
   __mf_fini also to clean up pending allocations.  */
void *
__mf_wrap_alloca_indirect (size_t c)
{
  DECLARE (void *, malloc, size_t);
  DECLARE (void, free, void *);

  /* This struct, a linked list, tracks alloca'd objects.  The newest
     object is at the head of the list.  If we detect that we've
     popped a few levels of stack, then the listed objects are freed
     as needed.  NB: The tracking struct is allocated with
     real_malloc; the user data with wrap_malloc.  */
  struct alloca_tracking { void *ptr; void *stack; struct alloca_tracking *next; };
  static struct alloca_tracking *alloca_history = NULL;

  void *stack = __builtin_frame_address (0);
  char *result = NULL;
  struct alloca_tracking *track;

  TRACE ("%s\n", __PRETTY_FUNCTION__);
  VERBOSE_TRACE ("alloca stack level %p\n", (void *) stack);

  /* XXX: thread locking! */

  /* Free any previously alloca'd blocks that belong to deeper-nested
     functions, which must therefore have exited by now.  */

#define DEEPER_THAN < /* XXX: for x86; a portable probe in the style of libiberty/alloca.c's find_stack_direction() is sketched at the end of this file. */

  while (alloca_history &&
         ((uintptr_t) alloca_history->stack DEEPER_THAN (uintptr_t) stack))
    {
      struct alloca_tracking *next = alloca_history->next;
      __mf_unregister (alloca_history->ptr, 0);
      CALL_REAL (free, alloca_history->ptr);
      CALL_REAL (free, alloca_history);
      alloca_history = next;
    }

  /* Allocate the new block.  */
  if (LIKELY (c > 0)) /* alloca(0) causes no allocation.  */
    {
      track = (struct alloca_tracking *) CALL_REAL (malloc,
                                                    sizeof (struct alloca_tracking));
      if (LIKELY (track != NULL))
        {
          result = CALL_REAL (malloc, c);
          if (UNLIKELY (result == NULL))
            {
              CALL_REAL (free, track);
              /* Too bad.  XXX: What about errno?  */
            }
          else
            {
              __mf_register (result, c, __MF_TYPE_HEAP, "alloca region");
              track->ptr = result;
              track->stack = stack;
              track->next = alloca_history;
              alloca_history = track;
            }
        }
    }

  return result;
}


#undef alloca
WRAPPER(void *, alloca, size_t c)
{
  return __mf_wrap_alloca_indirect (c);
}