/* Extracted from the GCC repository (official-gcc.git):
   libmudflap/mf-hooks1.c, blob 6f9d159e46102b960b7a5844b3aa8c9c13e3d20d.
   (Unrelated commit subject in the scrape: "Fix vec_merge patterns for
   Altivec ppc.")  */
1 /* Mudflap: narrow-pointer bounds-checking by tree rewriting.
2 Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
3 Contributed by Frank Ch. Eigler <fche@redhat.com>
4 and Graydon Hoare <graydon@redhat.com>
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 2, or (at your option) any later
11 version.
13 In addition to the permissions in the GNU General Public License, the
14 Free Software Foundation gives you unlimited permission to link the
15 compiled version of this file into combinations with other programs,
16 and to distribute those combinations without any restriction coming
17 from the use of this file. (The General Public License restrictions
18 do apply in other respects; for example, they cover modification of
19 the file, and distribution when not linked into a combine
20 executable.)
22 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
23 WARRANTY; without even the implied warranty of MERCHANTABILITY or
24 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
25 for more details.
27 You should have received a copy of the GNU General Public License
28 along with GCC; see the file COPYING. If not, write to the Free
29 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
30 02111-1307, USA. */
33 #include "config.h"
35 #ifndef HAVE_SOCKLEN_T
36 #define socklen_t int
37 #endif
40 /* These attempt to coax various unix flavours to declare all our
41 needed tidbits in the system headers. */
42 #if !defined(__FreeBSD__) && !defined(__APPLE__)
43 #define _POSIX_SOURCE
44 #endif /* Some BSDs break <sys/socket.h> if this is defined. */
45 #define _GNU_SOURCE
46 #define _XOPEN_SOURCE
47 #define _BSD_TYPES
48 #define __EXTENSIONS__
49 #define _ALL_SOURCE
50 #define _LARGE_FILE_API
51 #define _XOPEN_SOURCE_EXTENDED 1
53 #include <string.h>
54 #include <stdio.h>
55 #include <stdlib.h>
56 #include <sys/time.h>
57 #include <sys/types.h>
58 #include <unistd.h>
59 #include <assert.h>
60 #include <errno.h>
61 #include <limits.h>
62 #include <time.h>
64 #include "mf-runtime.h"
65 #include "mf-impl.h"
67 #ifdef _MUDFLAP
68 #error "Do not compile this file with -fmudflap!"
69 #endif
72 /* Memory allocation related hook functions. Some of these are
73 intercepted via linker wrapping or symbol interposition. Others
74 use plain macros in mf-runtime.h. */
#if PIC
/* Bootstrap stand-in used before the dynamic linker has resolved the
   real malloc: always reports failure so callers fall back gracefully.  */
void *
__mf_0fn_malloc (size_t c)
{
  /* fprintf (stderr, "0fn malloc c=%lu\n", c); */
  return NULL;
}
#endif
88 #undef malloc
89 WRAPPER(void *, malloc, size_t c)
91 size_t size_with_crumple_zones;
92 DECLARE(void *, malloc, size_t c);
93 void *result;
94 BEGIN_PROTECT (malloc, c);
96 size_with_crumple_zones =
97 CLAMPADD(c,CLAMPADD(__mf_opts.crumple_zone,
98 __mf_opts.crumple_zone));
99 result = (char *) CALL_REAL (malloc, size_with_crumple_zones);
101 if (LIKELY(result))
103 result += __mf_opts.crumple_zone;
104 __mf_register (result, c, __MF_TYPE_HEAP, "malloc region");
105 /* XXX: register __MF_TYPE_NOACCESS for crumple zones. */
108 return result;
#ifdef PIC
/* Bootstrap stand-in for calloc, used before the real allocator is
   resolvable (e.g. while the dynamic linker itself calls calloc).
   Hands out one of NB static 4 KB buffers, each at most once; the
   static storage is already zero-initialized.  Returns NULL when no
   buffer fits or all are spent.  */
void *
__mf_0fn_calloc (size_t c, size_t n)
{
  enum foo { BS = 4096, NB=10 };
  static char bufs[NB][BS];
  static unsigned bufs_used[NB];
  unsigned i;

  /* fprintf (stderr, "0fn calloc c=%lu n=%lu\n", c, n); */

  /* Guard the c*n product against size_t overflow: a wrapped product
     could pass the (c*n) < BS test below and hand out a buffer far too
     small for the actual request.  */
  if (n != 0 && c > (size_t) BS / n)
    return NULL;

  for (i=0; i<NB; i++)
    {
      if (! bufs_used[i] && (c*n) < BS)
	{
	  bufs_used[i] = 1;
	  return & bufs[i][0];
	}
    }
  return NULL;
}
#endif
136 #undef calloc
137 WRAPPER(void *, calloc, size_t c, size_t n)
139 size_t size_with_crumple_zones;
140 DECLARE(void *, calloc, size_t, size_t);
141 DECLARE(void *, malloc, size_t);
142 DECLARE(void *, memset, void *, int, size_t);
143 char *result;
144 BEGIN_PROTECT (calloc, c, n);
146 size_with_crumple_zones =
147 CLAMPADD((c * n), /* XXX: CLAMPMUL */
148 CLAMPADD(__mf_opts.crumple_zone,
149 __mf_opts.crumple_zone));
150 result = (char *) CALL_REAL (malloc, size_with_crumple_zones);
152 if (LIKELY(result))
153 memset (result, 0, size_with_crumple_zones);
155 if (LIKELY(result))
157 result += __mf_opts.crumple_zone;
158 __mf_register (result, c*n /* XXX: clamp */, __MF_TYPE_HEAP_I, "calloc region");
159 /* XXX: register __MF_TYPE_NOACCESS for crumple zones. */
162 return result;
#if PIC
/* Bootstrap stand-in for realloc: always fails; the real function is
   resolved later.  */
void *
__mf_0fn_realloc (void *buf, size_t c)
{
  return NULL;
}
#endif
176 #undef realloc
177 WRAPPER(void *, realloc, void *buf, size_t c)
179 DECLARE(void * , realloc, void *, size_t);
180 size_t size_with_crumple_zones;
181 char *base = buf;
182 unsigned saved_wipe_heap;
183 char *result;
184 BEGIN_PROTECT (realloc, buf, c);
186 if (LIKELY(buf))
187 base -= __mf_opts.crumple_zone;
189 size_with_crumple_zones =
190 CLAMPADD(c, CLAMPADD(__mf_opts.crumple_zone,
191 __mf_opts.crumple_zone));
192 result = (char *) CALL_REAL (realloc, base, size_with_crumple_zones);
194 /* Ensure heap wiping doesn't occur during this peculiar
195 unregister/reregister pair. */
196 LOCKTH ();
197 __mf_state = reentrant;
198 saved_wipe_heap = __mf_opts.wipe_heap;
199 __mf_opts.wipe_heap = 0;
201 if (LIKELY(buf))
202 __mfu_unregister (buf, 0, __MF_TYPE_HEAP_I);
203 /* NB: underlying region may have been __MF_TYPE_HEAP. */
205 if (LIKELY(result))
207 result += __mf_opts.crumple_zone;
208 __mfu_register (result, c, __MF_TYPE_HEAP_I, "realloc region");
209 /* XXX: register __MF_TYPE_NOACCESS for crumple zones. */
212 /* Restore previous setting. */
213 __mf_opts.wipe_heap = saved_wipe_heap;
215 __mf_state = active;
216 UNLOCKTH ();
218 return result;
#if PIC
/* Bootstrap stand-in for free: silently drops the pointer (the only
   memory handed out at bootstrap time is static — see __mf_0fn_calloc).  */
void
__mf_0fn_free (void *buf)
{
  return;
}
#endif
231 #undef free
232 WRAPPER(void, free, void *buf)
234 /* Use a circular queue to delay some number (__mf_opts.free_queue_length) of free()s. */
235 static void *free_queue [__MF_FREEQ_MAX];
236 static unsigned free_ptr = 0;
237 static int freeq_initialized = 0;
238 DECLARE(void, free, void *);
240 BEGIN_PROTECT (free, buf);
242 if (UNLIKELY(buf == NULL))
243 return;
245 LOCKTH ();
246 if (UNLIKELY(!freeq_initialized))
248 memset (free_queue, 0,
249 __MF_FREEQ_MAX * sizeof (void *));
250 freeq_initialized = 1;
252 UNLOCKTH ();
254 __mf_unregister (buf, 0, __MF_TYPE_HEAP_I);
255 /* NB: underlying region may have been __MF_TYPE_HEAP. */
257 if (UNLIKELY(__mf_opts.free_queue_length > 0))
259 char *freeme = NULL;
260 LOCKTH ();
261 if (free_queue [free_ptr] != NULL)
263 freeme = free_queue [free_ptr];
264 freeme -= __mf_opts.crumple_zone;
266 free_queue [free_ptr] = buf;
267 free_ptr = (free_ptr == (__mf_opts.free_queue_length-1) ? 0 : free_ptr + 1);
268 UNLOCKTH ();
269 if (freeme)
271 if (__mf_opts.trace_mf_calls)
273 VERBOSE_TRACE ("freeing deferred pointer %p (crumple %u)\n",
274 (void *) freeme,
275 __mf_opts.crumple_zone);
277 CALL_REAL (free, freeme);
280 else
282 /* back pointer up a bit to the beginning of crumple zone */
283 char *base = (char *)buf;
284 base -= __mf_opts.crumple_zone;
285 if (__mf_opts.trace_mf_calls)
287 VERBOSE_TRACE ("freeing pointer %p = %p - %u\n",
288 (void *) base,
289 (void *) buf,
290 __mf_opts.crumple_zone);
292 CALL_REAL (free, base);
#if PIC
/* Bootstrap stand-in for mmap: reports MAP_FAILED unconditionally.  */
void *
__mf_0fn_mmap (void *start, size_t l, int prot, int f, int fd, off_t off)
{
  return (void *) -1;
}
#endif
307 #undef mmap
308 WRAPPER(void *, mmap,
309 void *start, size_t length, int prot,
310 int flags, int fd, off_t offset)
312 DECLARE(void *, mmap, void *, size_t, int,
313 int, int, off_t);
314 void *result;
315 BEGIN_PROTECT (mmap, start, length, prot, flags, fd, offset);
317 result = CALL_REAL (mmap, start, length, prot,
318 flags, fd, offset);
321 VERBOSE_TRACE ("mmap (%08lx, %08lx, ...) => %08lx\n",
322 (uintptr_t) start, (uintptr_t) length,
323 (uintptr_t) result);
326 if (result != (void *)-1)
328 /* Register each page as a heap object. Why not register it all
329 as a single segment? That's so that a later munmap() call
330 can unmap individual pages. XXX: would __MF_TYPE_GUESS make
331 this more automatic? */
332 size_t ps = getpagesize ();
333 uintptr_t base = (uintptr_t) result;
334 uintptr_t offset;
336 for (offset=0; offset<length; offset+=ps)
338 /* XXX: We could map PROT_NONE to __MF_TYPE_NOACCESS. */
339 /* XXX: Unaccessed HEAP pages are reported as leaks. Is this
340 appropriate for unaccessed mmap pages? */
341 __mf_register ((void *) CLAMPADD (base, offset), ps,
342 __MF_TYPE_HEAP_I, "mmap page");
346 return result;
#if PIC
/* Bootstrap stand-in for munmap: fails unconditionally.  Explicit int
   return type (implicit int is invalid since C99).  */
int
__mf_0fn_munmap (void *start, size_t length)
{
  return -1;
}
#endif
360 #undef munmap
361 WRAPPER(int , munmap, void *start, size_t length)
363 DECLARE(int, munmap, void *, size_t);
364 int result;
365 BEGIN_PROTECT (munmap, start, length);
367 result = CALL_REAL (munmap, start, length);
370 VERBOSE_TRACE ("munmap (%08lx, %08lx, ...) => %08lx\n",
371 (uintptr_t) start, (uintptr_t) length,
372 (uintptr_t) result);
375 if (result == 0)
377 /* Unregister each page as a heap object. */
378 size_t ps = getpagesize ();
379 uintptr_t base = (uintptr_t) start & (~ (ps - 1)); /* page align */
380 uintptr_t offset;
382 for (offset=0; offset<length; offset+=ps)
383 __mf_unregister ((void *) CLAMPADD (base, offset), ps, __MF_TYPE_HEAP_I);
385 return result;
389 /* This wrapper is a little different, as it's called indirectly from
390 __mf_fini also to clean up pending allocations. */
391 void *
392 __mf_wrap_alloca_indirect (size_t c)
394 DECLARE (void *, malloc, size_t);
395 DECLARE (void, free, void *);
397 /* This struct, a linked list, tracks alloca'd objects. The newest
398 object is at the head of the list. If we detect that we've
399 popped a few levels of stack, then the listed objects are freed
400 as needed. NB: The tracking struct is allocated with
401 real_malloc; the user data with wrap_malloc.
403 struct alloca_tracking { void *ptr; void *stack; struct alloca_tracking* next; };
404 static struct alloca_tracking *alloca_history = NULL;
406 void *stack = __builtin_frame_address (0);
407 void *result;
408 struct alloca_tracking *track;
410 TRACE ("%s\n", __PRETTY_FUNCTION__);
411 VERBOSE_TRACE ("alloca stack level %p\n", (void *) stack);
413 /* XXX: thread locking! */
415 /* Free any previously alloca'd blocks that belong to deeper-nested functions,
416 which must therefore have exited by now. */
418 #define DEEPER_THAN < /* XXX: for x86; steal find_stack_direction() from libiberty/alloca.c */
420 while (alloca_history &&
421 ((uintptr_t) alloca_history->stack DEEPER_THAN (uintptr_t) stack))
423 struct alloca_tracking *next = alloca_history->next;
424 __mf_unregister (alloca_history->ptr, 0, __MF_TYPE_HEAP);
425 CALL_REAL (free, alloca_history->ptr);
426 CALL_REAL (free, alloca_history);
427 alloca_history = next;
430 /* Allocate new block. */
431 result = NULL;
432 if (LIKELY (c > 0)) /* alloca(0) causes no allocation. */
434 track = (struct alloca_tracking *) CALL_REAL (malloc,
435 sizeof (struct alloca_tracking));
436 if (LIKELY (track != NULL))
438 result = CALL_REAL (malloc, c);
439 if (UNLIKELY (result == NULL))
441 CALL_REAL (free, track);
442 /* Too bad. XXX: What about errno? */
444 else
446 __mf_register (result, c, __MF_TYPE_HEAP, "alloca region");
447 track->ptr = result;
448 track->stack = stack;
449 track->next = alloca_history;
450 alloca_history = track;
455 return result;
459 #undef alloca
460 WRAPPER(void *, alloca, size_t c)
462 return __mf_wrap_alloca_indirect (c);